/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM58802,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
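/* Main TX entry point.  Small packets sent while the ring is completely
 * free are written ("pushed") directly through the doorbell BAR;
 * everything else is DMA-mapped and described with long TX BDs on the
 * normal path below.
 */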
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}
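	/* Normal (non-push) path: the skb stays in place and each piece
	 * (linear data plus page frags) is DMA-mapped into its own TX BD.
	 */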
normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
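/* TX completion handler: unmap and free up to @nr_pkts completed skbs,
 * advance the consumer index, and re-wake the queue if it was stopped
 * and enough descriptors have been reclaimed.
 */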
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
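/* RX buffer allocation helpers.  In page mode (used by XDP) each RX
 * buffer is a full DMA-mapped page; in normal mode it is a kmalloc'd
 * buffer mapped at bp->rx_dma_offset.
 */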
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}
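/* Allocate one aggregation ring buffer.  When PAGE_SIZE is larger than
 * BNXT_RX_PAGE_SIZE, a single page is carved into multiple aggregation
 * buffers and reference-counted accordingly.
 */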
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
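/* Build an skb for a page-mode (XDP) RX buffer: copy the packet headers
 * into the skb's linear area and attach the rest of the page as a frag.
 */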
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}
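/* Attach @agg_bufs aggregation buffers from the completion ring to @skb
 * as page frags.  On allocation failure the skb is dropped and all
 * remaining aggregation buffers are recycled.
 */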
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}
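/* Handle a TPA_START completion: park the current RX buffer in the
 * per-agg-id tpa_info slot so the aggregated packet can be assembled
 * when the matching TPA_END completion arrives.
 */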
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
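/* Fix up the network/transport headers of an aggregated packet on
 * 5731x chips so it can be handed to tcp_gro_complete().  Header
 * offsets come from the TPA_START hdr_info field.
 */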
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
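/* Finish GRO processing for an aggregated packet: set gso_size/gso_type,
 * run the chip-specific header fixup, and complete the GRO merge.
 */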
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}
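/* Handle a TPA_END completion: build an skb for the aggregated packet
 * stored at TPA_START, attach any aggregation buffers, and apply VLAN,
 * checksum, and GRO metadata before returning it for delivery.
 */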
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}
/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
				 u32 *raw_cons, u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event IDs for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;
		if (data1 & 0x20000) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
				    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}
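/* Dispatch a firmware (HWRM) completion: DONE completions acknowledge a
 * command sequence id, FWD_REQ completions are queued for the relevant
 * VF, and async events are handed to bnxt_async_event_process().
 */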
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		/* fall through */

	default:
		break;
	}

	return 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}

static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
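/* Core NAPI poll loop: walk the combined completion ring, handling TX,
 * RX and HWRM completions until the budget is exhausted, then ring the
 * doorbells for any rings that produced events.
 */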
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	u8 event = 0;
	struct tx_cmp *txcmp;

	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			if (likely(budget))
				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
			else
				rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
							   &event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			/* Increment rx_pkts when rc is -ENOMEM to count towards
			 * the NAPI budget.  Otherwise, we may potentially loop
			 * here forever if we consistently cannot allocate
			 * buffers.
			 */
			else if (rc == -ENOMEM)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	if (event & BNXT_TX_EVENT) {
		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
		void __iomem *db = txr->tx_doorbell;
		u16 prod = txr->tx_prod;

		/* Sync BD data before updating doorbell */
		wmb();

		bnxt_db_write(bp, db, DB_KEY_TX | prod);
	}

	cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);

	if (tx_pkts)
		bnapi->tx_int(bp, bnapi, tx_pkts);

	if (event & BNXT_RX_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
		if (event & BNXT_AGG_EVENT)
			bnxt_db_write(bp, rxr->rx_agg_doorbell,
				      DB_KEY_RX | rxr->rx_agg_prod);
	}
	return rx_pkts;
}
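/* NAPI poll handler for the Nitro A0 special ring: RX completions here
 * are recycled by forcing an error, HWRM DONE completions are handled,
 * and anything else is reported as invalid.
 */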
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);

	if (event & BNXT_AGG_EVENT)
		bnxt_db_write(bp, rxr->rx_agg_doorbell,
			      DB_KEY_RX | rxr->rx_agg_prod);

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
	return rx_pkts;
}

static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

		if (work_done >= budget)
			break;

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_CP_DB_REARM(cpr->cp_doorbell,
						 cpr->cp_raw_cons);
			break;
		}
	}
	mmiowb();
	return work_done;
}

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single_attrs(&pdev->dev,
						       tpa_info->mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			dma_addr_t mapping = rx_buf->mapping;
			void *data = rx_buf->data;

			if (!data)
				continue;

			rx_buf->data = NULL;

			if (BNXT_RX_PAGE_MODE(bp)) {
				mapping -= bp->rx_dma_offset;
				dma_unmap_page_attrs(&pdev->dev, mapping,
						     PAGE_SIZE, bp->rx_dir,
						     DMA_ATTR_WEAK_ORDERING);
				__free_page(data);
			} else {
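				/* Normal (non page-mode) rx buffers are
				 * kmalloc'ed data mapped with
				 * dma_map_single(); unmap the full usable
				 * length before freeing.
				 */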
				dma_unmap_single_attrs(&pdev->dev, mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);
				kfree(data);
			}
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE,
					     PCI_DMA_FROMDEVICE,
					     DMA_ATTR_WEAK_ORDERING);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
		if (rxr->rx_page) {
			__free_page(rxr->rx_page);
			rxr->rx_page = NULL;
		}
	}
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < ring->nr_pages; i++) {
		if (!ring->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, ring->page_size,
				  ring->pg_arr[i], ring->dma_arr[i]);

		ring->pg_arr[i] = NULL;
	}
	if (ring->pg_tbl) {
		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
				  ring->pg_tbl, ring->pg_tbl_map);
		ring->pg_tbl = NULL;
	}
	if (ring->vmem_size && *ring->vmem) {
		vfree(*ring->vmem);
		*ring->vmem = NULL;
	}
}

static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (ring->nr_pages > 1) {
		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
						  ring->nr_pages * 8,
						  &ring->pg_tbl_map,
						  GFP_KERNEL);
		if (!ring->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < ring->nr_pages; i++) {
		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     ring->page_size,
						     &ring->dma_arr[i],
						     GFP_KERNEL);
		if (!ring->pg_arr[i])
			return -ENOMEM;

		if (ring->nr_pages > 1)
			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
	}

	if (ring->vmem_size) {
		*ring->vmem = vzalloc(ring->vmem_size);
		if (!(*ring->vmem))
			return -ENOMEM;
	}
	return 0;
}

static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, ring);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc, agg_rings = 0, tpa_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	if (bp->flags & BNXT_FLAG_TPA)
		tpa_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, ring);
			if (rc)
				return rc;

			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;

			if (tpa_rings) {
				rxr->rx_tpa = kcalloc(MAX_TPA,
						sizeof(struct bnxt_tpa_info),
						GFP_KERNEL);
				if (!rxr->rx_tpa)
					return -ENOMEM;
			}
		}
	}
	return 0;
}

static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					   bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to back up the
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				  sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);

			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		ring->queue_id = bp->q_info[j].queue_id;
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	int i, rc;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;
	}
	return 0;
}

static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;
		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		ring->nr_pages = bp->cp_nr_pages;
		ring->page_size = HW_CMPD_RING_SIZE;
		ring->pg_arr = (void **)cpr->cp_desc_ring;
		ring->dma_arr = cpr->cp_desc_mapping;
		ring->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		ring->nr_pages = bp->rx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_desc_ring;
		ring->dma_arr = rxr->rx_desc_mapping;
		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		ring->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		ring->nr_pages = bp->rx_agg_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
		ring->dma_arr = rxr->rx_agg_desc_mapping;
		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		ring->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		ring->nr_pages = bp->tx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)txr->tx_desc_ring;
		ring->dma_arr = txr->tx_desc_mapping;
		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		ring->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}

static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct net_device *dev = bp->dev;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 prod, type;
	int i;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
		if (IS_ERR(rxr->xdp_prog)) {
			int rc = PTR_ERR(rxr->xdp_prog);

			rxr->xdp_prog = NULL;
			return rc;
		}
	}
	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
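			/* A partial fill is not fatal; warn and run the
			 * aggregation ring with however many pages were
			 * allocated.
			 */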
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
			u8 *data;
			dma_addr_t mapping;

			for (i = 0; i < MAX_TPA; i++) {
				data = __bnxt_alloc_rx_data(bp, &mapping,
							    GFP_KERNEL);
				if (!data)
					return -ENOMEM;

				rxr->rx_tpa[i].data = data;
				rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
				rxr->rx_tpa[i].mapping = mapping;
			}
		} else {
			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   MAX_SKB_FRAGS + 1);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
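		/* The RSS/COS/LB contexts and L2 context start out
		 * unallocated; INVALID_HW_RING_ID is the sentinel used
		 * throughout the driver for "no firmware resource".
		 */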
		vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}

void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	if (bp->dev->features & NETIF_F_GRO)
		bp->flags |= BNXT_FLAG_GRO;
}

/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	agg_ring_size = ring_size * agg_factor;

	if (agg_ring_size) {
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size,
			    bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}

int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
	if (page_mode) {
		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
			return -EOPNOTSUPP;
		bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
		bp->dev->hw_features &= ~NETIF_F_LRO;
		bp->dev->features &= ~NETIF_F_LRO;
		bp->rx_dir = DMA_BIDIRECTIONAL;
		bp->rx_skb_func = bnxt_rx_page_skb;
	} else {
		bp->dev->max_mtu = BNXT_MAX_MTU;
		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_FROM_DEVICE;
		bp->rx_skb_func = bnxt_rx_skb;
	}
	return 0;
}

static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, PAGE_SIZE,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}

static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}

		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
			continue;

		/* Allocate rss table and hash key */
		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

out:
	return rc;
}

static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
			  bp->hwrm_cmd_resp_dma_addr);

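	/* The command response page is gone; clear the cached pointer
	 * before freeing the optional debug response buffer below.
	 */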
	bp->hwrm_cmd_resp_addr = NULL;
	if (bp->hwrm_dbg_resp_addr) {
		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
				  bp->hwrm_dbg_resp_addr,
				  bp->hwrm_dbg_resp_dma_addr);

		bp->hwrm_dbg_resp_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						    &bp->hwrm_cmd_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_cmd_resp_addr)
		return -ENOMEM;
	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
						    HWRM_DBG_REG_BUF_SIZE,
						    &bp->hwrm_dbg_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_dbg_resp_addr)
		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");

	return 0;
}

static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
{
	if (bp->hwrm_short_cmd_req_addr) {
		struct pci_dev *pdev = bp->pdev;

		dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
				  bp->hwrm_short_cmd_req_addr,
				  bp->hwrm_short_cmd_req_dma_addr);
		bp->hwrm_short_cmd_req_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_short_cmd_req_addr =
		dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
				   &bp->hwrm_short_cmd_req_dma_addr,
				   GFP_KERNEL);
	if (!bp->hwrm_short_cmd_req_addr)
		return -ENOMEM;

	return 0;
}

static void bnxt_free_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	if (bp->hw_rx_port_stats) {
		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
				  bp->hw_rx_port_stats,
				  bp->hw_rx_port_stats_map);
		bp->hw_rx_port_stats = NULL;
		bp->flags &= ~BNXT_FLAG_PORT_STATS;
	}

	if (!bp->bnapi)
		return;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats) {
			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
					  cpr->hw_stats_map);
			cpr->hw_stats = NULL;
		}
	}
}

static int bnxt_alloc_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
						   &cpr->hw_stats_map,
						   GFP_KERNEL);
		if (!cpr->hw_stats)
			return -ENOMEM;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
					 sizeof(struct tx_port_stats) + 1024;

		bp->hw_rx_port_stats =
			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
					   &bp->hw_rx_port_stats_map,
					   GFP_KERNEL);
		if (!bp->hw_rx_port_stats)
			return -ENOMEM;

		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
				       512;
		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
					   sizeof(struct rx_port_stats) + 512;
		bp->flags |= BNXT_FLAG_PORT_STATS;
	}
	return 0;
}

static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;
		}
	}
}

static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* We are under rtnl_lock and all our NAPIs have been disabled,
	 * so it is safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		kfree(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
				    sizeof(long),
				    GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}

static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring_map);
		bp->tx_ring_map = NULL;
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}

static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
		}

		bp->rx_ring = kcalloc(bp->rx_nr_rings,
				      sizeof(struct bnxt_rx_ring_info),
				      GFP_KERNEL);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			bp->rx_ring[i].bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kcalloc(bp->tx_nr_rings,
				      sizeof(struct bnxt_tx_ring_info),
				      GFP_KERNEL);
		if (!bp->tx_ring)
			return -ENOMEM;

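		/* The first tx_nr_rings_xdp tx rings are reserved for XDP;
		 * only the remaining rings are bound to netdev tx queues in
		 * the loop below.
		 */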
		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
					  GFP_KERNEL);

		if (!bp->tx_ring_map)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			bp->tx_ring[i].bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
			if (i >= bp->tx_nr_rings_xdp) {
				bp->tx_ring[i].txq_index = i -
					bp->tx_nr_rings_xdp;
				bp->bnapi[j]->tx_int = bnxt_tx_int;
			} else {
				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
			}
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}

static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

static void bnxt_disable_int_sync(struct bnxt *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);

	bnxt_disable_int(bp);
	for (i = 0; i < bp->cp_nr_rings; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}

static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
				 int timeout, bool silent)
{
	int i, intr_process, rc, tmo_count;
	struct input *req = msg;
	u32 *data = msg;
	__le32 *resp_len, *valid;
	u16 cp_ring_id, len = 0;
	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;

	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
		struct hwrm_short_input short_input = {0};

		memcpy(short_cmd_req, req, msg_len);
		memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
						   msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr =
			cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + i);

	/* currently supports only one outstanding message */
	if (intr_process)
		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + 0x100);

	if (!timeout)
		timeout = DFLT_HWRM_CMD_TIMEOUT;

	i = 0;
	tmo_count = timeout * 40;
	if (intr_process) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
		       i++ < tmo_count) {
			usleep_range(25, 40);
		}

		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
				   le16_to_cpu(req->req_type));
			return -1;
		}
	} else {
		/* Check if response len is updated */
		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
		for (i = 0; i < tmo_count; i++) {
			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
			      HWRM_RESP_LEN_SFT;
			if (len)
				break;
			usleep_range(25, 40);
		}

		if (i >= tmo_count) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len);
			return -1;
		}

		/* Last word of resp contains valid bit */
		valid = bp->hwrm_cmd_resp_addr + len - 4;
		for (i = 0; i < 5; i++) {
			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
				break;
			udelay(1);
		}

		if (i >= 5) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len, *valid);
			return -1;
		}
	}

	rc = le16_to_cpu(resp->error_code);
	if (rc && !silent)
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(resp->req_type),
			   le16_to_cpu(resp->seq_id), rc);
	return rc;
}

int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}

int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int
bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
				 int bmap_size)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;
	int i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);

	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (test_bit(i, bmap))
				__set_bit(i, async_events_bmap);
		}
	}

	for (i = 0; i < 8; i++)
		req.async_event_fwd[i] |= cpu_to_le32(events[i]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
			    FUNC_DRV_RGTR_REQ_ENABLES_VER);

	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	req.ver_maj = DRV_VER_MAJ;
	req.ver_min = DRV_VER_MIN;
	req.ver_upd = DRV_VER_UPD;

	if (BNXT_PF(bp)) {
		u32 data[8];
		int i;

		memset(data, 0, sizeof(data));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
			u16 cmd = bnxt_vf_req_snif[i];
			unsigned int bit, idx;

			idx = cmd / 32;
			bit = cmd % 32;
			data[idx] |= 1 << bit;
		}

		for (i = 0; i < 8; i++)
			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}

static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE

static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
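
	/* Advertise every field in BNXT_NTP_FLTR_FLAGS; the 4-tuple, MAC
	 * address and destination vnic are all filled in below.
	 */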
	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
		int i;

		req.ethertype = htons(ETH_P_IPV6);
		req.ip_addr_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
		*(struct in6_addr *)&req.src_ipaddr[0] =
			keys->addrs.v6addrs.src;
		*(struct in6_addr *)&req.dst_ipaddr[0] =
			keys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
		}
	} else {
		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	}
	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
		req.tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
	}

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fltr->filter_id = resp->ntuple_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif

static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	u32 rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req.flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
							resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1;	/* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The number of aggregation segments is in log2 units, and
		 * the first packet is not counted in these units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

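	/* Request jumbo placement plus IPv4/IPv6 header-data split for
	 * this vnic; both thresholds are set to rx_copy_thresh (see the
	 * firmware note below).
	 */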
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);

	req.lb_rule = cpu_to_le16(0xffff);
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |=
			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	u32 rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}

static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		bp->vnic_info[vnic_id].fw_grp_ids[j] =
					bp->grp_info[grp_idx].fw_grp_id;
	}

	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (resp->flags &
		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
	}
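	/* BNXT_FLAG_NEW_RSS_CAP is consumed in bnxt_alloc_vnic_attributes()
	 * and bnxt_hwrm_vnic_cfg(), where RFS vnics can then share vnic 0's
	 * RSS rule instead of carrying private RSS contexts.
	 */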
mutex_unlock(&bp->hwrm_cmd_lock); 4073 return rc; 4074 } 4075 4076 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 4077 { 4078 u16 i; 4079 u32 rc = 0; 4080 4081 mutex_lock(&bp->hwrm_cmd_lock); 4082 for (i = 0; i < bp->rx_nr_rings; i++) { 4083 struct hwrm_ring_grp_alloc_input req = {0}; 4084 struct hwrm_ring_grp_alloc_output *resp = 4085 bp->hwrm_cmd_resp_addr; 4086 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 4087 4088 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 4089 4090 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 4091 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 4092 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 4093 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 4094 4095 rc = _hwrm_send_message(bp, &req, sizeof(req), 4096 HWRM_CMD_TIMEOUT); 4097 if (rc) 4098 break; 4099 4100 bp->grp_info[grp_idx].fw_grp_id = 4101 le32_to_cpu(resp->ring_group_id); 4102 } 4103 mutex_unlock(&bp->hwrm_cmd_lock); 4104 return rc; 4105 } 4106 4107 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 4108 { 4109 u16 i; 4110 u32 rc = 0; 4111 struct hwrm_ring_grp_free_input req = {0}; 4112 4113 if (!bp->grp_info) 4114 return 0; 4115 4116 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 4117 4118 mutex_lock(&bp->hwrm_cmd_lock); 4119 for (i = 0; i < bp->cp_nr_rings; i++) { 4120 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 4121 continue; 4122 req.ring_group_id = 4123 cpu_to_le32(bp->grp_info[i].fw_grp_id); 4124 4125 rc = _hwrm_send_message(bp, &req, sizeof(req), 4126 HWRM_CMD_TIMEOUT); 4127 if (rc) 4128 break; 4129 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4130 } 4131 mutex_unlock(&bp->hwrm_cmd_lock); 4132 return rc; 4133 } 4134 4135 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 4136 struct bnxt_ring_struct *ring, 4137 u32 ring_type, u32 map_index, 4138 u32 stats_ctx_id) 4139 { 4140 int rc = 0, err = 0; 4141 struct hwrm_ring_alloc_input req = {0}; 4142 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4143 u16 ring_id; 4144 4145 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 4146 4147 req.enables = 0; 4148 if (ring->nr_pages > 1) { 4149 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); 4150 /* Page size is in log2 units */ 4151 req.page_size = BNXT_PAGE_SHIFT; 4152 req.page_tbl_depth = 1; 4153 } else { 4154 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); 4155 } 4156 req.fbo = 0; 4157 /* Association of ring index with doorbell index and MSIX number */ 4158 req.logical_id = cpu_to_le16(map_index); 4159 4160 switch (ring_type) { 4161 case HWRM_RING_ALLOC_TX: 4162 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 4163 /* Association of transmit ring with completion ring */ 4164 req.cmpl_ring_id = 4165 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); 4166 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 4167 req.stat_ctx_id = cpu_to_le32(stats_ctx_id); 4168 req.queue_id = cpu_to_le16(ring->queue_id); 4169 break; 4170 case HWRM_RING_ALLOC_RX: 4171 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4172 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 4173 break; 4174 case HWRM_RING_ALLOC_AGG: 4175 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4176 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 4177 break; 4178 case HWRM_RING_ALLOC_CMPL: 4179 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 4180 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 4181 if (bp->flags & BNXT_FLAG_USING_MSIX) 4182 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 4183 break; 4184 default: 4185 netdev_err(bp->dev, 
"hwrm alloc invalid ring type %d\n", 4186 ring_type); 4187 return -1; 4188 } 4189 4190 mutex_lock(&bp->hwrm_cmd_lock); 4191 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4192 err = le16_to_cpu(resp->error_code); 4193 ring_id = le16_to_cpu(resp->ring_id); 4194 mutex_unlock(&bp->hwrm_cmd_lock); 4195 4196 if (rc || err) { 4197 switch (ring_type) { 4198 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4199 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", 4200 rc, err); 4201 return -1; 4202 4203 case RING_FREE_REQ_RING_TYPE_RX: 4204 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", 4205 rc, err); 4206 return -1; 4207 4208 case RING_FREE_REQ_RING_TYPE_TX: 4209 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", 4210 rc, err); 4211 return -1; 4212 4213 default: 4214 netdev_err(bp->dev, "Invalid ring\n"); 4215 return -1; 4216 } 4217 } 4218 ring->fw_ring_id = ring_id; 4219 return rc; 4220 } 4221 4222 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 4223 { 4224 int rc; 4225 4226 if (BNXT_PF(bp)) { 4227 struct hwrm_func_cfg_input req = {0}; 4228 4229 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4230 req.fid = cpu_to_le16(0xffff); 4231 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4232 req.async_event_cr = cpu_to_le16(idx); 4233 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4234 } else { 4235 struct hwrm_func_vf_cfg_input req = {0}; 4236 4237 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4238 req.enables = 4239 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4240 req.async_event_cr = cpu_to_le16(idx); 4241 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4242 } 4243 return rc; 4244 } 4245 4246 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 4247 { 4248 int i, rc = 0; 4249 4250 for (i = 0; i < bp->cp_nr_rings; i++) { 4251 struct bnxt_napi *bnapi = bp->bnapi[i]; 4252 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4253 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4254 4255 cpr->cp_doorbell = bp->bar1 + i * 0x80; 4256 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 4257 INVALID_STATS_CTX_ID); 4258 if (rc) 4259 goto err_out; 4260 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 4261 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4262 4263 if (!i) { 4264 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 4265 if (rc) 4266 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 4267 } 4268 } 4269 4270 for (i = 0; i < bp->tx_nr_rings; i++) { 4271 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4272 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4273 u32 map_idx = txr->bnapi->index; 4274 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; 4275 4276 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, 4277 map_idx, fw_stats_ctx); 4278 if (rc) 4279 goto err_out; 4280 txr->tx_doorbell = bp->bar1 + map_idx * 0x80; 4281 } 4282 4283 for (i = 0; i < bp->rx_nr_rings; i++) { 4284 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4285 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4286 u32 map_idx = rxr->bnapi->index; 4287 4288 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, 4289 map_idx, INVALID_STATS_CTX_ID); 4290 if (rc) 4291 goto err_out; 4292 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; 4293 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 4294 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 4295 } 4296 4297 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 4298 for (i = 0; i < 
bp->rx_nr_rings; i++) { 4299 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4300 struct bnxt_ring_struct *ring = 4301 &rxr->rx_agg_ring_struct; 4302 u32 grp_idx = rxr->bnapi->index; 4303 u32 map_idx = grp_idx + bp->rx_nr_rings; 4304 4305 rc = hwrm_ring_alloc_send_msg(bp, ring, 4306 HWRM_RING_ALLOC_AGG, 4307 map_idx, 4308 INVALID_STATS_CTX_ID); 4309 if (rc) 4310 goto err_out; 4311 4312 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; 4313 writel(DB_KEY_RX | rxr->rx_agg_prod, 4314 rxr->rx_agg_doorbell); 4315 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 4316 } 4317 } 4318 err_out: 4319 return rc; 4320 } 4321 4322 static int hwrm_ring_free_send_msg(struct bnxt *bp, 4323 struct bnxt_ring_struct *ring, 4324 u32 ring_type, int cmpl_ring_id) 4325 { 4326 int rc; 4327 struct hwrm_ring_free_input req = {0}; 4328 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 4329 u16 error_code; 4330 4331 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 4332 req.ring_type = ring_type; 4333 req.ring_id = cpu_to_le16(ring->fw_ring_id); 4334 4335 mutex_lock(&bp->hwrm_cmd_lock); 4336 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4337 error_code = le16_to_cpu(resp->error_code); 4338 mutex_unlock(&bp->hwrm_cmd_lock); 4339 4340 if (rc || error_code) { 4341 switch (ring_type) { 4342 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4343 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", 4344 rc); 4345 return rc; 4346 case RING_FREE_REQ_RING_TYPE_RX: 4347 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", 4348 rc); 4349 return rc; 4350 case RING_FREE_REQ_RING_TYPE_TX: 4351 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", 4352 rc); 4353 return rc; 4354 default: 4355 netdev_err(bp->dev, "Invalid ring\n"); 4356 return -1; 4357 } 4358 } 4359 return 0; 4360 } 4361 4362 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 4363 { 4364 int i; 4365 4366 if (!bp->bnapi) 4367 return; 4368 4369 for (i = 0; i < bp->tx_nr_rings; i++) { 4370 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4371 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4372 u32 grp_idx = txr->bnapi->index; 4373 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4374 4375 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4376 hwrm_ring_free_send_msg(bp, ring, 4377 RING_FREE_REQ_RING_TYPE_TX, 4378 close_path ? cmpl_ring_id : 4379 INVALID_HW_RING_ID); 4380 ring->fw_ring_id = INVALID_HW_RING_ID; 4381 } 4382 } 4383 4384 for (i = 0; i < bp->rx_nr_rings; i++) { 4385 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4386 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4387 u32 grp_idx = rxr->bnapi->index; 4388 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4389 4390 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4391 hwrm_ring_free_send_msg(bp, ring, 4392 RING_FREE_REQ_RING_TYPE_RX, 4393 close_path ? cmpl_ring_id : 4394 INVALID_HW_RING_ID); 4395 ring->fw_ring_id = INVALID_HW_RING_ID; 4396 bp->grp_info[grp_idx].rx_fw_ring_id = 4397 INVALID_HW_RING_ID; 4398 } 4399 } 4400 4401 for (i = 0; i < bp->rx_nr_rings; i++) { 4402 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4403 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 4404 u32 grp_idx = rxr->bnapi->index; 4405 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4406 4407 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4408 hwrm_ring_free_send_msg(bp, ring, 4409 RING_FREE_REQ_RING_TYPE_RX, 4410 close_path ? 
cmpl_ring_id : 4411 INVALID_HW_RING_ID); 4412 ring->fw_ring_id = INVALID_HW_RING_ID; 4413 bp->grp_info[grp_idx].agg_fw_ring_id = 4414 INVALID_HW_RING_ID; 4415 } 4416 } 4417 4418 /* The completion rings are about to be freed. After that the 4419 * IRQ doorbell will not work anymore. So we need to disable 4420 * IRQ here. 4421 */ 4422 bnxt_disable_int_sync(bp); 4423 4424 for (i = 0; i < bp->cp_nr_rings; i++) { 4425 struct bnxt_napi *bnapi = bp->bnapi[i]; 4426 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4427 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4428 4429 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4430 hwrm_ring_free_send_msg(bp, ring, 4431 RING_FREE_REQ_RING_TYPE_L2_CMPL, 4432 INVALID_HW_RING_ID); 4433 ring->fw_ring_id = INVALID_HW_RING_ID; 4434 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4435 } 4436 } 4437 } 4438 4439 /* Caller must hold bp->hwrm_cmd_lock */ 4440 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 4441 { 4442 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4443 struct hwrm_func_qcfg_input req = {0}; 4444 int rc; 4445 4446 if (bp->hwrm_spec_code < 0x10601) 4447 return 0; 4448 4449 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4450 req.fid = cpu_to_le16(fid); 4451 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4452 if (!rc) 4453 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 4454 4455 return rc; 4456 } 4457 4458 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) 4459 { 4460 struct hwrm_func_cfg_input req = {0}; 4461 int rc; 4462 4463 if (bp->hwrm_spec_code < 0x10601) 4464 return 0; 4465 4466 if (BNXT_VF(bp)) 4467 return 0; 4468 4469 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4470 req.fid = cpu_to_le16(0xffff); 4471 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); 4472 req.num_tx_rings = cpu_to_le16(*tx_rings); 4473 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4474 if (rc) 4475 return rc; 4476 4477 mutex_lock(&bp->hwrm_cmd_lock); 4478 rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); 4479 mutex_unlock(&bp->hwrm_cmd_lock); 4480 if (!rc) 4481 bp->tx_reserved_rings = *tx_rings; 4482 return rc; 4483 } 4484 4485 static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings) 4486 { 4487 struct hwrm_func_cfg_input req = {0}; 4488 int rc; 4489 4490 if (bp->hwrm_spec_code < 0x10801) 4491 return 0; 4492 4493 if (BNXT_VF(bp)) 4494 return 0; 4495 4496 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4497 req.fid = cpu_to_le16(0xffff); 4498 req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST); 4499 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); 4500 req.num_tx_rings = cpu_to_le16(tx_rings); 4501 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4502 if (rc) 4503 return -ENOMEM; 4504 return 0; 4505 } 4506 4507 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, 4508 u32 buf_tmrs, u16 flags, 4509 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 4510 { 4511 req->flags = cpu_to_le16(flags); 4512 req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs); 4513 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16); 4514 req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs); 4515 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16); 4516 /* Minimum time between 2 interrupts set to buf_tmr x 2 */ 4517 req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2); 4518 req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4); 4519 req->num_cmpl_aggr_int = 
cpu_to_le16((u16)max_bufs * 4); 4520 } 4521 4522 int bnxt_hwrm_set_coal(struct bnxt *bp) 4523 { 4524 int i, rc = 0; 4525 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 4526 req_tx = {0}, *req; 4527 u16 max_buf, max_buf_irq; 4528 u16 buf_tmr, buf_tmr_irq; 4529 u32 flags; 4530 4531 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 4532 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4533 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 4534 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4535 4536 /* Each rx completion (2 records) should be DMAed immediately. 4537 * DMA 1/4 of the completion buffers at a time. 4538 */ 4539 max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2); 4540 /* max_buf must not be zero */ 4541 max_buf = clamp_t(u16, max_buf, 1, 63); 4542 max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63); 4543 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks); 4544 /* buf timer set to 1/4 of interrupt timer */ 4545 buf_tmr = max_t(u16, buf_tmr / 4, 1); 4546 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq); 4547 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); 4548 4549 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4550 4551 /* RING_IDLE generates more IRQs for lower latency. Enable it only 4552 * if coal_ticks is less than 25 us. 4553 */ 4554 if (bp->rx_coal_ticks < 25) 4555 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 4556 4557 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, 4558 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx); 4559 4560 /* max_buf must not be zero */ 4561 max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63); 4562 max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63); 4563 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks); 4564 /* buf timer set to 1/4 of interrupt timer */ 4565 buf_tmr = max_t(u16, buf_tmr / 4, 1); 4566 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq); 4567 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); 4568 4569 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4570 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, 4571 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx); 4572 4573 mutex_lock(&bp->hwrm_cmd_lock); 4574 for (i = 0; i < bp->cp_nr_rings; i++) { 4575 struct bnxt_napi *bnapi = bp->bnapi[i]; 4576 4577 req = &req_rx; 4578 if (!bnapi->rx_ring) 4579 req = &req_tx; 4580 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); 4581 4582 rc = _hwrm_send_message(bp, req, sizeof(*req), 4583 HWRM_CMD_TIMEOUT); 4584 if (rc) 4585 break; 4586 } 4587 mutex_unlock(&bp->hwrm_cmd_lock); 4588 return rc; 4589 } 4590 4591 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 4592 { 4593 int rc = 0, i; 4594 struct hwrm_stat_ctx_free_input req = {0}; 4595 4596 if (!bp->bnapi) 4597 return 0; 4598 4599 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4600 return 0; 4601 4602 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 4603 4604 mutex_lock(&bp->hwrm_cmd_lock); 4605 for (i = 0; i < bp->cp_nr_rings; i++) { 4606 struct bnxt_napi *bnapi = bp->bnapi[i]; 4607 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4608 4609 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 4610 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 4611 4612 rc = _hwrm_send_message(bp, &req, sizeof(req), 4613 HWRM_CMD_TIMEOUT); 4614 if (rc) 4615 break; 4616 4617 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4618 } 4619 } 4620 mutex_unlock(&bp->hwrm_cmd_lock); 4621 return rc; 4622 } 4623 4624 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 4625 { 4626 int rc = 0, i; 4627 struct hwrm_stat_ctx_alloc_input req = {0}; 4628 struct 
hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4629 4630 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4631 return 0; 4632 4633 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 4634 4635 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 4636 4637 mutex_lock(&bp->hwrm_cmd_lock); 4638 for (i = 0; i < bp->cp_nr_rings; i++) { 4639 struct bnxt_napi *bnapi = bp->bnapi[i]; 4640 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4641 4642 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 4643 4644 rc = _hwrm_send_message(bp, &req, sizeof(req), 4645 HWRM_CMD_TIMEOUT); 4646 if (rc) 4647 break; 4648 4649 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 4650 4651 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 4652 } 4653 mutex_unlock(&bp->hwrm_cmd_lock); 4654 return rc; 4655 } 4656 4657 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 4658 { 4659 struct hwrm_func_qcfg_input req = {0}; 4660 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4661 u16 flags; 4662 int rc; 4663 4664 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4665 req.fid = cpu_to_le16(0xffff); 4666 mutex_lock(&bp->hwrm_cmd_lock); 4667 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4668 if (rc) 4669 goto func_qcfg_exit; 4670 4671 #ifdef CONFIG_BNXT_SRIOV 4672 if (BNXT_VF(bp)) { 4673 struct bnxt_vf_info *vf = &bp->vf; 4674 4675 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 4676 } 4677 #endif 4678 flags = le16_to_cpu(resp->flags); 4679 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 4680 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 4681 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; 4682 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 4683 bp->flags |= BNXT_FLAG_FW_DCBX_AGENT; 4684 } 4685 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 4686 bp->flags |= BNXT_FLAG_MULTI_HOST; 4687 4688 switch (resp->port_partition_type) { 4689 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 4690 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 4691 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 4692 bp->port_partition_type = resp->port_partition_type; 4693 break; 4694 } 4695 if (bp->hwrm_spec_code < 0x10707 || 4696 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 4697 bp->br_mode = BRIDGE_MODE_VEB; 4698 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 4699 bp->br_mode = BRIDGE_MODE_VEPA; 4700 else 4701 bp->br_mode = BRIDGE_MODE_UNDEF; 4702 4703 func_qcfg_exit: 4704 mutex_unlock(&bp->hwrm_cmd_lock); 4705 return rc; 4706 } 4707 4708 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 4709 { 4710 int rc = 0; 4711 struct hwrm_func_qcaps_input req = {0}; 4712 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4713 4714 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 4715 req.fid = cpu_to_le16(0xffff); 4716 4717 mutex_lock(&bp->hwrm_cmd_lock); 4718 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4719 if (rc) 4720 goto hwrm_func_qcaps_exit; 4721 4722 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)) 4723 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 4724 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)) 4725 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 4726 4727 bp->tx_push_thresh = 0; 4728 if (resp->flags & 4729 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) 4730 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 4731 4732 if (BNXT_PF(bp)) { 4733 struct bnxt_pf_info *pf = &bp->pf; 4734 4735 pf->fw_fid = le16_to_cpu(resp->fid); 4736 pf->port_id = le16_to_cpu(resp->port_id); 4737 
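		/* dev_port is exposed through sysfs so user space can
		 * distinguish netdevs that belong to different physical
		 * ports of the same device.
		 */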
bp->dev->dev_port = pf->port_id; 4738 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 4739 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4740 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4741 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4742 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4743 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4744 if (!pf->max_hw_ring_grps) 4745 pf->max_hw_ring_grps = pf->max_tx_rings; 4746 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4747 pf->max_vnics = le16_to_cpu(resp->max_vnics); 4748 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4749 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 4750 pf->max_vfs = le16_to_cpu(resp->max_vfs); 4751 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 4752 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 4753 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 4754 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 4755 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 4756 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 4757 if (resp->flags & 4758 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)) 4759 bp->flags |= BNXT_FLAG_WOL_CAP; 4760 } else { 4761 #ifdef CONFIG_BNXT_SRIOV 4762 struct bnxt_vf_info *vf = &bp->vf; 4763 4764 vf->fw_fid = le16_to_cpu(resp->fid); 4765 4766 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4767 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4768 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4769 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4770 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4771 if (!vf->max_hw_ring_grps) 4772 vf->max_hw_ring_grps = vf->max_tx_rings; 4773 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4774 vf->max_vnics = le16_to_cpu(resp->max_vnics); 4775 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4776 4777 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 4778 #endif 4779 } 4780 4781 hwrm_func_qcaps_exit: 4782 mutex_unlock(&bp->hwrm_cmd_lock); 4783 return rc; 4784 } 4785 4786 static int bnxt_hwrm_func_reset(struct bnxt *bp) 4787 { 4788 struct hwrm_func_reset_input req = {0}; 4789 4790 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 4791 req.enables = 0; 4792 4793 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 4794 } 4795 4796 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 4797 { 4798 int rc = 0; 4799 struct hwrm_queue_qportcfg_input req = {0}; 4800 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 4801 u8 i, *qptr; 4802 4803 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 4804 4805 mutex_lock(&bp->hwrm_cmd_lock); 4806 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4807 if (rc) 4808 goto qportcfg_exit; 4809 4810 if (!resp->max_configurable_queues) { 4811 rc = -EINVAL; 4812 goto qportcfg_exit; 4813 } 4814 bp->max_tc = resp->max_configurable_queues; 4815 bp->max_lltc = resp->max_configurable_lossless_queues; 4816 if (bp->max_tc > BNXT_MAX_QUEUE) 4817 bp->max_tc = BNXT_MAX_QUEUE; 4818 4819 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 4820 bp->max_tc = 1; 4821 4822 if (bp->max_lltc > bp->max_tc) 4823 bp->max_lltc = bp->max_tc; 4824 4825 qptr = &resp->queue_id0; 4826 for (i = 0; i < bp->max_tc; i++) { 4827 bp->q_info[i].queue_id = *qptr++; 4828 bp->q_info[i].queue_profile = *qptr++; 4829 } 4830 4831 qportcfg_exit: 4832 mutex_unlock(&bp->hwrm_cmd_lock); 4833 return rc; 4834 } 4835 4836 
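/* Query the firmware's HWRM interface version.  The spec level is
 * packed as (major << 16 | minor << 8 | update), so e.g. interface
 * 1.6.0 becomes bp->hwrm_spec_code == 0x10600 -- the same encoding
 * used by the 0x10601/0x10800 feature gates elsewhere in this file.
 */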
static int bnxt_hwrm_ver_get(struct bnxt *bp) 4837 { 4838 int rc; 4839 struct hwrm_ver_get_input req = {0}; 4840 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 4841 u32 dev_caps_cfg; 4842 4843 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 4844 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 4845 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 4846 req.hwrm_intf_min = HWRM_VERSION_MINOR; 4847 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 4848 mutex_lock(&bp->hwrm_cmd_lock); 4849 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4850 if (rc) 4851 goto hwrm_ver_get_exit; 4852 4853 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 4854 4855 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | 4856 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; 4857 if (resp->hwrm_intf_maj < 1) { 4858 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 4859 resp->hwrm_intf_maj, resp->hwrm_intf_min, 4860 resp->hwrm_intf_upd); 4861 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 4862 } 4863 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d", 4864 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, 4865 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); 4866 4867 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 4868 if (!bp->hwrm_cmd_timeout) 4869 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 4870 4871 if (resp->hwrm_intf_maj >= 1) 4872 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 4873 4874 bp->chip_num = le16_to_cpu(resp->chip_num); 4875 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 4876 !resp->chip_metal) 4877 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 4878 4879 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 4880 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 4881 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 4882 bp->flags |= BNXT_FLAG_SHORT_CMD; 4883 4884 hwrm_ver_get_exit: 4885 mutex_unlock(&bp->hwrm_cmd_lock); 4886 return rc; 4887 } 4888 4889 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 4890 { 4891 #if IS_ENABLED(CONFIG_RTC_LIB) 4892 struct hwrm_fw_set_time_input req = {0}; 4893 struct rtc_time tm; 4894 struct timeval tv; 4895 4896 if (bp->hwrm_spec_code < 0x10400) 4897 return -EOPNOTSUPP; 4898 4899 do_gettimeofday(&tv); 4900 rtc_time_to_tm(tv.tv_sec, &tm); 4901 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 4902 req.year = cpu_to_le16(1900 + tm.tm_year); 4903 req.month = 1 + tm.tm_mon; 4904 req.day = tm.tm_mday; 4905 req.hour = tm.tm_hour; 4906 req.minute = tm.tm_min; 4907 req.second = tm.tm_sec; 4908 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4909 #else 4910 return -EOPNOTSUPP; 4911 #endif 4912 } 4913 4914 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 4915 { 4916 int rc; 4917 struct bnxt_pf_info *pf = &bp->pf; 4918 struct hwrm_port_qstats_input req = {0}; 4919 4920 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 4921 return 0; 4922 4923 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 4924 req.port_id = cpu_to_le16(pf->port_id); 4925 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 4926 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 4927 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4928 return rc; 4929 } 4930 4931 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 4932 { 4933 if (bp->vxlan_port_cnt) { 4934 bnxt_hwrm_tunnel_dst_port_free( 4935 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 4936 } 4937 bp->vxlan_port_cnt = 0; 
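	/* The GENEVE (NGE) destination port, if one was programmed into
	 * the firmware, is released the same way.
	 */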
4938 if (bp->nge_port_cnt) { 4939 bnxt_hwrm_tunnel_dst_port_free( 4940 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 4941 } 4942 bp->nge_port_cnt = 0; 4943 } 4944 4945 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 4946 { 4947 int rc, i; 4948 u32 tpa_flags = 0; 4949 4950 if (set_tpa) 4951 tpa_flags = bp->flags & BNXT_FLAG_TPA; 4952 for (i = 0; i < bp->nr_vnics; i++) { 4953 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4954 if (rc) { 4955 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4956 i, rc); 4957 return rc; 4958 } 4959 } 4960 return 0; 4961 } 4962 4963 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 4964 { 4965 int i; 4966 4967 for (i = 0; i < bp->nr_vnics; i++) 4968 bnxt_hwrm_vnic_set_rss(bp, i, false); 4969 } 4970 4971 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 4972 bool irq_re_init) 4973 { 4974 if (bp->vnic_info) { 4975 bnxt_hwrm_clear_vnic_filter(bp); 4976 /* clear all RSS settings before freeing the vnic contexts */ 4977 bnxt_hwrm_clear_vnic_rss(bp); 4978 bnxt_hwrm_vnic_ctx_free(bp); 4979 /* undo the vnic tpa settings before freeing the vnic */ 4980 if (bp->flags & BNXT_FLAG_TPA) 4981 bnxt_set_tpa(bp, false); 4982 bnxt_hwrm_vnic_free(bp); 4983 } 4984 bnxt_hwrm_ring_free(bp, close_path); 4985 bnxt_hwrm_ring_grp_free(bp); 4986 if (irq_re_init) { 4987 bnxt_hwrm_stat_ctx_free(bp); 4988 bnxt_hwrm_free_tunnel_ports(bp); 4989 } 4990 } 4991 4992 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 4993 { 4994 struct hwrm_func_cfg_input req = {0}; 4995 int rc; 4996 4997 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4998 req.fid = cpu_to_le16(0xffff); 4999 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 5000 if (br_mode == BRIDGE_MODE_VEB) 5001 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 5002 else if (br_mode == BRIDGE_MODE_VEPA) 5003 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 5004 else 5005 return -EINVAL; 5006 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5007 if (rc) 5008 rc = -EIO; 5009 return rc; 5010 } 5011 5012 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 5013 { 5014 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5015 int rc; 5016 5017 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 5018 goto skip_rss_ctx; 5019 5020 /* allocate context for vnic */ 5021 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 5022 if (rc) { 5023 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 5024 vnic_id, rc); 5025 goto vnic_setup_err; 5026 } 5027 bp->rsscos_nr_ctxs++; 5028 5029 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5030 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 5031 if (rc) { 5032 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 5033 vnic_id, rc); 5034 goto vnic_setup_err; 5035 } 5036 bp->rsscos_nr_ctxs++; 5037 } 5038 5039 skip_rss_ctx: 5040 /* configure default vnic, ring grp */ 5041 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 5042 if (rc) { 5043 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 5044 vnic_id, rc); 5045 goto vnic_setup_err; 5046 } 5047 5048 /* Enable RSS hashing on vnic */ 5049 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 5050 if (rc) { 5051 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 5052 vnic_id, rc); 5053 goto vnic_setup_err; 5054 } 5055 5056 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5057 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 5058 if (rc) { 5059 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 5060 vnic_id, rc); 5061 } 5062 } 5063 5064 vnic_setup_err: 5065 return rc; 5066 } 5067 5068 static int 
bnxt_alloc_rfs_vnics(struct bnxt *bp) 5069 { 5070 #ifdef CONFIG_RFS_ACCEL 5071 int i, rc = 0; 5072 5073 for (i = 0; i < bp->rx_nr_rings; i++) { 5074 struct bnxt_vnic_info *vnic; 5075 u16 vnic_id = i + 1; 5076 u16 ring_id = i; 5077 5078 if (vnic_id >= bp->nr_vnics) 5079 break; 5080 5081 vnic = &bp->vnic_info[vnic_id]; 5082 vnic->flags |= BNXT_VNIC_RFS_FLAG; 5083 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 5084 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 5085 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 5086 if (rc) { 5087 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 5088 vnic_id, rc); 5089 break; 5090 } 5091 rc = bnxt_setup_vnic(bp, vnic_id); 5092 if (rc) 5093 break; 5094 } 5095 return rc; 5096 #else 5097 return 0; 5098 #endif 5099 } 5100 5101 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 5102 static bool bnxt_promisc_ok(struct bnxt *bp) 5103 { 5104 #ifdef CONFIG_BNXT_SRIOV 5105 if (BNXT_VF(bp) && !bp->vf.vlan) 5106 return false; 5107 #endif 5108 return true; 5109 } 5110 5111 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 5112 { 5113 int rc; 5114 5115 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 5116 if (rc) { 5117 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 5118 rc); 5119 return rc; 5120 } 5121 5122 rc = bnxt_hwrm_vnic_cfg(bp, 1); 5123 if (rc) { 5124 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n", 5125 rc); 5126 return rc; 5127 } 5128 return rc; 5129 } 5130 5131 static int bnxt_cfg_rx_mode(struct bnxt *); 5132 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 5133 5134 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 5135 { 5136 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5137 int rc = 0; 5138 unsigned int rx_nr_rings = bp->rx_nr_rings; 5139 5140 if (irq_re_init) { 5141 rc = bnxt_hwrm_stat_ctx_alloc(bp); 5142 if (rc) { 5143 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 5144 rc); 5145 goto err_out; 5146 } 5147 if (bp->tx_reserved_rings != bp->tx_nr_rings) { 5148 int tx = bp->tx_nr_rings; 5149 5150 if (bnxt_hwrm_reserve_tx_rings(bp, &tx) || 5151 tx < bp->tx_nr_rings) { 5152 rc = -ENOMEM; 5153 goto err_out; 5154 } 5155 } 5156 } 5157 5158 rc = bnxt_hwrm_ring_alloc(bp); 5159 if (rc) { 5160 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 5161 goto err_out; 5162 } 5163 5164 rc = bnxt_hwrm_ring_grp_alloc(bp); 5165 if (rc) { 5166 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 5167 goto err_out; 5168 } 5169 5170 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5171 rx_nr_rings--; 5172 5173 /* default vnic 0 */ 5174 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 5175 if (rc) { 5176 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 5177 goto err_out; 5178 } 5179 5180 rc = bnxt_setup_vnic(bp, 0); 5181 if (rc) 5182 goto err_out; 5183 5184 if (bp->flags & BNXT_FLAG_RFS) { 5185 rc = bnxt_alloc_rfs_vnics(bp); 5186 if (rc) 5187 goto err_out; 5188 } 5189 5190 if (bp->flags & BNXT_FLAG_TPA) { 5191 rc = bnxt_set_tpa(bp, true); 5192 if (rc) 5193 goto err_out; 5194 } 5195 5196 if (BNXT_VF(bp)) 5197 bnxt_update_vf_mac(bp); 5198 5199 /* Filter for default vnic 0 */ 5200 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 5201 if (rc) { 5202 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 5203 goto err_out; 5204 } 5205 vnic->uc_filter_count = 1; 5206 5207 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 5208 5209 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 5210 vnic->rx_mask |= 
CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5211 5212 if (bp->dev->flags & IFF_ALLMULTI) { 5213 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 5214 vnic->mc_list_count = 0; 5215 } else { 5216 u32 mask = 0; 5217 5218 bnxt_mc_list_updated(bp, &mask); 5219 vnic->rx_mask |= mask; 5220 } 5221 5222 rc = bnxt_cfg_rx_mode(bp); 5223 if (rc) 5224 goto err_out; 5225 5226 rc = bnxt_hwrm_set_coal(bp); 5227 if (rc) 5228 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 5229 rc); 5230 5231 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5232 rc = bnxt_setup_nitroa0_vnic(bp); 5233 if (rc) 5234 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 5235 rc); 5236 } 5237 5238 if (BNXT_VF(bp)) { 5239 bnxt_hwrm_func_qcfg(bp); 5240 netdev_update_features(bp->dev); 5241 } 5242 5243 return 0; 5244 5245 err_out: 5246 bnxt_hwrm_resource_free(bp, 0, true); 5247 5248 return rc; 5249 } 5250 5251 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 5252 { 5253 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 5254 return 0; 5255 } 5256 5257 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 5258 { 5259 bnxt_init_cp_rings(bp); 5260 bnxt_init_rx_rings(bp); 5261 bnxt_init_tx_rings(bp); 5262 bnxt_init_ring_grps(bp, irq_re_init); 5263 bnxt_init_vnics(bp); 5264 5265 return bnxt_init_chip(bp, irq_re_init); 5266 } 5267 5268 static int bnxt_set_real_num_queues(struct bnxt *bp) 5269 { 5270 int rc; 5271 struct net_device *dev = bp->dev; 5272 5273 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 5274 bp->tx_nr_rings_xdp); 5275 if (rc) 5276 return rc; 5277 5278 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 5279 if (rc) 5280 return rc; 5281 5282 #ifdef CONFIG_RFS_ACCEL 5283 if (bp->flags & BNXT_FLAG_RFS) 5284 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 5285 #endif 5286 5287 return rc; 5288 } 5289 5290 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5291 bool shared) 5292 { 5293 int _rx = *rx, _tx = *tx; 5294 5295 if (shared) { 5296 *rx = min_t(int, _rx, max); 5297 *tx = min_t(int, _tx, max); 5298 } else { 5299 if (max < 2) 5300 return -ENOMEM; 5301 5302 while (_rx + _tx > max) { 5303 if (_rx > _tx && _rx > 1) 5304 _rx--; 5305 else if (_tx > 1) 5306 _tx--; 5307 } 5308 *rx = _rx; 5309 *tx = _tx; 5310 } 5311 return 0; 5312 } 5313 5314 static void bnxt_setup_msix(struct bnxt *bp) 5315 { 5316 const int len = sizeof(bp->irq_tbl[0].name); 5317 struct net_device *dev = bp->dev; 5318 int tcs, i; 5319 5320 tcs = netdev_get_num_tc(dev); 5321 if (tcs > 1) { 5322 int i, off, count; 5323 5324 for (i = 0; i < tcs; i++) { 5325 count = bp->tx_nr_rings_per_tc; 5326 off = i * count; 5327 netdev_set_tc_queue(dev, i, count, off); 5328 } 5329 } 5330 5331 for (i = 0; i < bp->cp_nr_rings; i++) { 5332 char *attr; 5333 5334 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5335 attr = "TxRx"; 5336 else if (i < bp->rx_nr_rings) 5337 attr = "rx"; 5338 else 5339 attr = "tx"; 5340 5341 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr, 5342 i); 5343 bp->irq_tbl[i].handler = bnxt_msix; 5344 } 5345 } 5346 5347 static void bnxt_setup_inta(struct bnxt *bp) 5348 { 5349 const int len = sizeof(bp->irq_tbl[0].name); 5350 5351 if (netdev_get_num_tc(bp->dev)) 5352 netdev_reset_tc(bp->dev); 5353 5354 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 5355 0); 5356 bp->irq_tbl[0].handler = bnxt_inta; 5357 } 5358 5359 static int bnxt_setup_int_mode(struct bnxt *bp) 5360 { 5361 int rc; 5362 5363 if (bp->flags & BNXT_FLAG_USING_MSIX) 5364 bnxt_setup_msix(bp); 5365 
else 5366 bnxt_setup_inta(bp); 5367 5368 rc = bnxt_set_real_num_queues(bp); 5369 return rc; 5370 } 5371 5372 #ifdef CONFIG_RFS_ACCEL 5373 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 5374 { 5375 #if defined(CONFIG_BNXT_SRIOV) 5376 if (BNXT_VF(bp)) 5377 return bp->vf.max_rsscos_ctxs; 5378 #endif 5379 return bp->pf.max_rsscos_ctxs; 5380 } 5381 5382 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 5383 { 5384 #if defined(CONFIG_BNXT_SRIOV) 5385 if (BNXT_VF(bp)) 5386 return bp->vf.max_vnics; 5387 #endif 5388 return bp->pf.max_vnics; 5389 } 5390 #endif 5391 5392 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 5393 { 5394 #if defined(CONFIG_BNXT_SRIOV) 5395 if (BNXT_VF(bp)) 5396 return bp->vf.max_stat_ctxs; 5397 #endif 5398 return bp->pf.max_stat_ctxs; 5399 } 5400 5401 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) 5402 { 5403 #if defined(CONFIG_BNXT_SRIOV) 5404 if (BNXT_VF(bp)) 5405 bp->vf.max_stat_ctxs = max; 5406 else 5407 #endif 5408 bp->pf.max_stat_ctxs = max; 5409 } 5410 5411 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 5412 { 5413 #if defined(CONFIG_BNXT_SRIOV) 5414 if (BNXT_VF(bp)) 5415 return bp->vf.max_cp_rings; 5416 #endif 5417 return bp->pf.max_cp_rings; 5418 } 5419 5420 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) 5421 { 5422 #if defined(CONFIG_BNXT_SRIOV) 5423 if (BNXT_VF(bp)) 5424 bp->vf.max_cp_rings = max; 5425 else 5426 #endif 5427 bp->pf.max_cp_rings = max; 5428 } 5429 5430 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 5431 { 5432 #if defined(CONFIG_BNXT_SRIOV) 5433 if (BNXT_VF(bp)) 5434 return min_t(unsigned int, bp->vf.max_irqs, 5435 bp->vf.max_cp_rings); 5436 #endif 5437 return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings); 5438 } 5439 5440 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 5441 { 5442 #if defined(CONFIG_BNXT_SRIOV) 5443 if (BNXT_VF(bp)) 5444 bp->vf.max_irqs = max_irqs; 5445 else 5446 #endif 5447 bp->pf.max_irqs = max_irqs; 5448 } 5449 5450 static int bnxt_init_msix(struct bnxt *bp) 5451 { 5452 int i, total_vecs, rc = 0, min = 1; 5453 struct msix_entry *msix_ent; 5454 5455 total_vecs = bnxt_get_max_func_irqs(bp); 5456 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 5457 if (!msix_ent) 5458 return -ENOMEM; 5459 5460 for (i = 0; i < total_vecs; i++) { 5461 msix_ent[i].entry = i; 5462 msix_ent[i].vector = 0; 5463 } 5464 5465 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 5466 min = 2; 5467 5468 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 5469 if (total_vecs < 0) { 5470 rc = -ENODEV; 5471 goto msix_setup_exit; 5472 } 5473 5474 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 5475 if (bp->irq_tbl) { 5476 for (i = 0; i < total_vecs; i++) 5477 bp->irq_tbl[i].vector = msix_ent[i].vector; 5478 5479 bp->total_irqs = total_vecs; 5480 /* Trim rings based upon num of vectors allocated */ 5481 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 5482 total_vecs, min == 1); 5483 if (rc) 5484 goto msix_setup_exit; 5485 5486 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5487 bp->cp_nr_rings = (min == 1) ? 
5488 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 5489 bp->tx_nr_rings + bp->rx_nr_rings; 5490 5491 } else { 5492 rc = -ENOMEM; 5493 goto msix_setup_exit; 5494 } 5495 bp->flags |= BNXT_FLAG_USING_MSIX; 5496 kfree(msix_ent); 5497 return 0; 5498 5499 msix_setup_exit: 5500 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 5501 kfree(bp->irq_tbl); 5502 bp->irq_tbl = NULL; 5503 pci_disable_msix(bp->pdev); 5504 kfree(msix_ent); 5505 return rc; 5506 } 5507 5508 static int bnxt_init_inta(struct bnxt *bp) 5509 { 5510 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 5511 if (!bp->irq_tbl) 5512 return -ENOMEM; 5513 5514 bp->total_irqs = 1; 5515 bp->rx_nr_rings = 1; 5516 bp->tx_nr_rings = 1; 5517 bp->cp_nr_rings = 1; 5518 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5519 bp->flags |= BNXT_FLAG_SHARED_RINGS; 5520 bp->irq_tbl[0].vector = bp->pdev->irq; 5521 return 0; 5522 } 5523 5524 static int bnxt_init_int_mode(struct bnxt *bp) 5525 { 5526 int rc = 0; 5527 5528 if (bp->flags & BNXT_FLAG_MSIX_CAP) 5529 rc = bnxt_init_msix(bp); 5530 5531 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 5532 /* fallback to INTA */ 5533 rc = bnxt_init_inta(bp); 5534 } 5535 return rc; 5536 } 5537 5538 static void bnxt_clear_int_mode(struct bnxt *bp) 5539 { 5540 if (bp->flags & BNXT_FLAG_USING_MSIX) 5541 pci_disable_msix(bp->pdev); 5542 5543 kfree(bp->irq_tbl); 5544 bp->irq_tbl = NULL; 5545 bp->flags &= ~BNXT_FLAG_USING_MSIX; 5546 } 5547 5548 static void bnxt_free_irq(struct bnxt *bp) 5549 { 5550 struct bnxt_irq *irq; 5551 int i; 5552 5553 #ifdef CONFIG_RFS_ACCEL 5554 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 5555 bp->dev->rx_cpu_rmap = NULL; 5556 #endif 5557 if (!bp->irq_tbl) 5558 return; 5559 5560 for (i = 0; i < bp->cp_nr_rings; i++) { 5561 irq = &bp->irq_tbl[i]; 5562 if (irq->requested) { 5563 if (irq->have_cpumask) { 5564 irq_set_affinity_hint(irq->vector, NULL); 5565 free_cpumask_var(irq->cpu_mask); 5566 irq->have_cpumask = 0; 5567 } 5568 free_irq(irq->vector, bp->bnapi[i]); 5569 } 5570 5571 irq->requested = 0; 5572 } 5573 } 5574 5575 static int bnxt_request_irq(struct bnxt *bp) 5576 { 5577 int i, j, rc = 0; 5578 unsigned long flags = 0; 5579 #ifdef CONFIG_RFS_ACCEL 5580 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; 5581 #endif 5582 5583 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 5584 flags = IRQF_SHARED; 5585 5586 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 5587 struct bnxt_irq *irq = &bp->irq_tbl[i]; 5588 #ifdef CONFIG_RFS_ACCEL 5589 if (rmap && bp->bnapi[i]->rx_ring) { 5590 rc = irq_cpu_rmap_add(rmap, irq->vector); 5591 if (rc) 5592 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 5593 j); 5594 j++; 5595 } 5596 #endif 5597 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 5598 bp->bnapi[i]); 5599 if (rc) 5600 break; 5601 5602 irq->requested = 1; 5603 5604 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 5605 int numa_node = dev_to_node(&bp->pdev->dev); 5606 5607 irq->have_cpumask = 1; 5608 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 5609 irq->cpu_mask); 5610 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 5611 if (rc) { 5612 netdev_warn(bp->dev, 5613 "Set affinity failed, IRQ = %d\n", 5614 irq->vector); 5615 break; 5616 } 5617 } 5618 } 5619 return rc; 5620 } 5621 5622 static void bnxt_del_napi(struct bnxt *bp) 5623 { 5624 int i; 5625 5626 if (!bp->bnapi) 5627 return; 5628 5629 for (i = 0; i < bp->cp_nr_rings; i++) { 5630 struct bnxt_napi *bnapi = bp->bnapi[i]; 5631 5632 napi_hash_del(&bnapi->napi); 5633 netif_napi_del(&bnapi->napi); 5634 } 5635 
/* We called napi_hash_del() before netif_napi_del(), so we need 5636 * to respect an RCU grace period before freeing napi structures. 5637 */ 5638 synchronize_net(); 5639 } 5640 5641 static void bnxt_init_napi(struct bnxt *bp) 5642 { 5643 int i; 5644 unsigned int cp_nr_rings = bp->cp_nr_rings; 5645 struct bnxt_napi *bnapi; 5646 5647 if (bp->flags & BNXT_FLAG_USING_MSIX) { 5648 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5649 cp_nr_rings--; 5650 for (i = 0; i < cp_nr_rings; i++) { 5651 bnapi = bp->bnapi[i]; 5652 netif_napi_add(bp->dev, &bnapi->napi, 5653 bnxt_poll, 64); 5654 } 5655 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5656 bnapi = bp->bnapi[cp_nr_rings]; 5657 netif_napi_add(bp->dev, &bnapi->napi, 5658 bnxt_poll_nitroa0, 64); 5659 } 5660 } else { 5661 bnapi = bp->bnapi[0]; 5662 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 5663 } 5664 } 5665 5666 static void bnxt_disable_napi(struct bnxt *bp) 5667 { 5668 int i; 5669 5670 if (!bp->bnapi) 5671 return; 5672 5673 for (i = 0; i < bp->cp_nr_rings; i++) 5674 napi_disable(&bp->bnapi[i]->napi); 5675 } 5676 5677 static void bnxt_enable_napi(struct bnxt *bp) 5678 { 5679 int i; 5680 5681 for (i = 0; i < bp->cp_nr_rings; i++) { 5682 bp->bnapi[i]->in_reset = false; 5683 napi_enable(&bp->bnapi[i]->napi); 5684 } 5685 } 5686 5687 void bnxt_tx_disable(struct bnxt *bp) 5688 { 5689 int i; 5690 struct bnxt_tx_ring_info *txr; 5691 5692 if (bp->tx_ring) { 5693 for (i = 0; i < bp->tx_nr_rings; i++) { 5694 txr = &bp->tx_ring[i]; 5695 txr->dev_state = BNXT_DEV_STATE_CLOSING; 5696 } 5697 } 5698 /* Stop all TX queues */ 5699 netif_tx_disable(bp->dev); 5700 netif_carrier_off(bp->dev); 5701 } 5702 5703 void bnxt_tx_enable(struct bnxt *bp) 5704 { 5705 int i; 5706 struct bnxt_tx_ring_info *txr; 5707 5708 for (i = 0; i < bp->tx_nr_rings; i++) { 5709 txr = &bp->tx_ring[i]; 5710 txr->dev_state = 0; 5711 } 5712 netif_tx_wake_all_queues(bp->dev); 5713 if (bp->link_info.link_up) 5714 netif_carrier_on(bp->dev); 5715 } 5716 5717 static void bnxt_report_link(struct bnxt *bp) 5718 { 5719 if (bp->link_info.link_up) { 5720 const char *duplex; 5721 const char *flow_ctrl; 5722 u32 speed; 5723 u16 fec; 5724 5725 netif_carrier_on(bp->dev); 5726 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 5727 duplex = "full"; 5728 else 5729 duplex = "half"; 5730 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 5731 flow_ctrl = "ON - receive & transmit"; 5732 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 5733 flow_ctrl = "ON - transmit"; 5734 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 5735 flow_ctrl = "ON - receive"; 5736 else 5737 flow_ctrl = "none"; 5738 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 5739 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 5740 speed, duplex, flow_ctrl); 5741 if (bp->flags & BNXT_FLAG_EEE_CAP) 5742 netdev_info(bp->dev, "EEE is %s\n", 5743 bp->eee.eee_active ? "active" : 5744 "not active"); 5745 fec = bp->link_info.fec_cfg; 5746 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 5747 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 5748 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 5749 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 5750 (fec & BNXT_FEC_ENC_RS) ? 
"RS" : "None"); 5751 } else { 5752 netif_carrier_off(bp->dev); 5753 netdev_err(bp->dev, "NIC Link is Down\n"); 5754 } 5755 } 5756 5757 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 5758 { 5759 int rc = 0; 5760 struct hwrm_port_phy_qcaps_input req = {0}; 5761 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5762 struct bnxt_link_info *link_info = &bp->link_info; 5763 5764 if (bp->hwrm_spec_code < 0x10201) 5765 return 0; 5766 5767 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 5768 5769 mutex_lock(&bp->hwrm_cmd_lock); 5770 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5771 if (rc) 5772 goto hwrm_phy_qcaps_exit; 5773 5774 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 5775 struct ethtool_eee *eee = &bp->eee; 5776 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 5777 5778 bp->flags |= BNXT_FLAG_EEE_CAP; 5779 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5780 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 5781 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 5782 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 5783 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 5784 } 5785 if (resp->supported_speeds_auto_mode) 5786 link_info->support_auto_speeds = 5787 le16_to_cpu(resp->supported_speeds_auto_mode); 5788 5789 bp->port_count = resp->port_cnt; 5790 5791 hwrm_phy_qcaps_exit: 5792 mutex_unlock(&bp->hwrm_cmd_lock); 5793 return rc; 5794 } 5795 5796 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 5797 { 5798 int rc = 0; 5799 struct bnxt_link_info *link_info = &bp->link_info; 5800 struct hwrm_port_phy_qcfg_input req = {0}; 5801 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5802 u8 link_up = link_info->link_up; 5803 u16 diff; 5804 5805 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 5806 5807 mutex_lock(&bp->hwrm_cmd_lock); 5808 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5809 if (rc) { 5810 mutex_unlock(&bp->hwrm_cmd_lock); 5811 return rc; 5812 } 5813 5814 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 5815 link_info->phy_link_status = resp->link; 5816 link_info->duplex = resp->duplex_cfg; 5817 if (bp->hwrm_spec_code >= 0x10800) 5818 link_info->duplex = resp->duplex_state; 5819 link_info->pause = resp->pause; 5820 link_info->auto_mode = resp->auto_mode; 5821 link_info->auto_pause_setting = resp->auto_pause; 5822 link_info->lp_pause = resp->link_partner_adv_pause; 5823 link_info->force_pause_setting = resp->force_pause; 5824 link_info->duplex_setting = resp->duplex_cfg; 5825 if (link_info->phy_link_status == BNXT_LINK_LINK) 5826 link_info->link_speed = le16_to_cpu(resp->link_speed); 5827 else 5828 link_info->link_speed = 0; 5829 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 5830 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 5831 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 5832 link_info->lp_auto_link_speeds = 5833 le16_to_cpu(resp->link_partner_adv_speeds); 5834 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 5835 link_info->phy_ver[0] = resp->phy_maj; 5836 link_info->phy_ver[1] = resp->phy_min; 5837 link_info->phy_ver[2] = resp->phy_bld; 5838 link_info->media_type = resp->media_type; 5839 link_info->phy_type = resp->phy_type; 5840 link_info->transceiver = resp->xcvr_pkg_type; 5841 link_info->phy_addr = resp->eee_config_phy_addr & 5842 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 5843 link_info->module_status = resp->module_status; 5844 5845 if 
(bp->flags & BNXT_FLAG_EEE_CAP) { 5846 struct ethtool_eee *eee = &bp->eee; 5847 u16 fw_speeds; 5848 5849 eee->eee_active = 0; 5850 if (resp->eee_config_phy_addr & 5851 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 5852 eee->eee_active = 1; 5853 fw_speeds = le16_to_cpu( 5854 resp->link_partner_adv_eee_link_speed_mask); 5855 eee->lp_advertised = 5856 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5857 } 5858 5859 /* Pull initial EEE config */ 5860 if (!chng_link_state) { 5861 if (resp->eee_config_phy_addr & 5862 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 5863 eee->eee_enabled = 1; 5864 5865 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 5866 eee->advertised = 5867 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5868 5869 if (resp->eee_config_phy_addr & 5870 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 5871 __le32 tmr; 5872 5873 eee->tx_lpi_enabled = 1; 5874 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 5875 eee->tx_lpi_timer = le32_to_cpu(tmr) & 5876 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 5877 } 5878 } 5879 } 5880 5881 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 5882 if (bp->hwrm_spec_code >= 0x10504) 5883 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 5884 5885 /* TODO: need to add more logic to report VF link */ 5886 if (chng_link_state) { 5887 if (link_info->phy_link_status == BNXT_LINK_LINK) 5888 link_info->link_up = 1; 5889 else 5890 link_info->link_up = 0; 5891 if (link_up != link_info->link_up) 5892 bnxt_report_link(bp); 5893 } else { 5894 /* always report link down if not required to update link state */ 5895 link_info->link_up = 0; 5896 } 5897 mutex_unlock(&bp->hwrm_cmd_lock); 5898 5899 diff = link_info->support_auto_speeds ^ link_info->advertising; 5900 if ((link_info->support_auto_speeds | diff) != 5901 link_info->support_auto_speeds) { 5902 /* An advertised speed is no longer supported, so we need to 5903 * update the advertisement settings. Caller holds RTNL 5904 * so we can modify link settings. 
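 * This can happen, for example, when a module change or a firmware
 * update narrows the set of supported speeds.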
5905 */ 5906 link_info->advertising = link_info->support_auto_speeds; 5907 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 5908 bnxt_hwrm_set_link_setting(bp, true, false); 5909 } 5910 return 0; 5911 } 5912 5913 static void bnxt_get_port_module_status(struct bnxt *bp) 5914 { 5915 struct bnxt_link_info *link_info = &bp->link_info; 5916 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 5917 u8 module_status; 5918 5919 if (bnxt_update_link(bp, true)) 5920 return; 5921 5922 module_status = link_info->module_status; 5923 switch (module_status) { 5924 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 5925 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 5926 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 5927 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 5928 bp->pf.port_id); 5929 if (bp->hwrm_spec_code >= 0x10201) { 5930 netdev_warn(bp->dev, "Module part number %s\n", 5931 resp->phy_vendor_partnumber); 5932 } 5933 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 5934 netdev_warn(bp->dev, "TX is disabled\n"); 5935 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 5936 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 5937 } 5938 } 5939 5940 static void 5941 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 5942 { 5943 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 5944 if (bp->hwrm_spec_code >= 0x10201) 5945 req->auto_pause = 5946 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 5947 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5948 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 5949 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5950 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 5951 req->enables |= 5952 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5953 } else { 5954 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5955 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 5956 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5957 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 5958 req->enables |= 5959 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 5960 if (bp->hwrm_spec_code >= 0x10201) { 5961 req->auto_pause = req->force_pause; 5962 req->enables |= cpu_to_le32( 5963 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5964 } 5965 } 5966 } 5967 5968 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 5969 struct hwrm_port_phy_cfg_input *req) 5970 { 5971 u8 autoneg = bp->link_info.autoneg; 5972 u16 fw_link_speed = bp->link_info.req_link_speed; 5973 u16 advertising = bp->link_info.advertising; 5974 5975 if (autoneg & BNXT_AUTONEG_SPEED) { 5976 req->auto_mode |= 5977 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 5978 5979 req->enables |= cpu_to_le32( 5980 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 5981 req->auto_link_speed_mask = cpu_to_le16(advertising); 5982 5983 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 5984 req->flags |= 5985 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 5986 } else { 5987 req->force_link_speed = cpu_to_le16(fw_link_speed); 5988 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 5989 } 5990 5991 /* tell chimp that the setting takes effect immediately */ 5992 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 5993 } 5994 5995 int bnxt_hwrm_set_pause(struct bnxt *bp) 5996 { 5997 struct hwrm_port_phy_cfg_input req = {0}; 5998 int rc; 5999 6000 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6001 bnxt_hwrm_set_pause_common(bp, &req); 6002 6003 if ((bp->link_info.autoneg & 
BNXT_AUTONEG_FLOW_CTRL) || 6004 bp->link_info.force_link_chng) 6005 bnxt_hwrm_set_link_common(bp, &req); 6006 6007 mutex_lock(&bp->hwrm_cmd_lock); 6008 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6009 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 6010 /* since changing the pause setting doesn't trigger any link 6011 * change event, the driver needs to update the current pause 6012 * result upon successful return of the phy_cfg command 6013 */ 6014 bp->link_info.pause = 6015 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 6016 bp->link_info.auto_pause_setting = 0; 6017 if (!bp->link_info.force_link_chng) 6018 bnxt_report_link(bp); 6019 } 6020 bp->link_info.force_link_chng = false; 6021 mutex_unlock(&bp->hwrm_cmd_lock); 6022 return rc; 6023 } 6024 6025 static void bnxt_hwrm_set_eee(struct bnxt *bp, 6026 struct hwrm_port_phy_cfg_input *req) 6027 { 6028 struct ethtool_eee *eee = &bp->eee; 6029 6030 if (eee->eee_enabled) { 6031 u16 eee_speeds; 6032 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 6033 6034 if (eee->tx_lpi_enabled) 6035 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 6036 else 6037 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 6038 6039 req->flags |= cpu_to_le32(flags); 6040 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 6041 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 6042 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 6043 } else { 6044 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 6045 } 6046 } 6047 6048 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 6049 { 6050 struct hwrm_port_phy_cfg_input req = {0}; 6051 6052 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6053 if (set_pause) 6054 bnxt_hwrm_set_pause_common(bp, &req); 6055 6056 bnxt_hwrm_set_link_common(bp, &req); 6057 6058 if (set_eee) 6059 bnxt_hwrm_set_eee(bp, &req); 6060 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6061 } 6062 6063 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 6064 { 6065 struct hwrm_port_phy_cfg_input req = {0}; 6066 6067 if (!BNXT_SINGLE_PF(bp)) 6068 return 0; 6069 6070 if (pci_num_vf(bp->pdev)) 6071 return 0; 6072 6073 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6074 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 6075 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6076 } 6077 6078 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 6079 { 6080 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6081 struct hwrm_port_led_qcaps_input req = {0}; 6082 struct bnxt_pf_info *pf = &bp->pf; 6083 int rc; 6084 6085 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 6086 return 0; 6087 6088 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 6089 req.port_id = cpu_to_le16(pf->port_id); 6090 mutex_lock(&bp->hwrm_cmd_lock); 6091 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6092 if (rc) { 6093 mutex_unlock(&bp->hwrm_cmd_lock); 6094 return rc; 6095 } 6096 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 6097 int i; 6098 6099 bp->num_leds = resp->num_leds; 6100 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 6101 bp->num_leds); 6102 for (i = 0; i < bp->num_leds; i++) { 6103 struct bnxt_led_info *led = &bp->leds[i]; 6104 __le16 caps = led->led_state_caps; 6105 6106 if (!led->led_group_id || 6107 !BNXT_LED_ALT_BLINK_CAP(caps)) { 6108 bp->num_leds = 0; 6109 break; 6110 } 6111 } 6112 } 6113 mutex_unlock(&bp->hwrm_cmd_lock); 6114
return 0; 6115 } 6116 6117 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 6118 { 6119 struct hwrm_wol_filter_alloc_input req = {0}; 6120 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6121 int rc; 6122 6123 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 6124 req.port_id = cpu_to_le16(bp->pf.port_id); 6125 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 6126 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 6127 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 6128 mutex_lock(&bp->hwrm_cmd_lock); 6129 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6130 if (!rc) 6131 bp->wol_filter_id = resp->wol_filter_id; 6132 mutex_unlock(&bp->hwrm_cmd_lock); 6133 return rc; 6134 } 6135 6136 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 6137 { 6138 struct hwrm_wol_filter_free_input req = {0}; 6139 int rc; 6140 6141 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 6142 req.port_id = cpu_to_le16(bp->pf.port_id); 6143 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 6144 req.wol_filter_id = bp->wol_filter_id; 6145 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6146 return rc; 6147 } 6148 6149 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 6150 { 6151 struct hwrm_wol_filter_qcfg_input req = {0}; 6152 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6153 u16 next_handle = 0; 6154 int rc; 6155 6156 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 6157 req.port_id = cpu_to_le16(bp->pf.port_id); 6158 req.handle = cpu_to_le16(handle); 6159 mutex_lock(&bp->hwrm_cmd_lock); 6160 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6161 if (!rc) { 6162 next_handle = le16_to_cpu(resp->next_handle); 6163 if (next_handle != 0) { 6164 if (resp->wol_type == 6165 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 6166 bp->wol = 1; 6167 bp->wol_filter_id = resp->wol_filter_id; 6168 } 6169 } 6170 } 6171 mutex_unlock(&bp->hwrm_cmd_lock); 6172 return next_handle; 6173 } 6174 6175 static void bnxt_get_wol_settings(struct bnxt *bp) 6176 { 6177 u16 handle = 0; 6178 6179 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 6180 return; 6181 6182 do { 6183 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 6184 } while (handle && handle != 0xffff); 6185 } 6186 6187 static bool bnxt_eee_config_ok(struct bnxt *bp) 6188 { 6189 struct ethtool_eee *eee = &bp->eee; 6190 struct bnxt_link_info *link_info = &bp->link_info; 6191 6192 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 6193 return true; 6194 6195 if (eee->eee_enabled) { 6196 u32 advertising = 6197 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 6198 6199 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 6200 eee->eee_enabled = 0; 6201 return false; 6202 } 6203 if (eee->advertised & ~advertising) { 6204 eee->advertised = advertising & eee->supported; 6205 return false; 6206 } 6207 } 6208 return true; 6209 } 6210 6211 static int bnxt_update_phy_setting(struct bnxt *bp) 6212 { 6213 int rc; 6214 bool update_link = false; 6215 bool update_pause = false; 6216 bool update_eee = false; 6217 struct bnxt_link_info *link_info = &bp->link_info; 6218 6219 rc = bnxt_update_link(bp, true); 6220 if (rc) { 6221 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 6222 rc); 6223 return rc; 6224 } 6225 if (!BNXT_SINGLE_PF(bp)) 6226 return 0; 6227 6228 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 6229 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 6230 link_info->req_flow_ctrl) 6231 
update_pause = true; 6232 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 6233 link_info->force_pause_setting != link_info->req_flow_ctrl) 6234 update_pause = true; 6235 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 6236 if (BNXT_AUTO_MODE(link_info->auto_mode)) 6237 update_link = true; 6238 if (link_info->req_link_speed != link_info->force_link_speed) 6239 update_link = true; 6240 if (link_info->req_duplex != link_info->duplex_setting) 6241 update_link = true; 6242 } else { 6243 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 6244 update_link = true; 6245 if (link_info->advertising != link_info->auto_link_speeds) 6246 update_link = true; 6247 } 6248 6249 /* The last close may have shut down the link, so we need to call 6250 * PHY_CFG to bring it back up. 6251 */ 6252 if (!netif_carrier_ok(bp->dev)) 6253 update_link = true; 6254 6255 if (!bnxt_eee_config_ok(bp)) 6256 update_eee = true; 6257 6258 if (update_link) 6259 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 6260 else if (update_pause) 6261 rc = bnxt_hwrm_set_pause(bp); 6262 if (rc) { 6263 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 6264 rc); 6265 return rc; 6266 } 6267 6268 return rc; 6269 } 6270 6271 /* Common routine to pre-map certain register blocks to a different GRC window. 6272 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 6273 * in the PF and 3 windows in the VF can be customized to map different 6274 * register blocks. 6275 */ 6276 static void bnxt_preset_reg_win(struct bnxt *bp) 6277 { 6278 if (BNXT_PF(bp)) { 6279 /* CAG registers map to GRC window #4 */ 6280 writel(BNXT_CAG_REG_BASE, 6281 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 6282 } 6283 } 6284 6285 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6286 { 6287 int rc = 0; 6288 6289 bnxt_preset_reg_win(bp); 6290 netif_carrier_off(bp->dev); 6291 if (irq_re_init) { 6292 rc = bnxt_setup_int_mode(bp); 6293 if (rc) { 6294 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 6295 rc); 6296 return rc; 6297 } 6298 } 6299 if ((bp->flags & BNXT_FLAG_RFS) && 6300 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 6301 /* disable RFS if falling back to INTA */ 6302 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 6303 bp->flags &= ~BNXT_FLAG_RFS; 6304 } 6305 6306 rc = bnxt_alloc_mem(bp, irq_re_init); 6307 if (rc) { 6308 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 6309 goto open_err_free_mem; 6310 } 6311 6312 if (irq_re_init) { 6313 bnxt_init_napi(bp); 6314 rc = bnxt_request_irq(bp); 6315 if (rc) { 6316 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 6317 goto open_err; 6318 } 6319 } 6320 6321 bnxt_enable_napi(bp); 6322 6323 rc = bnxt_init_nic(bp, irq_re_init); 6324 if (rc) { 6325 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 6326 goto open_err; 6327 } 6328 6329 if (link_re_init) { 6330 rc = bnxt_update_phy_setting(bp); 6331 if (rc) 6332 netdev_warn(bp->dev, "failed to update phy settings\n"); 6333 } 6334 6335 if (irq_re_init) 6336 udp_tunnel_get_rx_info(bp->dev); 6337 6338 set_bit(BNXT_STATE_OPEN, &bp->state); 6339 bnxt_enable_int(bp); 6340 /* Enable TX queues */ 6341 bnxt_tx_enable(bp); 6342 mod_timer(&bp->timer, jiffies + bp->current_interval); 6343 /* Poll link status and check for SFP+ module status */ 6344 bnxt_get_port_module_status(bp); 6345 6346 /* VF-reps may need to be re-opened after the PF is re-opened */ 6347 if (BNXT_PF(bp)) 6348 bnxt_vf_reps_open(bp); 6349 return 0; 6350 6351 open_err: 6352 bnxt_disable_napi(bp); 6353 bnxt_del_napi(bp); 6354 6355
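/* Common error unwind: both failure paths above fall through here to free the SKBs, IRQs and ring memory allocated earlier in this function. */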
open_err_free_mem: 6356 bnxt_free_skbs(bp); 6357 bnxt_free_irq(bp); 6358 bnxt_free_mem(bp, true); 6359 return rc; 6360 } 6361 6362 /* rtnl_lock held */ 6363 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6364 { 6365 int rc = 0; 6366 6367 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 6368 if (rc) { 6369 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 6370 dev_close(bp->dev); 6371 } 6372 return rc; 6373 } 6374 6375 /* rtnl_lock held, open the NIC halfway by allocating all resources, but 6376 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 6377 * self tests. 6378 */ 6379 int bnxt_half_open_nic(struct bnxt *bp) 6380 { 6381 int rc = 0; 6382 6383 rc = bnxt_alloc_mem(bp, false); 6384 if (rc) { 6385 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 6386 goto half_open_err; 6387 } 6388 rc = bnxt_init_nic(bp, false); 6389 if (rc) { 6390 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 6391 goto half_open_err; 6392 } 6393 return 0; 6394 6395 half_open_err: 6396 bnxt_free_skbs(bp); 6397 bnxt_free_mem(bp, false); 6398 dev_close(bp->dev); 6399 return rc; 6400 } 6401 6402 /* rtnl_lock held, this call can only be made after a previous successful 6403 * call to bnxt_half_open_nic(). 6404 */ 6405 void bnxt_half_close_nic(struct bnxt *bp) 6406 { 6407 bnxt_hwrm_resource_free(bp, false, false); 6408 bnxt_free_skbs(bp); 6409 bnxt_free_mem(bp, false); 6410 } 6411 6412 static int bnxt_open(struct net_device *dev) 6413 { 6414 struct bnxt *bp = netdev_priv(dev); 6415 6416 return __bnxt_open_nic(bp, true, true); 6417 } 6418 6419 static bool bnxt_drv_busy(struct bnxt *bp) 6420 { 6421 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 6422 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 6423 } 6424 6425 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6426 { 6427 int rc = 0; 6428 6429 #ifdef CONFIG_BNXT_SRIOV 6430 if (bp->sriov_cfg) { 6431 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 6432 !bp->sriov_cfg, 6433 BNXT_SRIOV_CFG_WAIT_TMO); 6434 if (rc) 6435 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 6436 } 6437 6438 /* Close the VF-reps before closing PF */ 6439 if (BNXT_PF(bp)) 6440 bnxt_vf_reps_close(bp); 6441 #endif 6442 /* Change device state to avoid TX queue wake-ups */ 6443 bnxt_tx_disable(bp); 6444 6445 clear_bit(BNXT_STATE_OPEN, &bp->state); 6446 smp_mb__after_atomic(); 6447 while (bnxt_drv_busy(bp)) 6448 msleep(20); 6449 6450 /* Flush rings and disable interrupts */ 6451 bnxt_shutdown_nic(bp, irq_re_init); 6452 6453 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 6454 6455 bnxt_disable_napi(bp); 6456 del_timer_sync(&bp->timer); 6457 bnxt_free_skbs(bp); 6458 6459 if (irq_re_init) { 6460 bnxt_free_irq(bp); 6461 bnxt_del_napi(bp); 6462 } 6463 bnxt_free_mem(bp, irq_re_init); 6464 return rc; 6465 } 6466 6467 static int bnxt_close(struct net_device *dev) 6468 { 6469 struct bnxt *bp = netdev_priv(dev); 6470 6471 bnxt_close_nic(bp, true, true); 6472 bnxt_hwrm_shutdown_link(bp); 6473 return 0; 6474 } 6475 6476 /* rtnl_lock held */ 6477 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 6478 { 6479 switch (cmd) { 6480 case SIOCGMIIPHY: 6481 /* fallthru */ 6482 case SIOCGMIIREG: { 6483 if (!netif_running(dev)) 6484 return -EAGAIN; 6485 6486 return 0; 6487 } 6488 6489 case SIOCSMIIREG: 6490 if (!netif_running(dev)) 6491 return -EAGAIN; 6492 6493 return 0; 6494 6495 default: 6496 /* do nothing */ 6497 break; 6498 } 6499 return -EOPNOTSUPP;
6500 } 6501 6502 static void 6503 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6504 { 6505 u32 i; 6506 struct bnxt *bp = netdev_priv(dev); 6507 6508 set_bit(BNXT_STATE_READ_STATS, &bp->state); 6509 /* Make sure bnxt_close_nic() sees that we are reading stats before 6510 * we check the BNXT_STATE_OPEN flag. 6511 */ 6512 smp_mb__after_atomic(); 6513 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6514 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6515 return; 6516 } 6517 6518 /* TODO check if we need to synchronize with bnxt_close path */ 6519 for (i = 0; i < bp->cp_nr_rings; i++) { 6520 struct bnxt_napi *bnapi = bp->bnapi[i]; 6521 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6522 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 6523 6524 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 6525 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 6526 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 6527 6528 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 6529 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 6530 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 6531 6532 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 6533 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 6534 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 6535 6536 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 6537 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 6538 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 6539 6540 stats->rx_missed_errors += 6541 le64_to_cpu(hw_stats->rx_discard_pkts); 6542 6543 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 6544 6545 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 6546 } 6547 6548 if (bp->flags & BNXT_FLAG_PORT_STATS) { 6549 struct rx_port_stats *rx = bp->hw_rx_port_stats; 6550 struct tx_port_stats *tx = bp->hw_tx_port_stats; 6551 6552 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 6553 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 6554 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 6555 le64_to_cpu(rx->rx_ovrsz_frames) + 6556 le64_to_cpu(rx->rx_runt_frames); 6557 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 6558 le64_to_cpu(rx->rx_jbr_frames); 6559 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 6560 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 6561 stats->tx_errors = le64_to_cpu(tx->tx_err); 6562 } 6563 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6564 } 6565 6566 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 6567 { 6568 struct net_device *dev = bp->dev; 6569 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6570 struct netdev_hw_addr *ha; 6571 u8 *haddr; 6572 int mc_count = 0; 6573 bool update = false; 6574 int off = 0; 6575 6576 netdev_for_each_mc_addr(ha, dev) { 6577 if (mc_count >= BNXT_MAX_MC_ADDRS) { 6578 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6579 vnic->mc_list_count = 0; 6580 return false; 6581 } 6582 haddr = ha->addr; 6583 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 6584 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 6585 update = true; 6586 } 6587 off += ETH_ALEN; 6588 mc_count++; 6589 } 6590 if (mc_count) 6591 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 6592 6593 if (mc_count != vnic->mc_list_count) { 6594 vnic->mc_list_count = mc_count; 6595 update = true; 6596 } 6597 return update; 6598 } 6599 6600 static bool bnxt_uc_list_updated(struct bnxt *bp) 6601 { 6602 struct net_device 
*dev = bp->dev; 6603 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6604 struct netdev_hw_addr *ha; 6605 int off = 0; 6606 6607 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 6608 return true; 6609 6610 netdev_for_each_uc_addr(ha, dev) { 6611 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 6612 return true; 6613 6614 off += ETH_ALEN; 6615 } 6616 return false; 6617 } 6618 6619 static void bnxt_set_rx_mode(struct net_device *dev) 6620 { 6621 struct bnxt *bp = netdev_priv(dev); 6622 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6623 u32 mask = vnic->rx_mask; 6624 bool mc_update = false; 6625 bool uc_update; 6626 6627 if (!netif_running(dev)) 6628 return; 6629 6630 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 6631 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 6632 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 6633 6634 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 6635 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6636 6637 uc_update = bnxt_uc_list_updated(bp); 6638 6639 if (dev->flags & IFF_ALLMULTI) { 6640 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6641 vnic->mc_list_count = 0; 6642 } else { 6643 mc_update = bnxt_mc_list_updated(bp, &mask); 6644 } 6645 6646 if (mask != vnic->rx_mask || uc_update || mc_update) { 6647 vnic->rx_mask = mask; 6648 6649 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 6650 schedule_work(&bp->sp_task); 6651 } 6652 } 6653 6654 static int bnxt_cfg_rx_mode(struct bnxt *bp) 6655 { 6656 struct net_device *dev = bp->dev; 6657 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6658 struct netdev_hw_addr *ha; 6659 int i, off = 0, rc; 6660 bool uc_update; 6661 6662 netif_addr_lock_bh(dev); 6663 uc_update = bnxt_uc_list_updated(bp); 6664 netif_addr_unlock_bh(dev); 6665 6666 if (!uc_update) 6667 goto skip_uc; 6668 6669 mutex_lock(&bp->hwrm_cmd_lock); 6670 for (i = 1; i < vnic->uc_filter_count; i++) { 6671 struct hwrm_cfa_l2_filter_free_input req = {0}; 6672 6673 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 6674 -1); 6675 6676 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 6677 6678 rc = _hwrm_send_message(bp, &req, sizeof(req), 6679 HWRM_CMD_TIMEOUT); 6680 } 6681 mutex_unlock(&bp->hwrm_cmd_lock); 6682 6683 vnic->uc_filter_count = 1; 6684 6685 netif_addr_lock_bh(dev); 6686 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 6687 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6688 } else { 6689 netdev_for_each_uc_addr(ha, dev) { 6690 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 6691 off += ETH_ALEN; 6692 vnic->uc_filter_count++; 6693 } 6694 } 6695 netif_addr_unlock_bh(dev); 6696 6697 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 6698 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 6699 if (rc) { 6700 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 6701 rc); 6702 vnic->uc_filter_count = i; 6703 return rc; 6704 } 6705 } 6706 6707 skip_uc: 6708 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 6709 if (rc) 6710 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 6711 rc); 6712 6713 return rc; 6714 } 6715 6716 /* If the chip and firmware support RFS */ 6717 static bool bnxt_rfs_supported(struct bnxt *bp) 6718 { 6719 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 6720 return true; 6721 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6722 return true; 6723 return false; 6724 } 6725 6726 /* If runtime conditions support RFS */ 6727 static bool bnxt_rfs_capable(struct bnxt *bp) 6728 { 6729 #ifdef CONFIG_RFS_ACCEL 6730 int vnics, max_vnics, max_rss_ctxs; 6731 6732 if (!(bp->flags &
BNXT_FLAG_MSIX_CAP)) 6733 return false; 6734 6735 vnics = 1 + bp->rx_nr_rings; 6736 max_vnics = bnxt_get_max_func_vnics(bp); 6737 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 6738 6739 /* RSS contexts not a limiting factor */ 6740 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6741 max_rss_ctxs = max_vnics; 6742 if (vnics > max_vnics || vnics > max_rss_ctxs) { 6743 netdev_warn(bp->dev, 6744 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 6745 min(max_rss_ctxs - 1, max_vnics - 1)); 6746 return false; 6747 } 6748 6749 return true; 6750 #else 6751 return false; 6752 #endif 6753 } 6754 6755 static netdev_features_t bnxt_fix_features(struct net_device *dev, 6756 netdev_features_t features) 6757 { 6758 struct bnxt *bp = netdev_priv(dev); 6759 6760 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 6761 features &= ~NETIF_F_NTUPLE; 6762 6763 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 6764 * turned on or off together. 6765 */ 6766 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 6767 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 6768 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 6769 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6770 NETIF_F_HW_VLAN_STAG_RX); 6771 else 6772 features |= NETIF_F_HW_VLAN_CTAG_RX | 6773 NETIF_F_HW_VLAN_STAG_RX; 6774 } 6775 #ifdef CONFIG_BNXT_SRIOV 6776 if (BNXT_VF(bp)) { 6777 if (bp->vf.vlan) { 6778 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6779 NETIF_F_HW_VLAN_STAG_RX); 6780 } 6781 } 6782 #endif 6783 return features; 6784 } 6785 6786 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 6787 { 6788 struct bnxt *bp = netdev_priv(dev); 6789 u32 flags = bp->flags; 6790 u32 changes; 6791 int rc = 0; 6792 bool re_init = false; 6793 bool update_tpa = false; 6794 6795 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 6796 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 6797 flags |= BNXT_FLAG_GRO; 6798 if (features & NETIF_F_LRO) 6799 flags |= BNXT_FLAG_LRO; 6800 6801 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 6802 flags &= ~BNXT_FLAG_TPA; 6803 6804 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6805 flags |= BNXT_FLAG_STRIP_VLAN; 6806 6807 if (features & NETIF_F_NTUPLE) 6808 flags |= BNXT_FLAG_RFS; 6809 6810 changes = flags ^ bp->flags; 6811 if (changes & BNXT_FLAG_TPA) { 6812 update_tpa = true; 6813 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 6814 (flags & BNXT_FLAG_TPA) == 0) 6815 re_init = true; 6816 } 6817 6818 if (changes & ~BNXT_FLAG_TPA) 6819 re_init = true; 6820 6821 if (flags != bp->flags) { 6822 u32 old_flags = bp->flags; 6823 6824 bp->flags = flags; 6825 6826 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6827 if (update_tpa) 6828 bnxt_set_ring_params(bp); 6829 return rc; 6830 } 6831 6832 if (re_init) { 6833 bnxt_close_nic(bp, false, false); 6834 if (update_tpa) 6835 bnxt_set_ring_params(bp); 6836 6837 return bnxt_open_nic(bp, false, false); 6838 } 6839 if (update_tpa) { 6840 rc = bnxt_set_tpa(bp, 6841 (flags & BNXT_FLAG_TPA) ?
6842 true : false); 6843 if (rc) 6844 bp->flags = old_flags; 6845 } 6846 } 6847 return rc; 6848 } 6849 6850 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 6851 { 6852 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 6853 int i = bnapi->index; 6854 6855 if (!txr) 6856 return; 6857 6858 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 6859 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 6860 txr->tx_cons); 6861 } 6862 6863 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 6864 { 6865 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 6866 int i = bnapi->index; 6867 6868 if (!rxr) 6869 return; 6870 6871 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 6872 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 6873 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 6874 rxr->rx_sw_agg_prod); 6875 } 6876 6877 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 6878 { 6879 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6880 int i = bnapi->index; 6881 6882 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 6883 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 6884 } 6885 6886 static void bnxt_dbg_dump_states(struct bnxt *bp) 6887 { 6888 int i; 6889 struct bnxt_napi *bnapi; 6890 6891 for (i = 0; i < bp->cp_nr_rings; i++) { 6892 bnapi = bp->bnapi[i]; 6893 if (netif_msg_drv(bp)) { 6894 bnxt_dump_tx_sw_state(bnapi); 6895 bnxt_dump_rx_sw_state(bnapi); 6896 bnxt_dump_cp_sw_state(bnapi); 6897 } 6898 } 6899 } 6900 6901 static void bnxt_reset_task(struct bnxt *bp, bool silent) 6902 { 6903 if (!silent) 6904 bnxt_dbg_dump_states(bp); 6905 if (netif_running(bp->dev)) { 6906 int rc; 6907 6908 if (!silent) 6909 bnxt_ulp_stop(bp); 6910 bnxt_close_nic(bp, false, false); 6911 rc = bnxt_open_nic(bp, false, false); 6912 if (!silent && !rc) 6913 bnxt_ulp_start(bp); 6914 } 6915 } 6916 6917 static void bnxt_tx_timeout(struct net_device *dev) 6918 { 6919 struct bnxt *bp = netdev_priv(dev); 6920 6921 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6922 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6923 schedule_work(&bp->sp_task); 6924 } 6925 6926 #ifdef CONFIG_NET_POLL_CONTROLLER 6927 static void bnxt_poll_controller(struct net_device *dev) 6928 { 6929 struct bnxt *bp = netdev_priv(dev); 6930 int i; 6931 6932 /* Only process tx rings/combined rings in netpoll mode. */ 6933 for (i = 0; i < bp->tx_nr_rings; i++) { 6934 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6935 6936 napi_schedule(&txr->bnapi->napi); 6937 } 6938 } 6939 #endif 6940 6941 static void bnxt_timer(unsigned long data) 6942 { 6943 struct bnxt *bp = (struct bnxt *)data; 6944 struct net_device *dev = bp->dev; 6945 6946 if (!netif_running(dev)) 6947 return; 6948 6949 if (atomic_read(&bp->intr_sem) != 0) 6950 goto bnxt_restart_timer; 6951 6952 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 6953 bp->stats_coal_ticks) { 6954 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 6955 schedule_work(&bp->sp_task); 6956 } 6957 bnxt_restart_timer: 6958 mod_timer(&bp->timer, jiffies + bp->current_interval); 6959 } 6960 6961 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 6962 { 6963 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 6964 * set. If the device is being closed, bnxt_close() may be holding 6965 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 6966 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
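* The matching bnxt_rtnl_unlock_sp() below sets BNXT_STATE_IN_SP_TASK again before releasing rtnl, restoring the exclusion for the rest of bnxt_sp_task().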
6967 */ 6968 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6969 rtnl_lock(); 6970 } 6971 6972 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 6973 { 6974 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6975 rtnl_unlock(); 6976 } 6977 6978 /* Only called from bnxt_sp_task() */ 6979 static void bnxt_reset(struct bnxt *bp, bool silent) 6980 { 6981 bnxt_rtnl_lock_sp(bp); 6982 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 6983 bnxt_reset_task(bp, silent); 6984 bnxt_rtnl_unlock_sp(bp); 6985 } 6986 6987 static void bnxt_cfg_ntp_filters(struct bnxt *); 6988 6989 static void bnxt_sp_task(struct work_struct *work) 6990 { 6991 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 6992 6993 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6994 smp_mb__after_atomic(); 6995 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6996 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6997 return; 6998 } 6999 7000 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 7001 bnxt_cfg_rx_mode(bp); 7002 7003 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 7004 bnxt_cfg_ntp_filters(bp); 7005 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 7006 bnxt_hwrm_exec_fwd_req(bp); 7007 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 7008 bnxt_hwrm_tunnel_dst_port_alloc( 7009 bp, bp->vxlan_port, 7010 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7011 } 7012 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 7013 bnxt_hwrm_tunnel_dst_port_free( 7014 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7015 } 7016 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 7017 bnxt_hwrm_tunnel_dst_port_alloc( 7018 bp, bp->nge_port, 7019 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7020 } 7021 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 7022 bnxt_hwrm_tunnel_dst_port_free( 7023 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7024 } 7025 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 7026 bnxt_hwrm_port_qstats(bp); 7027 7028 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 7029 * must be the last functions to be called before exiting. 
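* They do so via bnxt_rtnl_lock_sp(), so any event handled after them could race with bnxt_close(), which waits for the bit to clear.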
7030 */ 7031 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 7032 int rc = 0; 7033 7034 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 7035 &bp->sp_event)) 7036 bnxt_hwrm_phy_qcaps(bp); 7037 7038 bnxt_rtnl_lock_sp(bp); 7039 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7040 rc = bnxt_update_link(bp, true); 7041 bnxt_rtnl_unlock_sp(bp); 7042 if (rc) 7043 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 7044 rc); 7045 } 7046 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 7047 bnxt_rtnl_lock_sp(bp); 7048 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7049 bnxt_get_port_module_status(bp); 7050 bnxt_rtnl_unlock_sp(bp); 7051 } 7052 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 7053 bnxt_reset(bp, false); 7054 7055 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 7056 bnxt_reset(bp, true); 7057 7058 smp_mb__before_atomic(); 7059 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7060 } 7061 7062 /* Under rtnl_lock */ 7063 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 7064 int tx_xdp) 7065 { 7066 int max_rx, max_tx, tx_sets = 1; 7067 int tx_rings_needed; 7068 int rc; 7069 7070 if (tcs) 7071 tx_sets = tcs; 7072 7073 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 7074 if (rc) 7075 return rc; 7076 7077 if (max_rx < rx) 7078 return -ENOMEM; 7079 7080 tx_rings_needed = tx * tx_sets + tx_xdp; 7081 if (max_tx < tx_rings_needed) 7082 return -ENOMEM; 7083 7084 return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed); 7085 } 7086 7087 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 7088 { 7089 if (bp->bar2) { 7090 pci_iounmap(pdev, bp->bar2); 7091 bp->bar2 = NULL; 7092 } 7093 7094 if (bp->bar1) { 7095 pci_iounmap(pdev, bp->bar1); 7096 bp->bar1 = NULL; 7097 } 7098 7099 if (bp->bar0) { 7100 pci_iounmap(pdev, bp->bar0); 7101 bp->bar0 = NULL; 7102 } 7103 } 7104 7105 static void bnxt_cleanup_pci(struct bnxt *bp) 7106 { 7107 bnxt_unmap_bars(bp, bp->pdev); 7108 pci_release_regions(bp->pdev); 7109 pci_disable_device(bp->pdev); 7110 } 7111 7112 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 7113 { 7114 int rc; 7115 struct bnxt *bp = netdev_priv(dev); 7116 7117 SET_NETDEV_DEV(dev, &pdev->dev); 7118 7119 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 7120 rc = pci_enable_device(pdev); 7121 if (rc) { 7122 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 7123 goto init_err; 7124 } 7125 7126 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 7127 dev_err(&pdev->dev, 7128 "Cannot find PCI device base address, aborting\n"); 7129 rc = -ENODEV; 7130 goto init_err_disable; 7131 } 7132 7133 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 7134 if (rc) { 7135 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 7136 goto init_err_disable; 7137 } 7138 7139 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 7140 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 7141 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); rc = -EIO; /* rc is 0 here; don't return success from a failed probe */ 7142 goto init_err_disable; 7143 } 7144 7145 pci_set_master(pdev); 7146 7147 bp->dev = dev; 7148 bp->pdev = pdev; 7149 7150 bp->bar0 = pci_ioremap_bar(pdev, 0); 7151 if (!bp->bar0) { 7152 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 7153 rc = -ENOMEM; 7154 goto init_err_release; 7155 } 7156 7157 bp->bar1 = pci_ioremap_bar(pdev, 2); 7158 if (!bp->bar1) { 7159 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 7160 rc = -ENOMEM; 7161 goto init_err_release; 7162 } 7163 7164 bp->bar2 = pci_ioremap_bar(pdev, 4); 7165 if (!bp->bar2) { 7166 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 7167 rc = -ENOMEM; 7168 goto init_err_release; 7169 } 7170 7171 pci_enable_pcie_error_reporting(pdev); 7172 7173 INIT_WORK(&bp->sp_task, bnxt_sp_task); 7174 7175 spin_lock_init(&bp->ntp_fltr_lock); 7176 7177 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 7178 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 7179 7180 /* tick values in microseconds */ 7181 bp->rx_coal_ticks = 12; 7182 bp->rx_coal_bufs = 30; 7183 bp->rx_coal_ticks_irq = 1; 7184 bp->rx_coal_bufs_irq = 2; 7185 7186 bp->tx_coal_ticks = 25; 7187 bp->tx_coal_bufs = 30; 7188 bp->tx_coal_ticks_irq = 2; 7189 bp->tx_coal_bufs_irq = 2; 7190 7191 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 7192 7193 init_timer(&bp->timer); 7194 bp->timer.data = (unsigned long)bp; 7195 bp->timer.function = bnxt_timer; 7196 bp->current_interval = BNXT_TIMER_INTERVAL; 7197 7198 clear_bit(BNXT_STATE_OPEN, &bp->state); 7199 return 0; 7200 7201 init_err_release: 7202 bnxt_unmap_bars(bp, pdev); 7203 pci_release_regions(pdev); 7204 7205 init_err_disable: 7206 pci_disable_device(pdev); 7207 7208 init_err: 7209 return rc; 7210 } 7211 7212 /* rtnl_lock held */ 7213 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 7214 { 7215 struct sockaddr *addr = p; 7216 struct bnxt *bp = netdev_priv(dev); 7217 int rc = 0; 7218 7219 if (!is_valid_ether_addr(addr->sa_data)) 7220 return -EADDRNOTAVAIL; 7221 7222 rc = bnxt_approve_mac(bp, addr->sa_data); 7223 if (rc) 7224 return rc; 7225 7226 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 7227 return 0; 7228 7229 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 7230 if (netif_running(dev)) { 7231 bnxt_close_nic(bp, false, false); 7232 rc = bnxt_open_nic(bp, false, false); 7233 } 7234 7235 return rc; 7236 } 7237 7238 /* rtnl_lock held */ 7239 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 7240 { 7241 struct bnxt *bp = netdev_priv(dev); 7242 7243 if (netif_running(dev)) 7244 bnxt_close_nic(bp, false, false); 7245 7246 dev->mtu = new_mtu; 7247 bnxt_set_ring_params(bp); 7248 7249 if (netif_running(dev)) 7250 return bnxt_open_nic(bp, false, false); 7251 7252 return 0; 7253 } 7254 7255 int
bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 7256 { 7257 struct bnxt *bp = netdev_priv(dev); 7258 bool sh = false; 7259 int rc; 7260 7261 if (tc > bp->max_tc) { 7262 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 7263 tc, bp->max_tc); 7264 return -EINVAL; 7265 } 7266 7267 if (netdev_get_num_tc(dev) == tc) 7268 return 0; 7269 7270 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7271 sh = true; 7272 7273 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 7274 sh, tc, bp->tx_nr_rings_xdp); 7275 if (rc) 7276 return rc; 7277 7278 /* Needs to close the device and do hw resource re-allocations */ 7279 if (netif_running(bp->dev)) 7280 bnxt_close_nic(bp, true, false); 7281 7282 if (tc) { 7283 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 7284 netdev_set_num_tc(dev, tc); 7285 } else { 7286 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 7287 netdev_reset_tc(dev); 7288 } 7289 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 7290 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 7291 bp->tx_nr_rings + bp->rx_nr_rings; 7292 bp->num_stat_ctxs = bp->cp_nr_rings; 7293 7294 if (netif_running(bp->dev)) 7295 return bnxt_open_nic(bp, true, false); 7296 7297 return 0; 7298 } 7299 7300 static int bnxt_setup_flower(struct net_device *dev, 7301 struct tc_cls_flower_offload *cls_flower) 7302 { 7303 struct bnxt *bp = netdev_priv(dev); 7304 7305 if (BNXT_VF(bp)) 7306 return -EOPNOTSUPP; 7307 7308 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower); 7309 } 7310 7311 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 7312 void *type_data) 7313 { 7314 switch (type) { 7315 case TC_SETUP_CLSFLOWER: 7316 return bnxt_setup_flower(dev, type_data); 7317 case TC_SETUP_MQPRIO: { 7318 struct tc_mqprio_qopt *mqprio = type_data; 7319 7320 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 7321 7322 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 7323 } 7324 default: 7325 return -EOPNOTSUPP; 7326 } 7327 } 7328 7329 #ifdef CONFIG_RFS_ACCEL 7330 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 7331 struct bnxt_ntuple_filter *f2) 7332 { 7333 struct flow_keys *keys1 = &f1->fkeys; 7334 struct flow_keys *keys2 = &f2->fkeys; 7335 7336 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && 7337 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && 7338 keys1->ports.ports == keys2->ports.ports && 7339 keys1->basic.ip_proto == keys2->basic.ip_proto && 7340 keys1->basic.n_proto == keys2->basic.n_proto && 7341 keys1->control.flags == keys2->control.flags && 7342 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 7343 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 7344 return true; 7345 7346 return false; 7347 } 7348 7349 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 7350 u16 rxq_index, u32 flow_id) 7351 { 7352 struct bnxt *bp = netdev_priv(dev); 7353 struct bnxt_ntuple_filter *fltr, *new_fltr; 7354 struct flow_keys *fkeys; 7355 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 7356 int rc = 0, idx, bit_id, l2_idx = 0; 7357 struct hlist_head *head; 7358 7359 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 7360 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 7361 int off = 0, j; 7362 7363 netif_addr_lock_bh(dev); 7364 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 7365 if (ether_addr_equal(eth->h_dest, 7366 vnic->uc_list + off)) { 7367 l2_idx = j + 1; 7368 break; 7369 } 7370 } 7371 netif_addr_unlock_bh(dev); 7372 if (!l2_idx) 7373 return -EINVAL; 7374 } 7375 new_fltr = 
kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 7376 if (!new_fltr) 7377 return -ENOMEM; 7378 7379 fkeys = &new_fltr->fkeys; 7380 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 7381 rc = -EPROTONOSUPPORT; 7382 goto err_free; 7383 } 7384 7385 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 7386 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 7387 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 7388 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 7389 rc = -EPROTONOSUPPORT; 7390 goto err_free; 7391 } 7392 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 7393 bp->hwrm_spec_code < 0x10601) { 7394 rc = -EPROTONOSUPPORT; 7395 goto err_free; 7396 } 7397 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && 7398 bp->hwrm_spec_code < 0x10601) { 7399 rc = -EPROTONOSUPPORT; 7400 goto err_free; 7401 } 7402 7403 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 7404 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 7405 7406 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 7407 head = &bp->ntp_fltr_hash_tbl[idx]; 7408 rcu_read_lock(); 7409 hlist_for_each_entry_rcu(fltr, head, hash) { 7410 if (bnxt_fltr_match(fltr, new_fltr)) { 7411 rcu_read_unlock(); 7412 rc = 0; 7413 goto err_free; 7414 } 7415 } 7416 rcu_read_unlock(); 7417 7418 spin_lock_bh(&bp->ntp_fltr_lock); 7419 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 7420 BNXT_NTP_FLTR_MAX_FLTR, 0); 7421 if (bit_id < 0) { 7422 spin_unlock_bh(&bp->ntp_fltr_lock); 7423 rc = -ENOMEM; 7424 goto err_free; 7425 } 7426 7427 new_fltr->sw_id = (u16)bit_id; 7428 new_fltr->flow_id = flow_id; 7429 new_fltr->l2_fltr_idx = l2_idx; 7430 new_fltr->rxq = rxq_index; 7431 hlist_add_head_rcu(&new_fltr->hash, head); 7432 bp->ntp_fltr_count++; 7433 spin_unlock_bh(&bp->ntp_fltr_lock); 7434 7435 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7436 schedule_work(&bp->sp_task); 7437 7438 return new_fltr->sw_id; 7439 7440 err_free: 7441 kfree(new_fltr); 7442 return rc; 7443 } 7444 7445 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7446 { 7447 int i; 7448 7449 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 7450 struct hlist_head *head; 7451 struct hlist_node *tmp; 7452 struct bnxt_ntuple_filter *fltr; 7453 int rc; 7454 7455 head = &bp->ntp_fltr_hash_tbl[i]; 7456 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 7457 bool del = false; 7458 7459 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 7460 if (rps_may_expire_flow(bp->dev, fltr->rxq, 7461 fltr->flow_id, 7462 fltr->sw_id)) { 7463 bnxt_hwrm_cfa_ntuple_filter_free(bp, 7464 fltr); 7465 del = true; 7466 } 7467 } else { 7468 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 7469 fltr); 7470 if (rc) 7471 del = true; 7472 else 7473 set_bit(BNXT_FLTR_VALID, &fltr->state); 7474 } 7475 7476 if (del) { 7477 spin_lock_bh(&bp->ntp_fltr_lock); 7478 hlist_del_rcu(&fltr->hash); 7479 bp->ntp_fltr_count--; 7480 spin_unlock_bh(&bp->ntp_fltr_lock); 7481 synchronize_rcu(); 7482 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 7483 kfree(fltr); 7484 } 7485 } 7486 } 7487 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 7488 netdev_info(bp->dev, "Received PF driver unload event!\n"); 7489 } 7490 7491 #else 7492 7493 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7494 { 7495 } 7496 7497 #endif /* CONFIG_RFS_ACCEL */ 7498 7499 static void bnxt_udp_tunnel_add(struct net_device *dev, 7500 struct udp_tunnel_info *ti) 7501 { 7502 struct bnxt *bp = netdev_priv(dev); 7503 7504 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7505 return; 7506 7507 if (!netif_running(dev)) 7508 return; 7509 7510 switch (ti->type) { 7511 case
UDP_TUNNEL_TYPE_VXLAN: 7512 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 7513 return; 7514 7515 bp->vxlan_port_cnt++; 7516 if (bp->vxlan_port_cnt == 1) { 7517 bp->vxlan_port = ti->port; 7518 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7519 schedule_work(&bp->sp_task); 7520 } 7521 break; 7522 case UDP_TUNNEL_TYPE_GENEVE: 7523 if (bp->nge_port_cnt && bp->nge_port != ti->port) 7524 return; 7525 7526 bp->nge_port_cnt++; 7527 if (bp->nge_port_cnt == 1) { 7528 bp->nge_port = ti->port; 7529 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 7530 } 7531 break; 7532 default: 7533 return; 7534 } 7535 7536 schedule_work(&bp->sp_task); 7537 } 7538 7539 static void bnxt_udp_tunnel_del(struct net_device *dev, 7540 struct udp_tunnel_info *ti) 7541 { 7542 struct bnxt *bp = netdev_priv(dev); 7543 7544 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7545 return; 7546 7547 if (!netif_running(dev)) 7548 return; 7549 7550 switch (ti->type) { 7551 case UDP_TUNNEL_TYPE_VXLAN: 7552 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 7553 return; 7554 bp->vxlan_port_cnt--; 7555 7556 if (bp->vxlan_port_cnt != 0) 7557 return; 7558 7559 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 7560 break; 7561 case UDP_TUNNEL_TYPE_GENEVE: 7562 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 7563 return; 7564 bp->nge_port_cnt--; 7565 7566 if (bp->nge_port_cnt != 0) 7567 return; 7568 7569 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 7570 break; 7571 default: 7572 return; 7573 } 7574 7575 schedule_work(&bp->sp_task); 7576 } 7577 7578 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7579 struct net_device *dev, u32 filter_mask, 7580 int nlflags) 7581 { 7582 struct bnxt *bp = netdev_priv(dev); 7583 7584 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 7585 nlflags, filter_mask, NULL); 7586 } 7587 7588 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 7589 u16 flags) 7590 { 7591 struct bnxt *bp = netdev_priv(dev); 7592 struct nlattr *attr, *br_spec; 7593 int rem, rc = 0; 7594 7595 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 7596 return -EOPNOTSUPP; 7597 7598 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7599 if (!br_spec) 7600 return -EINVAL; 7601 7602 nla_for_each_nested(attr, br_spec, rem) { 7603 u16 mode; 7604 7605 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7606 continue; 7607 7608 if (nla_len(attr) < sizeof(mode)) 7609 return -EINVAL; 7610 7611 mode = nla_get_u16(attr); 7612 if (mode == bp->br_mode) 7613 break; 7614 7615 rc = bnxt_hwrm_set_br_mode(bp, mode); 7616 if (!rc) 7617 bp->br_mode = mode; 7618 break; 7619 } 7620 return rc; 7621 } 7622 7623 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, 7624 size_t len) 7625 { 7626 struct bnxt *bp = netdev_priv(dev); 7627 int rc; 7628 7629 /* The PF and its VF-reps only support the switchdev framework */ 7630 if (!BNXT_PF(bp)) 7631 return -EOPNOTSUPP; 7632 7633 rc = snprintf(buf, len, "p%d", bp->pf.port_id); 7634 7635 if (rc >= len) 7636 return -EOPNOTSUPP; 7637 return 0; 7638 } 7639 7640 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) 7641 { 7642 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 7643 return -EOPNOTSUPP; 7644 7645 /* The PF and its VF-reps only support the switchdev framework */ 7646 if (!BNXT_PF(bp)) 7647 return -EOPNOTSUPP; 7648 7649 switch (attr->id) { 7650 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: 7651 /* In SRIOV each PF-pool (PF + child VFs) serves as a 7652 *
switching domain; the PF's perm mac-addr can be used 7653 * as the unique parent-id. 7654 */ 7655 attr->u.ppid.id_len = ETH_ALEN; 7656 ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr); 7657 break; 7658 default: 7659 return -EOPNOTSUPP; 7660 } 7661 return 0; 7662 } 7663 7664 static int bnxt_swdev_port_attr_get(struct net_device *dev, 7665 struct switchdev_attr *attr) 7666 { 7667 return bnxt_port_attr_get(netdev_priv(dev), attr); 7668 } 7669 7670 static const struct switchdev_ops bnxt_switchdev_ops = { 7671 .switchdev_port_attr_get = bnxt_swdev_port_attr_get 7672 }; 7673 7674 static const struct net_device_ops bnxt_netdev_ops = { 7675 .ndo_open = bnxt_open, 7676 .ndo_start_xmit = bnxt_start_xmit, 7677 .ndo_stop = bnxt_close, 7678 .ndo_get_stats64 = bnxt_get_stats64, 7679 .ndo_set_rx_mode = bnxt_set_rx_mode, 7680 .ndo_do_ioctl = bnxt_ioctl, 7681 .ndo_validate_addr = eth_validate_addr, 7682 .ndo_set_mac_address = bnxt_change_mac_addr, 7683 .ndo_change_mtu = bnxt_change_mtu, 7684 .ndo_fix_features = bnxt_fix_features, 7685 .ndo_set_features = bnxt_set_features, 7686 .ndo_tx_timeout = bnxt_tx_timeout, 7687 #ifdef CONFIG_BNXT_SRIOV 7688 .ndo_get_vf_config = bnxt_get_vf_config, 7689 .ndo_set_vf_mac = bnxt_set_vf_mac, 7690 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 7691 .ndo_set_vf_rate = bnxt_set_vf_bw, 7692 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 7693 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 7694 #endif 7695 #ifdef CONFIG_NET_POLL_CONTROLLER 7696 .ndo_poll_controller = bnxt_poll_controller, 7697 #endif 7698 .ndo_setup_tc = bnxt_setup_tc, 7699 #ifdef CONFIG_RFS_ACCEL 7700 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 7701 #endif 7702 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 7703 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 7704 .ndo_xdp = bnxt_xdp, 7705 .ndo_bridge_getlink = bnxt_bridge_getlink, 7706 .ndo_bridge_setlink = bnxt_bridge_setlink, 7707 .ndo_get_phys_port_name = bnxt_get_phys_port_name 7708 }; 7709 7710 static void bnxt_remove_one(struct pci_dev *pdev) 7711 { 7712 struct net_device *dev = pci_get_drvdata(pdev); 7713 struct bnxt *bp = netdev_priv(dev); 7714 7715 if (BNXT_PF(bp)) { 7716 bnxt_sriov_disable(bp); 7717 bnxt_dl_unregister(bp); 7718 } 7719 7720 pci_disable_pcie_error_reporting(pdev); 7721 unregister_netdev(dev); 7722 bnxt_shutdown_tc(bp); 7723 cancel_work_sync(&bp->sp_task); 7724 bp->sp_event = 0; 7725 7726 bnxt_clear_int_mode(bp); 7727 bnxt_hwrm_func_drv_unrgtr(bp); 7728 bnxt_free_hwrm_resources(bp); 7729 bnxt_free_hwrm_short_cmd_req(bp); 7730 bnxt_ethtool_free(bp); 7731 bnxt_dcb_free(bp); 7732 kfree(bp->edev); 7733 bp->edev = NULL; 7734 if (bp->xdp_prog) 7735 bpf_prog_put(bp->xdp_prog); 7736 bnxt_cleanup_pci(bp); 7737 free_netdev(dev); 7738 } 7739 7740 static int bnxt_probe_phy(struct bnxt *bp) 7741 { 7742 int rc = 0; 7743 struct bnxt_link_info *link_info = &bp->link_info; 7744 7745 rc = bnxt_hwrm_phy_qcaps(bp); 7746 if (rc) { 7747 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 7748 rc); 7749 return rc; 7750 } 7751 7752 rc = bnxt_update_link(bp, false); 7753 if (rc) { 7754 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 7755 rc); 7756 return rc; 7757 } 7758 7759 /* Older firmware does not have supported_auto_speeds, so assume 7760 * that all supported speeds can be autonegotiated.
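* In that case, support_auto_speeds is seeded from support_speeds below so later advertising checks still see a valid mask.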
7761 */ 7762 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 7763 link_info->support_auto_speeds = link_info->support_speeds; 7764 7765 /* initialize the ethtool settings copy with NVM settings */ 7766 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 7767 link_info->autoneg = BNXT_AUTONEG_SPEED; 7768 if (bp->hwrm_spec_code >= 0x10201) { 7769 if (link_info->auto_pause_setting & 7770 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 7771 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7772 } else { 7773 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7774 } 7775 link_info->advertising = link_info->auto_link_speeds; 7776 } else { 7777 link_info->req_link_speed = link_info->force_link_speed; 7778 link_info->req_duplex = link_info->duplex_setting; 7779 } 7780 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 7781 link_info->req_flow_ctrl = 7782 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 7783 else 7784 link_info->req_flow_ctrl = link_info->force_pause_setting; 7785 return rc; 7786 } 7787 7788 static int bnxt_get_max_irq(struct pci_dev *pdev) 7789 { 7790 u16 ctrl; 7791 7792 if (!pdev->msix_cap) 7793 return 1; 7794 7795 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 7796 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 7797 } 7798 7799 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7800 int *max_cp) 7801 { 7802 int max_ring_grps = 0; 7803 7804 #ifdef CONFIG_BNXT_SRIOV 7805 if (!BNXT_PF(bp)) { 7806 *max_tx = bp->vf.max_tx_rings; 7807 *max_rx = bp->vf.max_rx_rings; 7808 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); 7809 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); 7810 max_ring_grps = bp->vf.max_hw_ring_grps; 7811 } else 7812 #endif 7813 { 7814 *max_tx = bp->pf.max_tx_rings; 7815 *max_rx = bp->pf.max_rx_rings; 7816 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); 7817 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); 7818 max_ring_grps = bp->pf.max_hw_ring_grps; 7819 } 7820 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 7821 *max_cp -= 1; 7822 *max_rx -= 2; 7823 } 7824 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7825 *max_rx >>= 1; 7826 *max_rx = min_t(int, *max_rx, max_ring_grps); 7827 } 7828 7829 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 7830 { 7831 int rx, tx, cp; 7832 7833 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 7834 if (!rx || !tx || !cp) 7835 return -ENOMEM; 7836 7837 *max_rx = rx; 7838 *max_tx = tx; 7839 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 7840 } 7841 7842 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7843 bool shared) 7844 { 7845 int rc; 7846 7847 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 7848 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 7849 /* Not enough rings, try disabling agg rings.
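* Aggregation rings halve the usable RX ring count in _bnxt_get_max_rings(); LRO depends on them, so it is disabled below as well.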
*/ 7850 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 7851 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 7852 if (rc) 7853 return rc; 7854 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 7855 bp->dev->hw_features &= ~NETIF_F_LRO; 7856 bp->dev->features &= ~NETIF_F_LRO; 7857 bnxt_set_ring_params(bp); 7858 } 7859 7860 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 7861 int max_cp, max_stat, max_irq; 7862 7863 /* Reserve minimum resources for RoCE */ 7864 max_cp = bnxt_get_max_func_cp_rings(bp); 7865 max_stat = bnxt_get_max_func_stat_ctxs(bp); 7866 max_irq = bnxt_get_max_func_irqs(bp); 7867 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 7868 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 7869 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 7870 return 0; 7871 7872 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 7873 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 7874 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 7875 max_cp = min_t(int, max_cp, max_irq); 7876 max_cp = min_t(int, max_cp, max_stat); 7877 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 7878 if (rc) 7879 rc = 0; 7880 } 7881 return rc; 7882 } 7883 7884 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 7885 { 7886 int dflt_rings, max_rx_rings, max_tx_rings, rc; 7887 7888 if (sh) 7889 bp->flags |= BNXT_FLAG_SHARED_RINGS; 7890 dflt_rings = netif_get_num_default_rss_queues(); 7891 /* Reduce default rings to reduce memory usage on multi-port cards */ 7892 if (bp->port_count > 1) 7893 dflt_rings = min_t(int, dflt_rings, 4); 7894 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 7895 if (rc) 7896 return rc; 7897 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 7898 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 7899 7900 rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc); 7901 if (rc) 7902 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 7903 7904 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 7905 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 7906 bp->tx_nr_rings + bp->rx_nr_rings; 7907 bp->num_stat_ctxs = bp->cp_nr_rings; 7908 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7909 bp->rx_nr_rings++; 7910 bp->cp_nr_rings++; 7911 } 7912 return rc; 7913 } 7914 7915 void bnxt_restore_pf_fw_resources(struct bnxt *bp) 7916 { 7917 ASSERT_RTNL(); 7918 bnxt_hwrm_func_qcaps(bp); 7919 bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP); 7920 } 7921 7922 static int bnxt_init_mac_addr(struct bnxt *bp) 7923 { 7924 int rc = 0; 7925 7926 if (BNXT_PF(bp)) { 7927 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); 7928 } else { 7929 #ifdef CONFIG_BNXT_SRIOV 7930 struct bnxt_vf_info *vf = &bp->vf; 7931 7932 if (is_valid_ether_addr(vf->mac_addr)) { 7933 /* overwrite netdev dev_addr with admin VF MAC */ 7934 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 7935 } else { 7936 eth_hw_addr_random(bp->dev); 7937 rc = bnxt_approve_mac(bp, bp->dev->dev_addr); 7938 } 7939 #endif 7940 } 7941 return rc; 7942 } 7943 7944 static void bnxt_parse_log_pcie_link(struct bnxt *bp) 7945 { 7946 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 7947 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; 7948 7949 if (pcie_get_minimum_link(bp->pdev, &speed, &width) || 7950 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) 7951 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); 7952 else 7953 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n", 7954 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : 7955 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : 7956 speed == PCIE_SPEED_8_0GT ?
"8.0GT/s" : 7957 "Unknown", width); 7958 } 7959 7960 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 7961 { 7962 static int version_printed; 7963 struct net_device *dev; 7964 struct bnxt *bp; 7965 int rc, max_irqs; 7966 7967 if (pci_is_bridge(pdev)) 7968 return -ENODEV; 7969 7970 if (version_printed++ == 0) 7971 pr_info("%s", version); 7972 7973 max_irqs = bnxt_get_max_irq(pdev); 7974 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); 7975 if (!dev) 7976 return -ENOMEM; 7977 7978 bp = netdev_priv(dev); 7979 7980 if (bnxt_vf_pciid(ent->driver_data)) 7981 bp->flags |= BNXT_FLAG_VF; 7982 7983 if (pdev->msix_cap) 7984 bp->flags |= BNXT_FLAG_MSIX_CAP; 7985 7986 rc = bnxt_init_board(pdev, dev); 7987 if (rc < 0) 7988 goto init_err_free; 7989 7990 dev->netdev_ops = &bnxt_netdev_ops; 7991 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 7992 dev->ethtool_ops = &bnxt_ethtool_ops; 7993 SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); 7994 pci_set_drvdata(pdev, dev); 7995 7996 rc = bnxt_alloc_hwrm_resources(bp); 7997 if (rc) 7998 goto init_err_pci_clean; 7999 8000 mutex_init(&bp->hwrm_cmd_lock); 8001 rc = bnxt_hwrm_ver_get(bp); 8002 if (rc) 8003 goto init_err_pci_clean; 8004 8005 if (bp->flags & BNXT_FLAG_SHORT_CMD) { 8006 rc = bnxt_alloc_hwrm_short_cmd_req(bp); 8007 if (rc) 8008 goto init_err_pci_clean; 8009 } 8010 8011 rc = bnxt_hwrm_func_reset(bp); 8012 if (rc) 8013 goto init_err_pci_clean; 8014 8015 bnxt_hwrm_fw_set_time(bp); 8016 8017 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 8018 NETIF_F_TSO | NETIF_F_TSO6 | 8019 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 8020 NETIF_F_GSO_IPXIP4 | 8021 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 8022 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 8023 NETIF_F_RXCSUM | NETIF_F_GRO; 8024 8025 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 8026 dev->hw_features |= NETIF_F_LRO; 8027 8028 dev->hw_enc_features = 8029 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 8030 NETIF_F_TSO | NETIF_F_TSO6 | 8031 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 8032 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 8033 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 8034 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 8035 NETIF_F_GSO_GRE_CSUM; 8036 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 8037 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | 8038 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; 8039 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 8040 dev->priv_flags |= IFF_UNICAST_FLT; 8041 8042 /* MTU range: 60 - 9500 */ 8043 dev->min_mtu = ETH_ZLEN; 8044 dev->max_mtu = BNXT_MAX_MTU; 8045 8046 #ifdef CONFIG_BNXT_SRIOV 8047 init_waitqueue_head(&bp->sriov_cfg_wait); 8048 mutex_init(&bp->sriov_lock); 8049 #endif 8050 bp->gro_func = bnxt_gro_func_5730x; 8051 if (BNXT_CHIP_P4_PLUS(bp)) 8052 bp->gro_func = bnxt_gro_func_5731x; 8053 else 8054 bp->flags |= BNXT_FLAG_DOUBLE_DB; 8055 8056 rc = bnxt_hwrm_func_drv_rgtr(bp); 8057 if (rc) 8058 goto init_err_pci_clean; 8059 8060 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); 8061 if (rc) 8062 goto init_err_pci_clean; 8063 8064 bp->ulp_probe = bnxt_ulp_probe; 8065 8066 /* Get the MAX capabilities for this function */ 8067 rc = bnxt_hwrm_func_qcaps(bp); 8068 if (rc) { 8069 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 8070 rc); 8071 rc = -1; 8072 goto init_err_pci_clean; 8073 } 8074 rc = bnxt_init_mac_addr(bp); 8075 if (rc) { 8076 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 8077 rc = -EADDRNOTAVAIL; 8078 goto 

/**
 * bnxt_init_one - PCI probe entry point
 * @pdev: Pointer to PCI device
 * @ent: Matching entry in bnxt_pci_tbl
 *
 * Allocates the net_device, brings up the HWRM channel to firmware,
 * queries function capabilities, sets up default features and rings,
 * and registers the netdev.
 */
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err_pci_clean;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
		if (rc)
			goto init_err_pci_clean;
	}

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9500 */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = BNXT_MAX_MTU;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_P4_PLUS(bp))
		bp->gro_func = bnxt_gro_func_5731x;
	else
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err_pci_clean;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}
	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}

	bnxt_hwrm_func_qcfg(bp);
	bnxt_hwrm_port_led_qcaps(bp);
	bnxt_ethtool_init(bp);
	bnxt_dcb_init(bp);

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_get_wol_settings(bp);
	if (bp->flags & BNXT_FLAG_WOL_CAP)
		device_set_wakeup_enable(&pdev->dev, bp->wol);
	else
		device_set_wakeup_capable(&pdev->dev, false);

	if (BNXT_PF(bp))
		bnxt_init_tc(bp);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup_tc;

	if (BNXT_PF(bp))
		bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_cleanup_tc:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}
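
/**
 * bnxt_shutdown - PCI shutdown handler
 * @pdev: Pointer to PCI device
 *
 * Closes the device if it is running.  On power-off, also shuts down
 * the ULPs, disables the interrupt mode, and arms wake-on-LAN before
 * putting the device into D3hot.
 */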
static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_ulp_shutdown(bp);
		bnxt_clear_int_mode(bp);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
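/**
 * bnxt_suspend - PM suspend handler
 * @device: Pointer to the device
 *
 * Detaches and closes the device if it is running, then unregisters
 * the driver from firmware so the function is treated as driverless
 * while the system is asleep.
 */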
static int bnxt_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

module_pci_driver(bnxt_pci_driver);