/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)	\
	writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)	\
	writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)	\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct
tx_push_buffer *tx_push_buf = txr->tx_push; 301 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 302 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 303 void *pdata = tx_push_buf->data; 304 u64 *end; 305 int j, push_len; 306 307 /* Set COAL_NOW to be ready quickly for the next push */ 308 tx_push->tx_bd_len_flags_type = 309 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 310 TX_BD_TYPE_LONG_TX_BD | 311 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 312 TX_BD_FLAGS_COAL_NOW | 313 TX_BD_FLAGS_PACKET_END | 314 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 315 316 if (skb->ip_summed == CHECKSUM_PARTIAL) 317 tx_push1->tx_bd_hsize_lflags = 318 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 319 else 320 tx_push1->tx_bd_hsize_lflags = 0; 321 322 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 323 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); 324 325 end = pdata + length; 326 end = PTR_ALIGN(end, 8) - 1; 327 *end = 0; 328 329 skb_copy_from_linear_data(skb, pdata, len); 330 pdata += len; 331 for (j = 0; j < last_frag; j++) { 332 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 333 void *fptr; 334 335 fptr = skb_frag_address_safe(frag); 336 if (!fptr) 337 goto normal_tx; 338 339 memcpy(pdata, fptr, skb_frag_size(frag)); 340 pdata += skb_frag_size(frag); 341 } 342 343 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 344 txbd->tx_bd_haddr = txr->data_mapping; 345 prod = NEXT_TX(prod); 346 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 347 memcpy(txbd, tx_push1, sizeof(*txbd)); 348 prod = NEXT_TX(prod); 349 tx_push->doorbell = 350 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 351 txr->tx_prod = prod; 352 353 tx_buf->is_push = 1; 354 netdev_tx_sent_queue(txq, skb->len); 355 wmb(); /* Sync is_push and byte queue before pushing data */ 356 357 push_len = (length + sizeof(*tx_push) + 7) / 8; 358 if (push_len > 16) { 359 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); 360 __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1, 361 (push_len - 16) << 1); 362 } else { 363 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 364 push_len); 365 } 366 367 goto tx_done; 368 } 369 370 normal_tx: 371 if (length < BNXT_MIN_PKT_SIZE) { 372 pad = BNXT_MIN_PKT_SIZE - length; 373 if (skb_pad(skb, pad)) { 374 /* SKB already freed. 
*/ 375 tx_buf->skb = NULL; 376 return NETDEV_TX_OK; 377 } 378 length = BNXT_MIN_PKT_SIZE; 379 } 380 381 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 382 383 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 384 dev_kfree_skb_any(skb); 385 tx_buf->skb = NULL; 386 return NETDEV_TX_OK; 387 } 388 389 dma_unmap_addr_set(tx_buf, mapping, mapping); 390 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 391 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 392 393 txbd->tx_bd_haddr = cpu_to_le64(mapping); 394 395 prod = NEXT_TX(prod); 396 txbd1 = (struct tx_bd_ext *) 397 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 398 399 txbd1->tx_bd_hsize_lflags = 0; 400 if (skb_is_gso(skb)) { 401 u32 hdr_len; 402 403 if (skb->encapsulation) 404 hdr_len = skb_inner_network_offset(skb) + 405 skb_inner_network_header_len(skb) + 406 inner_tcp_hdrlen(skb); 407 else 408 hdr_len = skb_transport_offset(skb) + 409 tcp_hdrlen(skb); 410 411 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | 412 TX_BD_FLAGS_T_IPID | 413 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 414 length = skb_shinfo(skb)->gso_size; 415 txbd1->tx_bd_mss = cpu_to_le32(length); 416 length += hdr_len; 417 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 418 txbd1->tx_bd_hsize_lflags = 419 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 420 txbd1->tx_bd_mss = 0; 421 } 422 423 length >>= 9; 424 flags |= bnxt_lhint_arr[length]; 425 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 426 427 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 428 txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action); 429 for (i = 0; i < last_frag; i++) { 430 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 431 432 prod = NEXT_TX(prod); 433 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 434 435 len = skb_frag_size(frag); 436 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 437 DMA_TO_DEVICE); 438 439 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 440 goto tx_dma_error; 441 442 tx_buf = &txr->tx_buf_ring[prod]; 443 dma_unmap_addr_set(tx_buf, mapping, mapping); 444 445 txbd->tx_bd_haddr = cpu_to_le64(mapping); 446 447 flags = len << TX_BD_LEN_SHIFT; 448 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 449 } 450 451 flags &= ~TX_BD_LEN; 452 txbd->tx_bd_len_flags_type = 453 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 454 TX_BD_FLAGS_PACKET_END); 455 456 netdev_tx_sent_queue(txq, skb->len); 457 458 /* Sync BD data before updating doorbell */ 459 wmb(); 460 461 prod = NEXT_TX(prod); 462 txr->tx_prod = prod; 463 464 writel(DB_KEY_TX | prod, txr->tx_doorbell); 465 writel(DB_KEY_TX | prod, txr->tx_doorbell); 466 467 tx_done: 468 469 mmiowb(); 470 471 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 472 netif_tx_stop_queue(txq); 473 474 /* netif_tx_stop_queue() must be done before checking 475 * tx index in bnxt_tx_avail() below, because in 476 * bnxt_tx_int(), we update tx index before checking for 477 * netif_tx_queue_stopped(). 
478 */ 479 smp_mb(); 480 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 481 netif_tx_wake_queue(txq); 482 } 483 return NETDEV_TX_OK; 484 485 tx_dma_error: 486 last_frag = i; 487 488 /* start back at beginning and unmap skb */ 489 prod = txr->tx_prod; 490 tx_buf = &txr->tx_buf_ring[prod]; 491 tx_buf->skb = NULL; 492 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 493 skb_headlen(skb), PCI_DMA_TODEVICE); 494 prod = NEXT_TX(prod); 495 496 /* unmap remaining mapped pages */ 497 for (i = 0; i < last_frag; i++) { 498 prod = NEXT_TX(prod); 499 tx_buf = &txr->tx_buf_ring[prod]; 500 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 501 skb_frag_size(&skb_shinfo(skb)->frags[i]), 502 PCI_DMA_TODEVICE); 503 } 504 505 dev_kfree_skb_any(skb); 506 return NETDEV_TX_OK; 507 } 508 509 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 510 { 511 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 512 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 513 u16 cons = txr->tx_cons; 514 struct pci_dev *pdev = bp->pdev; 515 int i; 516 unsigned int tx_bytes = 0; 517 518 for (i = 0; i < nr_pkts; i++) { 519 struct bnxt_sw_tx_bd *tx_buf; 520 struct sk_buff *skb; 521 int j, last; 522 523 tx_buf = &txr->tx_buf_ring[cons]; 524 cons = NEXT_TX(cons); 525 skb = tx_buf->skb; 526 tx_buf->skb = NULL; 527 528 if (tx_buf->is_push) { 529 tx_buf->is_push = 0; 530 goto next_tx_int; 531 } 532 533 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 534 skb_headlen(skb), PCI_DMA_TODEVICE); 535 last = tx_buf->nr_frags; 536 537 for (j = 0; j < last; j++) { 538 cons = NEXT_TX(cons); 539 tx_buf = &txr->tx_buf_ring[cons]; 540 dma_unmap_page( 541 &pdev->dev, 542 dma_unmap_addr(tx_buf, mapping), 543 skb_frag_size(&skb_shinfo(skb)->frags[j]), 544 PCI_DMA_TODEVICE); 545 } 546 547 next_tx_int: 548 cons = NEXT_TX(cons); 549 550 tx_bytes += skb->len; 551 dev_kfree_skb_any(skb); 552 } 553 554 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 555 txr->tx_cons = cons; 556 557 /* Need to make the tx_cons update visible to bnxt_start_xmit() 558 * before checking for netif_tx_queue_stopped(). Without the 559 * memory barrier, there is a small possibility that bnxt_start_xmit() 560 * will miss it and cause the queue to be stopped forever. 
561 */ 562 smp_mb(); 563 564 if (unlikely(netif_tx_queue_stopped(txq)) && 565 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 566 __netif_tx_lock(txq, smp_processor_id()); 567 if (netif_tx_queue_stopped(txq) && 568 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 569 txr->dev_state != BNXT_DEV_STATE_CLOSING) 570 netif_tx_wake_queue(txq); 571 __netif_tx_unlock(txq); 572 } 573 } 574 575 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 576 gfp_t gfp) 577 { 578 struct device *dev = &bp->pdev->dev; 579 struct page *page; 580 581 page = alloc_page(gfp); 582 if (!page) 583 return NULL; 584 585 *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir); 586 if (dma_mapping_error(dev, *mapping)) { 587 __free_page(page); 588 return NULL; 589 } 590 *mapping += bp->rx_dma_offset; 591 return page; 592 } 593 594 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 595 gfp_t gfp) 596 { 597 u8 *data; 598 struct pci_dev *pdev = bp->pdev; 599 600 data = kmalloc(bp->rx_buf_size, gfp); 601 if (!data) 602 return NULL; 603 604 *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset, 605 bp->rx_buf_use_size, bp->rx_dir); 606 607 if (dma_mapping_error(&pdev->dev, *mapping)) { 608 kfree(data); 609 data = NULL; 610 } 611 return data; 612 } 613 614 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 615 u16 prod, gfp_t gfp) 616 { 617 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 618 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 619 dma_addr_t mapping; 620 621 if (BNXT_RX_PAGE_MODE(bp)) { 622 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp); 623 624 if (!page) 625 return -ENOMEM; 626 627 rx_buf->data = page; 628 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 629 } else { 630 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 631 632 if (!data) 633 return -ENOMEM; 634 635 rx_buf->data = data; 636 rx_buf->data_ptr = data + bp->rx_offset; 637 } 638 rx_buf->mapping = mapping; 639 640 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 641 return 0; 642 } 643 644 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 645 { 646 u16 prod = rxr->rx_prod; 647 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 648 struct rx_bd *cons_bd, *prod_bd; 649 650 prod_rx_buf = &rxr->rx_buf_ring[prod]; 651 cons_rx_buf = &rxr->rx_buf_ring[cons]; 652 653 prod_rx_buf->data = data; 654 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 655 656 prod_rx_buf->mapping = cons_rx_buf->mapping; 657 658 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 659 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 660 661 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 662 } 663 664 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 665 { 666 u16 next, max = rxr->rx_agg_bmap_size; 667 668 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 669 if (next >= max) 670 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 671 return next; 672 } 673 674 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 675 struct bnxt_rx_ring_info *rxr, 676 u16 prod, gfp_t gfp) 677 { 678 struct rx_bd *rxbd = 679 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 680 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 681 struct pci_dev *pdev = bp->pdev; 682 struct page *page; 683 dma_addr_t mapping; 684 u16 sw_prod = rxr->rx_sw_agg_prod; 685 unsigned int offset = 0; 686 687 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 688 page = rxr->rx_page; 689 if (!page) { 690 page = alloc_page(gfp); 691 if (!page) 692 return 
-ENOMEM; 693 rxr->rx_page = page; 694 rxr->rx_page_offset = 0; 695 } 696 offset = rxr->rx_page_offset; 697 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 698 if (rxr->rx_page_offset == PAGE_SIZE) 699 rxr->rx_page = NULL; 700 else 701 get_page(page); 702 } else { 703 page = alloc_page(gfp); 704 if (!page) 705 return -ENOMEM; 706 } 707 708 mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE, 709 PCI_DMA_FROMDEVICE); 710 if (dma_mapping_error(&pdev->dev, mapping)) { 711 __free_page(page); 712 return -EIO; 713 } 714 715 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 716 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 717 718 __set_bit(sw_prod, rxr->rx_agg_bmap); 719 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 720 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 721 722 rx_agg_buf->page = page; 723 rx_agg_buf->offset = offset; 724 rx_agg_buf->mapping = mapping; 725 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 726 rxbd->rx_bd_opaque = sw_prod; 727 return 0; 728 } 729 730 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, 731 u32 agg_bufs) 732 { 733 struct bnxt *bp = bnapi->bp; 734 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 735 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 736 u16 prod = rxr->rx_agg_prod; 737 u16 sw_prod = rxr->rx_sw_agg_prod; 738 u32 i; 739 740 for (i = 0; i < agg_bufs; i++) { 741 u16 cons; 742 struct rx_agg_cmp *agg; 743 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 744 struct rx_bd *prod_bd; 745 struct page *page; 746 747 agg = (struct rx_agg_cmp *) 748 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 749 cons = agg->rx_agg_cmp_opaque; 750 __clear_bit(cons, rxr->rx_agg_bmap); 751 752 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 753 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 754 755 __set_bit(sw_prod, rxr->rx_agg_bmap); 756 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 757 cons_rx_buf = &rxr->rx_agg_ring[cons]; 758 759 /* It is possible for sw_prod to be equal to cons, so 760 * set cons_rx_buf->page to NULL first. 
761 */ 762 page = cons_rx_buf->page; 763 cons_rx_buf->page = NULL; 764 prod_rx_buf->page = page; 765 prod_rx_buf->offset = cons_rx_buf->offset; 766 767 prod_rx_buf->mapping = cons_rx_buf->mapping; 768 769 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 770 771 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 772 prod_bd->rx_bd_opaque = sw_prod; 773 774 prod = NEXT_RX_AGG(prod); 775 sw_prod = NEXT_RX_AGG(sw_prod); 776 cp_cons = NEXT_CMP(cp_cons); 777 } 778 rxr->rx_agg_prod = prod; 779 rxr->rx_sw_agg_prod = sw_prod; 780 } 781 782 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 783 struct bnxt_rx_ring_info *rxr, 784 u16 cons, void *data, u8 *data_ptr, 785 dma_addr_t dma_addr, 786 unsigned int offset_and_len) 787 { 788 unsigned int payload = offset_and_len >> 16; 789 unsigned int len = offset_and_len & 0xffff; 790 struct skb_frag_struct *frag; 791 struct page *page = data; 792 u16 prod = rxr->rx_prod; 793 struct sk_buff *skb; 794 int off, err; 795 796 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 797 if (unlikely(err)) { 798 bnxt_reuse_rx_data(rxr, cons, data); 799 return NULL; 800 } 801 dma_addr -= bp->rx_dma_offset; 802 dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir); 803 804 if (unlikely(!payload)) 805 payload = eth_get_headlen(data_ptr, len); 806 807 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 808 if (!skb) { 809 __free_page(page); 810 return NULL; 811 } 812 813 off = (void *)data_ptr - page_address(page); 814 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 815 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 816 payload + NET_IP_ALIGN); 817 818 frag = &skb_shinfo(skb)->frags[0]; 819 skb_frag_size_sub(frag, payload); 820 frag->page_offset += payload; 821 skb->data_len -= payload; 822 skb->tail += payload; 823 824 return skb; 825 } 826 827 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 828 struct bnxt_rx_ring_info *rxr, u16 cons, 829 void *data, u8 *data_ptr, 830 dma_addr_t dma_addr, 831 unsigned int offset_and_len) 832 { 833 u16 prod = rxr->rx_prod; 834 struct sk_buff *skb; 835 int err; 836 837 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 838 if (unlikely(err)) { 839 bnxt_reuse_rx_data(rxr, cons, data); 840 return NULL; 841 } 842 843 skb = build_skb(data, 0); 844 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 845 bp->rx_dir); 846 if (!skb) { 847 kfree(data); 848 return NULL; 849 } 850 851 skb_reserve(skb, bp->rx_offset); 852 skb_put(skb, offset_and_len & 0xffff); 853 return skb; 854 } 855 856 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, 857 struct sk_buff *skb, u16 cp_cons, 858 u32 agg_bufs) 859 { 860 struct pci_dev *pdev = bp->pdev; 861 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 862 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 863 u16 prod = rxr->rx_agg_prod; 864 u32 i; 865 866 for (i = 0; i < agg_bufs; i++) { 867 u16 cons, frag_len; 868 struct rx_agg_cmp *agg; 869 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 870 struct page *page; 871 dma_addr_t mapping; 872 873 agg = (struct rx_agg_cmp *) 874 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 875 cons = agg->rx_agg_cmp_opaque; 876 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 877 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 878 879 cons_rx_buf = &rxr->rx_agg_ring[cons]; 880 skb_fill_page_desc(skb, i, cons_rx_buf->page, 881 cons_rx_buf->offset, frag_len); 882 __clear_bit(cons, rxr->rx_agg_bmap); 883 884 /* It is possible for bnxt_alloc_rx_page() to allocate 885 * a sw_prod index that 
equals the cons index, so we 886 * need to clear the cons entry now. 887 */ 888 mapping = cons_rx_buf->mapping; 889 page = cons_rx_buf->page; 890 cons_rx_buf->page = NULL; 891 892 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 893 struct skb_shared_info *shinfo; 894 unsigned int nr_frags; 895 896 shinfo = skb_shinfo(skb); 897 nr_frags = --shinfo->nr_frags; 898 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 899 900 dev_kfree_skb(skb); 901 902 cons_rx_buf->page = page; 903 904 /* Update prod since possibly some pages have been 905 * allocated already. 906 */ 907 rxr->rx_agg_prod = prod; 908 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i); 909 return NULL; 910 } 911 912 dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 913 PCI_DMA_FROMDEVICE); 914 915 skb->data_len += frag_len; 916 skb->len += frag_len; 917 skb->truesize += PAGE_SIZE; 918 919 prod = NEXT_RX_AGG(prod); 920 cp_cons = NEXT_CMP(cp_cons); 921 } 922 rxr->rx_agg_prod = prod; 923 return skb; 924 } 925 926 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 927 u8 agg_bufs, u32 *raw_cons) 928 { 929 u16 last; 930 struct rx_agg_cmp *agg; 931 932 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 933 last = RING_CMP(*raw_cons); 934 agg = (struct rx_agg_cmp *) 935 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 936 return RX_AGG_CMP_VALID(agg, *raw_cons); 937 } 938 939 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 940 unsigned int len, 941 dma_addr_t mapping) 942 { 943 struct bnxt *bp = bnapi->bp; 944 struct pci_dev *pdev = bp->pdev; 945 struct sk_buff *skb; 946 947 skb = napi_alloc_skb(&bnapi->napi, len); 948 if (!skb) 949 return NULL; 950 951 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 952 bp->rx_dir); 953 954 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 955 len + NET_IP_ALIGN); 956 957 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 958 bp->rx_dir); 959 960 skb_put(skb, len); 961 return skb; 962 } 963 964 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, 965 u32 *raw_cons, void *cmp) 966 { 967 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 968 struct rx_cmp *rxcmp = cmp; 969 u32 tmp_raw_cons = *raw_cons; 970 u8 cmp_type, agg_bufs = 0; 971 972 cmp_type = RX_CMP_TYPE(rxcmp); 973 974 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 975 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 976 RX_CMP_AGG_BUFS) >> 977 RX_CMP_AGG_BUFS_SHIFT; 978 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 979 struct rx_tpa_end_cmp *tpa_end = cmp; 980 981 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 982 RX_TPA_END_CMP_AGG_BUFS) >> 983 RX_TPA_END_CMP_AGG_BUFS_SHIFT; 984 } 985 986 if (agg_bufs) { 987 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 988 return -EBUSY; 989 } 990 *raw_cons = tmp_raw_cons; 991 return 0; 992 } 993 994 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 995 { 996 if (!rxr->bnapi->in_reset) { 997 rxr->bnapi->in_reset = true; 998 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 999 schedule_work(&bp->sp_task); 1000 } 1001 rxr->rx_next_cons = 0xffff; 1002 } 1003 1004 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1005 struct rx_tpa_start_cmp *tpa_start, 1006 struct rx_tpa_start_cmp_ext *tpa_start1) 1007 { 1008 u8 agg_id = TPA_START_AGG_ID(tpa_start); 1009 u16 cons, prod; 1010 struct bnxt_tpa_info *tpa_info; 1011 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1012 struct rx_bd *prod_bd; 1013 dma_addr_t mapping; 
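	/* Park the buffer that the TPA_START completed on in the per-agg_id
	 * tpa_info slot for the duration of the aggregation, and put the
	 * spare buffer previously held in tpa_info back on the ring at the
	 * producer index so the RX ring stays full.
	 */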
1014 1015 cons = tpa_start->rx_tpa_start_cmp_opaque; 1016 prod = rxr->rx_prod; 1017 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1018 prod_rx_buf = &rxr->rx_buf_ring[prod]; 1019 tpa_info = &rxr->rx_tpa[agg_id]; 1020 1021 if (unlikely(cons != rxr->rx_next_cons)) { 1022 bnxt_sched_reset(bp, rxr); 1023 return; 1024 } 1025 1026 prod_rx_buf->data = tpa_info->data; 1027 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1028 1029 mapping = tpa_info->mapping; 1030 prod_rx_buf->mapping = mapping; 1031 1032 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 1033 1034 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1035 1036 tpa_info->data = cons_rx_buf->data; 1037 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1038 cons_rx_buf->data = NULL; 1039 tpa_info->mapping = cons_rx_buf->mapping; 1040 1041 tpa_info->len = 1042 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1043 RX_TPA_START_CMP_LEN_SHIFT; 1044 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1045 u32 hash_type = TPA_START_HASH_TYPE(tpa_start); 1046 1047 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1048 tpa_info->gso_type = SKB_GSO_TCPV4; 1049 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1050 if (hash_type == 3) 1051 tpa_info->gso_type = SKB_GSO_TCPV6; 1052 tpa_info->rss_hash = 1053 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1054 } else { 1055 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1056 tpa_info->gso_type = 0; 1057 if (netif_msg_rx_err(bp)) 1058 netdev_warn(bp->dev, "TPA packet without valid hash\n"); 1059 } 1060 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1061 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1062 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1063 1064 rxr->rx_prod = NEXT_RX(prod); 1065 cons = NEXT_RX(cons); 1066 rxr->rx_next_cons = NEXT_RX(cons); 1067 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1068 1069 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1070 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1071 cons_rx_buf->data = NULL; 1072 } 1073 1074 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, 1075 u16 cp_cons, u32 agg_bufs) 1076 { 1077 if (agg_bufs) 1078 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 1079 } 1080 1081 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1082 int payload_off, int tcp_ts, 1083 struct sk_buff *skb) 1084 { 1085 #ifdef CONFIG_INET 1086 struct tcphdr *th; 1087 int len, nw_off; 1088 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1089 u32 hdr_info = tpa_info->hdr_info; 1090 bool loopback = false; 1091 1092 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1093 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1094 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1095 1096 /* If the packet is an internal loopback packet, the offsets will 1097 * have an extra 4 bytes. 1098 */ 1099 if (inner_mac_off == 4) { 1100 loopback = true; 1101 } else if (inner_mac_off > 4) { 1102 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1103 ETH_HLEN - 2)); 1104 1105 /* We only support inner iPv4/ipv6. If we don't see the 1106 * correct protocol ID, it must be a loopback packet where 1107 * the offsets are off by 4. 
1108 */ 1109 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1110 loopback = true; 1111 } 1112 if (loopback) { 1113 /* internal loopback packet, subtract all offsets by 4 */ 1114 inner_ip_off -= 4; 1115 inner_mac_off -= 4; 1116 outer_ip_off -= 4; 1117 } 1118 1119 nw_off = inner_ip_off - ETH_HLEN; 1120 skb_set_network_header(skb, nw_off); 1121 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1122 struct ipv6hdr *iph = ipv6_hdr(skb); 1123 1124 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1125 len = skb->len - skb_transport_offset(skb); 1126 th = tcp_hdr(skb); 1127 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1128 } else { 1129 struct iphdr *iph = ip_hdr(skb); 1130 1131 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1132 len = skb->len - skb_transport_offset(skb); 1133 th = tcp_hdr(skb); 1134 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1135 } 1136 1137 if (inner_mac_off) { /* tunnel */ 1138 struct udphdr *uh = NULL; 1139 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1140 ETH_HLEN - 2)); 1141 1142 if (proto == htons(ETH_P_IP)) { 1143 struct iphdr *iph = (struct iphdr *)skb->data; 1144 1145 if (iph->protocol == IPPROTO_UDP) 1146 uh = (struct udphdr *)(iph + 1); 1147 } else { 1148 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1149 1150 if (iph->nexthdr == IPPROTO_UDP) 1151 uh = (struct udphdr *)(iph + 1); 1152 } 1153 if (uh) { 1154 if (uh->check) 1155 skb_shinfo(skb)->gso_type |= 1156 SKB_GSO_UDP_TUNNEL_CSUM; 1157 else 1158 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1159 } 1160 } 1161 #endif 1162 return skb; 1163 } 1164 1165 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1166 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1167 1168 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1169 int payload_off, int tcp_ts, 1170 struct sk_buff *skb) 1171 { 1172 #ifdef CONFIG_INET 1173 struct tcphdr *th; 1174 int len, nw_off, tcp_opt_len = 0; 1175 1176 if (tcp_ts) 1177 tcp_opt_len = 12; 1178 1179 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1180 struct iphdr *iph; 1181 1182 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1183 ETH_HLEN; 1184 skb_set_network_header(skb, nw_off); 1185 iph = ip_hdr(skb); 1186 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1187 len = skb->len - skb_transport_offset(skb); 1188 th = tcp_hdr(skb); 1189 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1190 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1191 struct ipv6hdr *iph; 1192 1193 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1194 ETH_HLEN; 1195 skb_set_network_header(skb, nw_off); 1196 iph = ipv6_hdr(skb); 1197 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1198 len = skb->len - skb_transport_offset(skb); 1199 th = tcp_hdr(skb); 1200 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1201 } else { 1202 dev_kfree_skb_any(skb); 1203 return NULL; 1204 } 1205 1206 if (nw_off) { /* tunnel */ 1207 struct udphdr *uh = NULL; 1208 1209 if (skb->protocol == htons(ETH_P_IP)) { 1210 struct iphdr *iph = (struct iphdr *)skb->data; 1211 1212 if (iph->protocol == IPPROTO_UDP) 1213 uh = (struct udphdr *)(iph + 1); 1214 } else { 1215 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1216 1217 if (iph->nexthdr == IPPROTO_UDP) 1218 uh = (struct udphdr *)(iph + 1); 1219 } 1220 if (uh) { 1221 if (uh->check) 1222 skb_shinfo(skb)->gso_type |= 1223 SKB_GSO_UDP_TUNNEL_CSUM; 1224 else 1225 
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1226 } 1227 } 1228 #endif 1229 return skb; 1230 } 1231 1232 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1233 struct bnxt_tpa_info *tpa_info, 1234 struct rx_tpa_end_cmp *tpa_end, 1235 struct rx_tpa_end_cmp_ext *tpa_end1, 1236 struct sk_buff *skb) 1237 { 1238 #ifdef CONFIG_INET 1239 int payload_off; 1240 u16 segs; 1241 1242 segs = TPA_END_TPA_SEGS(tpa_end); 1243 if (segs == 1) 1244 return skb; 1245 1246 NAPI_GRO_CB(skb)->count = segs; 1247 skb_shinfo(skb)->gso_size = 1248 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1249 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1250 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 1251 RX_TPA_END_CMP_PAYLOAD_OFFSET) >> 1252 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; 1253 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1254 if (likely(skb)) 1255 tcp_gro_complete(skb); 1256 #endif 1257 return skb; 1258 } 1259 1260 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1261 struct bnxt_napi *bnapi, 1262 u32 *raw_cons, 1263 struct rx_tpa_end_cmp *tpa_end, 1264 struct rx_tpa_end_cmp_ext *tpa_end1, 1265 u8 *event) 1266 { 1267 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1268 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1269 u8 agg_id = TPA_END_AGG_ID(tpa_end); 1270 u8 *data_ptr, agg_bufs; 1271 u16 cp_cons = RING_CMP(*raw_cons); 1272 unsigned int len; 1273 struct bnxt_tpa_info *tpa_info; 1274 dma_addr_t mapping; 1275 struct sk_buff *skb; 1276 void *data; 1277 1278 if (unlikely(bnapi->in_reset)) { 1279 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); 1280 1281 if (rc < 0) 1282 return ERR_PTR(-EBUSY); 1283 return NULL; 1284 } 1285 1286 tpa_info = &rxr->rx_tpa[agg_id]; 1287 data = tpa_info->data; 1288 data_ptr = tpa_info->data_ptr; 1289 prefetch(data_ptr); 1290 len = tpa_info->len; 1291 mapping = tpa_info->mapping; 1292 1293 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 1294 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; 1295 1296 if (agg_bufs) { 1297 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1298 return ERR_PTR(-EBUSY); 1299 1300 *event |= BNXT_AGG_EVENT; 1301 cp_cons = NEXT_CMP(cp_cons); 1302 } 1303 1304 if (unlikely(agg_bufs > MAX_SKB_FRAGS)) { 1305 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1306 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1307 agg_bufs, (int)MAX_SKB_FRAGS); 1308 return NULL; 1309 } 1310 1311 if (len <= bp->rx_copy_thresh) { 1312 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1313 if (!skb) { 1314 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1315 return NULL; 1316 } 1317 } else { 1318 u8 *new_data; 1319 dma_addr_t new_mapping; 1320 1321 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); 1322 if (!new_data) { 1323 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1324 return NULL; 1325 } 1326 1327 tpa_info->data = new_data; 1328 tpa_info->data_ptr = new_data + bp->rx_offset; 1329 tpa_info->mapping = new_mapping; 1330 1331 skb = build_skb(data, 0); 1332 dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, 1333 bp->rx_dir); 1334 1335 if (!skb) { 1336 kfree(data); 1337 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1338 return NULL; 1339 } 1340 skb_reserve(skb, bp->rx_offset); 1341 skb_put(skb, len); 1342 } 1343 1344 if (agg_bufs) { 1345 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); 1346 if (!skb) { 1347 /* Page reuse already handled by bnxt_rx_pages(). 
 */
*/ 1348 return NULL; 1349 } 1350 } 1351 skb->protocol = eth_type_trans(skb, bp->dev); 1352 1353 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1354 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1355 1356 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1357 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1358 u16 vlan_proto = tpa_info->metadata >> 1359 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1360 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; 1361 1362 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1363 } 1364 1365 skb_checksum_none_assert(skb); 1366 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1367 skb->ip_summed = CHECKSUM_UNNECESSARY; 1368 skb->csum_level = 1369 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1370 } 1371 1372 if (TPA_END_GRO(tpa_end)) 1373 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1374 1375 return skb; 1376 } 1377 1378 /* returns the following: 1379 * 1 - 1 packet successfully received 1380 * 0 - successful TPA_START, packet not completed yet 1381 * -EBUSY - completion ring does not have all the agg buffers yet 1382 * -ENOMEM - packet aborted due to out of memory 1383 * -EIO - packet aborted due to hw error indicated in BD 1384 */ 1385 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, 1386 u8 *event) 1387 { 1388 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1389 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1390 struct net_device *dev = bp->dev; 1391 struct rx_cmp *rxcmp; 1392 struct rx_cmp_ext *rxcmp1; 1393 u32 tmp_raw_cons = *raw_cons; 1394 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1395 struct bnxt_sw_rx_bd *rx_buf; 1396 unsigned int len; 1397 u8 *data_ptr, agg_bufs, cmp_type; 1398 dma_addr_t dma_addr; 1399 struct sk_buff *skb; 1400 void *data; 1401 int rc = 0; 1402 u32 misc; 1403 1404 rxcmp = (struct rx_cmp *) 1405 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1406 1407 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1408 cp_cons = RING_CMP(tmp_raw_cons); 1409 rxcmp1 = (struct rx_cmp_ext *) 1410 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1411 1412 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1413 return -EBUSY; 1414 1415 cmp_type = RX_CMP_TYPE(rxcmp); 1416 1417 prod = rxr->rx_prod; 1418 1419 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1420 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1421 (struct rx_tpa_start_cmp_ext *)rxcmp1); 1422 1423 *event |= BNXT_RX_EVENT; 1424 goto next_rx_no_prod; 1425 1426 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1427 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, 1428 (struct rx_tpa_end_cmp *)rxcmp, 1429 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1430 1431 if (unlikely(IS_ERR(skb))) 1432 return -EBUSY; 1433 1434 rc = -ENOMEM; 1435 if (likely(skb)) { 1436 skb_record_rx_queue(skb, bnapi->index); 1437 napi_gro_receive(&bnapi->napi, skb); 1438 rc = 1; 1439 } 1440 *event |= BNXT_RX_EVENT; 1441 goto next_rx_no_prod; 1442 } 1443 1444 cons = rxcmp->rx_cmp_opaque; 1445 rx_buf = &rxr->rx_buf_ring[cons]; 1446 data = rx_buf->data; 1447 data_ptr = rx_buf->data_ptr; 1448 if (unlikely(cons != rxr->rx_next_cons)) { 1449 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); 1450 1451 bnxt_sched_reset(bp, rxr); 1452 return rc1; 1453 } 1454 prefetch(data_ptr); 1455 1456 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1457 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1458 1459 if (agg_bufs) { 1460 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1461 return -EBUSY; 
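	/* All aggregation completions are present; advance cp_cons past the
	 * second half of the RX completion so it points at the first
	 * aggregation entry consumed by bnxt_rx_pages().
	 */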
1462 1463 cp_cons = NEXT_CMP(cp_cons); 1464 *event |= BNXT_AGG_EVENT; 1465 } 1466 *event |= BNXT_RX_EVENT; 1467 1468 rx_buf->data = NULL; 1469 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1470 bnxt_reuse_rx_data(rxr, cons, data); 1471 if (agg_bufs) 1472 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 1473 1474 rc = -EIO; 1475 goto next_rx; 1476 } 1477 1478 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1479 dma_addr = rx_buf->mapping; 1480 1481 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1482 rc = 1; 1483 goto next_rx; 1484 } 1485 1486 if (len <= bp->rx_copy_thresh) { 1487 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1488 bnxt_reuse_rx_data(rxr, cons, data); 1489 if (!skb) { 1490 rc = -ENOMEM; 1491 goto next_rx; 1492 } 1493 } else { 1494 u32 payload; 1495 1496 if (rx_buf->data_ptr == data_ptr) 1497 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1498 else 1499 payload = 0; 1500 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1501 payload | len); 1502 if (!skb) { 1503 rc = -ENOMEM; 1504 goto next_rx; 1505 } 1506 } 1507 1508 if (agg_bufs) { 1509 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); 1510 if (!skb) { 1511 rc = -ENOMEM; 1512 goto next_rx; 1513 } 1514 } 1515 1516 if (RX_CMP_HASH_VALID(rxcmp)) { 1517 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1518 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1519 1520 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1521 if (hash_type != 1 && hash_type != 3) 1522 type = PKT_HASH_TYPE_L3; 1523 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1524 } 1525 1526 skb->protocol = eth_type_trans(skb, dev); 1527 1528 if ((rxcmp1->rx_cmp_flags2 & 1529 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1530 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1531 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1532 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; 1533 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1534 1535 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1536 } 1537 1538 skb_checksum_none_assert(skb); 1539 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1540 if (dev->features & NETIF_F_RXCSUM) { 1541 skb->ip_summed = CHECKSUM_UNNECESSARY; 1542 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1543 } 1544 } else { 1545 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1546 if (dev->features & NETIF_F_RXCSUM) 1547 cpr->rx_l4_csum_errors++; 1548 } 1549 } 1550 1551 skb_record_rx_queue(skb, bnapi->index); 1552 napi_gro_receive(&bnapi->napi, skb); 1553 rc = 1; 1554 1555 next_rx: 1556 rxr->rx_prod = NEXT_RX(prod); 1557 rxr->rx_next_cons = NEXT_RX(cons); 1558 1559 next_rx_no_prod: 1560 *raw_cons = tmp_raw_cons; 1561 1562 return rc; 1563 } 1564 1565 #define BNXT_GET_EVENT_PORT(data) \ 1566 ((data) & \ 1567 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 1568 1569 static int bnxt_async_event_process(struct bnxt *bp, 1570 struct hwrm_async_event_cmpl *cmpl) 1571 { 1572 u16 event_id = le16_to_cpu(cmpl->event_id); 1573 1574 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 1575 switch (event_id) { 1576 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 1577 u32 data1 = le32_to_cpu(cmpl->event_data1); 1578 struct bnxt_link_info *link_info = &bp->link_info; 1579 1580 if (BNXT_VF(bp)) 1581 goto async_event_process_exit; 1582 if (data1 & 0x20000) { 1583 u16 fw_speed = link_info->force_link_speed; 1584 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 1585 1586 netdev_warn(bp->dev, "Link speed %d no 
longer supported\n", 1587 speed); 1588 } 1589 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 1590 /* fall thru */ 1591 } 1592 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 1593 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 1594 break; 1595 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 1596 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 1597 break; 1598 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 1599 u32 data1 = le32_to_cpu(cmpl->event_data1); 1600 u16 port_id = BNXT_GET_EVENT_PORT(data1); 1601 1602 if (BNXT_VF(bp)) 1603 break; 1604 1605 if (bp->pf.port_id != port_id) 1606 break; 1607 1608 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1609 break; 1610 } 1611 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 1612 if (BNXT_PF(bp)) 1613 goto async_event_process_exit; 1614 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 1615 break; 1616 default: 1617 goto async_event_process_exit; 1618 } 1619 schedule_work(&bp->sp_task); 1620 async_event_process_exit: 1621 bnxt_ulp_async_events(bp, cmpl); 1622 return 0; 1623 } 1624 1625 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 1626 { 1627 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 1628 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 1629 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 1630 (struct hwrm_fwd_req_cmpl *)txcmp; 1631 1632 switch (cmpl_type) { 1633 case CMPL_BASE_TYPE_HWRM_DONE: 1634 seq_id = le16_to_cpu(h_cmpl->sequence_id); 1635 if (seq_id == bp->hwrm_intr_seq_id) 1636 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; 1637 else 1638 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 1639 break; 1640 1641 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 1642 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 1643 1644 if ((vf_id < bp->pf.first_vf_id) || 1645 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 1646 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 1647 vf_id); 1648 return -EINVAL; 1649 } 1650 1651 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 1652 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 1653 schedule_work(&bp->sp_task); 1654 break; 1655 1656 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 1657 bnxt_async_event_process(bp, 1658 (struct hwrm_async_event_cmpl *)txcmp); 1659 1660 default: 1661 break; 1662 } 1663 1664 return 0; 1665 } 1666 1667 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 1668 { 1669 struct bnxt_napi *bnapi = dev_instance; 1670 struct bnxt *bp = bnapi->bp; 1671 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1672 u32 cons = RING_CMP(cpr->cp_raw_cons); 1673 1674 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 1675 napi_schedule(&bnapi->napi); 1676 return IRQ_HANDLED; 1677 } 1678 1679 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 1680 { 1681 u32 raw_cons = cpr->cp_raw_cons; 1682 u16 cons = RING_CMP(raw_cons); 1683 struct tx_cmp *txcmp; 1684 1685 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 1686 1687 return TX_CMP_VALID(txcmp, raw_cons); 1688 } 1689 1690 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 1691 { 1692 struct bnxt_napi *bnapi = dev_instance; 1693 struct bnxt *bp = bnapi->bp; 1694 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1695 u32 cons = RING_CMP(cpr->cp_raw_cons); 1696 u32 int_status; 1697 1698 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 1699 1700 if (!bnxt_has_work(bp, cpr)) { 1701 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 1702 /* return if erroneous interrupt */ 1703 if (!(int_status & (0x10000 << 
cpr->cp_ring_struct.fw_ring_id))) 1704 return IRQ_NONE; 1705 } 1706 1707 /* disable ring IRQ */ 1708 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); 1709 1710 /* Return here if interrupt is shared and is disabled. */ 1711 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 1712 return IRQ_HANDLED; 1713 1714 napi_schedule(&bnapi->napi); 1715 return IRQ_HANDLED; 1716 } 1717 1718 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 1719 { 1720 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1721 u32 raw_cons = cpr->cp_raw_cons; 1722 u32 cons; 1723 int tx_pkts = 0; 1724 int rx_pkts = 0; 1725 u8 event = 0; 1726 struct tx_cmp *txcmp; 1727 1728 while (1) { 1729 int rc; 1730 1731 cons = RING_CMP(raw_cons); 1732 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 1733 1734 if (!TX_CMP_VALID(txcmp, raw_cons)) 1735 break; 1736 1737 /* The valid test of the entry must be done first before 1738 * reading any further. 1739 */ 1740 dma_rmb(); 1741 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1742 tx_pkts++; 1743 /* return full budget so NAPI will complete. */ 1744 if (unlikely(tx_pkts > bp->tx_wake_thresh)) 1745 rx_pkts = budget; 1746 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1747 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1748 if (likely(rc >= 0)) 1749 rx_pkts += rc; 1750 else if (rc == -EBUSY) /* partial completion */ 1751 break; 1752 } else if (unlikely((TX_CMP_TYPE(txcmp) == 1753 CMPL_BASE_TYPE_HWRM_DONE) || 1754 (TX_CMP_TYPE(txcmp) == 1755 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 1756 (TX_CMP_TYPE(txcmp) == 1757 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 1758 bnxt_hwrm_handler(bp, txcmp); 1759 } 1760 raw_cons = NEXT_RAW_CMP(raw_cons); 1761 1762 if (rx_pkts == budget) 1763 break; 1764 } 1765 1766 if (event & BNXT_TX_EVENT) { 1767 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 1768 void __iomem *db = txr->tx_doorbell; 1769 u16 prod = txr->tx_prod; 1770 1771 /* Sync BD data before updating doorbell */ 1772 wmb(); 1773 1774 writel(DB_KEY_TX | prod, db); 1775 writel(DB_KEY_TX | prod, db); 1776 } 1777 1778 cpr->cp_raw_cons = raw_cons; 1779 /* ACK completion ring before freeing tx ring and producing new 1780 * buffers in rx/agg rings to prevent overflowing the completion 1781 * ring. 
1782 */ 1783 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 1784 1785 if (tx_pkts) 1786 bnapi->tx_int(bp, bnapi, tx_pkts); 1787 1788 if (event & BNXT_RX_EVENT) { 1789 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1790 1791 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 1792 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 1793 if (event & BNXT_AGG_EVENT) { 1794 writel(DB_KEY_RX | rxr->rx_agg_prod, 1795 rxr->rx_agg_doorbell); 1796 writel(DB_KEY_RX | rxr->rx_agg_prod, 1797 rxr->rx_agg_doorbell); 1798 } 1799 } 1800 return rx_pkts; 1801 } 1802 1803 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 1804 { 1805 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 1806 struct bnxt *bp = bnapi->bp; 1807 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1808 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1809 struct tx_cmp *txcmp; 1810 struct rx_cmp_ext *rxcmp1; 1811 u32 cp_cons, tmp_raw_cons; 1812 u32 raw_cons = cpr->cp_raw_cons; 1813 u32 rx_pkts = 0; 1814 u8 event = 0; 1815 1816 while (1) { 1817 int rc; 1818 1819 cp_cons = RING_CMP(raw_cons); 1820 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1821 1822 if (!TX_CMP_VALID(txcmp, raw_cons)) 1823 break; 1824 1825 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1826 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 1827 cp_cons = RING_CMP(tmp_raw_cons); 1828 rxcmp1 = (struct rx_cmp_ext *) 1829 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1830 1831 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1832 break; 1833 1834 /* force an error to recycle the buffer */ 1835 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1836 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1837 1838 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1839 if (likely(rc == -EIO)) 1840 rx_pkts++; 1841 else if (rc == -EBUSY) /* partial completion */ 1842 break; 1843 } else if (unlikely(TX_CMP_TYPE(txcmp) == 1844 CMPL_BASE_TYPE_HWRM_DONE)) { 1845 bnxt_hwrm_handler(bp, txcmp); 1846 } else { 1847 netdev_err(bp->dev, 1848 "Invalid completion received on special ring\n"); 1849 } 1850 raw_cons = NEXT_RAW_CMP(raw_cons); 1851 1852 if (rx_pkts == budget) 1853 break; 1854 } 1855 1856 cpr->cp_raw_cons = raw_cons; 1857 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 1858 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 1859 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 1860 1861 if (event & BNXT_AGG_EVENT) { 1862 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); 1863 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); 1864 } 1865 1866 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 1867 napi_complete_done(napi, rx_pkts); 1868 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 1869 } 1870 return rx_pkts; 1871 } 1872 1873 static int bnxt_poll(struct napi_struct *napi, int budget) 1874 { 1875 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 1876 struct bnxt *bp = bnapi->bp; 1877 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1878 int work_done = 0; 1879 1880 while (1) { 1881 work_done += bnxt_poll_work(bp, bnapi, budget - work_done); 1882 1883 if (work_done >= budget) 1884 break; 1885 1886 if (!bnxt_has_work(bp, cpr)) { 1887 if (napi_complete_done(napi, work_done)) 1888 BNXT_CP_DB_REARM(cpr->cp_doorbell, 1889 cpr->cp_raw_cons); 1890 break; 1891 } 1892 } 1893 mmiowb(); 1894 return work_done; 1895 } 1896 1897 static void bnxt_free_tx_skbs(struct bnxt *bp) 1898 { 1899 int i, max_idx; 1900 struct pci_dev *pdev = bp->pdev; 1901 1902 if (!bp->tx_ring) 1903 return; 1904 1905 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 1906 for 
(i = 0; i < bp->tx_nr_rings; i++) { 1907 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 1908 int j; 1909 1910 for (j = 0; j < max_idx;) { 1911 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 1912 struct sk_buff *skb = tx_buf->skb; 1913 int k, last; 1914 1915 if (!skb) { 1916 j++; 1917 continue; 1918 } 1919 1920 tx_buf->skb = NULL; 1921 1922 if (tx_buf->is_push) { 1923 dev_kfree_skb(skb); 1924 j += 2; 1925 continue; 1926 } 1927 1928 dma_unmap_single(&pdev->dev, 1929 dma_unmap_addr(tx_buf, mapping), 1930 skb_headlen(skb), 1931 PCI_DMA_TODEVICE); 1932 1933 last = tx_buf->nr_frags; 1934 j += 2; 1935 for (k = 0; k < last; k++, j++) { 1936 int ring_idx = j & bp->tx_ring_mask; 1937 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 1938 1939 tx_buf = &txr->tx_buf_ring[ring_idx]; 1940 dma_unmap_page( 1941 &pdev->dev, 1942 dma_unmap_addr(tx_buf, mapping), 1943 skb_frag_size(frag), PCI_DMA_TODEVICE); 1944 } 1945 dev_kfree_skb(skb); 1946 } 1947 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 1948 } 1949 } 1950 1951 static void bnxt_free_rx_skbs(struct bnxt *bp) 1952 { 1953 int i, max_idx, max_agg_idx; 1954 struct pci_dev *pdev = bp->pdev; 1955 1956 if (!bp->rx_ring) 1957 return; 1958 1959 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 1960 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 1961 for (i = 0; i < bp->rx_nr_rings; i++) { 1962 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 1963 int j; 1964 1965 if (rxr->rx_tpa) { 1966 for (j = 0; j < MAX_TPA; j++) { 1967 struct bnxt_tpa_info *tpa_info = 1968 &rxr->rx_tpa[j]; 1969 u8 *data = tpa_info->data; 1970 1971 if (!data) 1972 continue; 1973 1974 dma_unmap_single(&pdev->dev, tpa_info->mapping, 1975 bp->rx_buf_use_size, 1976 bp->rx_dir); 1977 1978 tpa_info->data = NULL; 1979 1980 kfree(data); 1981 } 1982 } 1983 1984 for (j = 0; j < max_idx; j++) { 1985 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 1986 void *data = rx_buf->data; 1987 1988 if (!data) 1989 continue; 1990 1991 dma_unmap_single(&pdev->dev, rx_buf->mapping, 1992 bp->rx_buf_use_size, bp->rx_dir); 1993 1994 rx_buf->data = NULL; 1995 1996 if (BNXT_RX_PAGE_MODE(bp)) 1997 __free_page(data); 1998 else 1999 kfree(data); 2000 } 2001 2002 for (j = 0; j < max_agg_idx; j++) { 2003 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2004 &rxr->rx_agg_ring[j]; 2005 struct page *page = rx_agg_buf->page; 2006 2007 if (!page) 2008 continue; 2009 2010 dma_unmap_page(&pdev->dev, rx_agg_buf->mapping, 2011 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); 2012 2013 rx_agg_buf->page = NULL; 2014 __clear_bit(j, rxr->rx_agg_bmap); 2015 2016 __free_page(page); 2017 } 2018 if (rxr->rx_page) { 2019 __free_page(rxr->rx_page); 2020 rxr->rx_page = NULL; 2021 } 2022 } 2023 } 2024 2025 static void bnxt_free_skbs(struct bnxt *bp) 2026 { 2027 bnxt_free_tx_skbs(bp); 2028 bnxt_free_rx_skbs(bp); 2029 } 2030 2031 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 2032 { 2033 struct pci_dev *pdev = bp->pdev; 2034 int i; 2035 2036 for (i = 0; i < ring->nr_pages; i++) { 2037 if (!ring->pg_arr[i]) 2038 continue; 2039 2040 dma_free_coherent(&pdev->dev, ring->page_size, 2041 ring->pg_arr[i], ring->dma_arr[i]); 2042 2043 ring->pg_arr[i] = NULL; 2044 } 2045 if (ring->pg_tbl) { 2046 dma_free_coherent(&pdev->dev, ring->nr_pages * 8, 2047 ring->pg_tbl, ring->pg_tbl_map); 2048 ring->pg_tbl = NULL; 2049 } 2050 if (ring->vmem_size && *ring->vmem) { 2051 vfree(*ring->vmem); 2052 *ring->vmem = NULL; 2053 } 2054 } 2055 2056 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 2057 { 2058 int i; 2059 struct 
pci_dev *pdev = bp->pdev; 2060 2061 if (ring->nr_pages > 1) { 2062 ring->pg_tbl = dma_alloc_coherent(&pdev->dev, 2063 ring->nr_pages * 8, 2064 &ring->pg_tbl_map, 2065 GFP_KERNEL); 2066 if (!ring->pg_tbl) 2067 return -ENOMEM; 2068 } 2069 2070 for (i = 0; i < ring->nr_pages; i++) { 2071 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2072 ring->page_size, 2073 &ring->dma_arr[i], 2074 GFP_KERNEL); 2075 if (!ring->pg_arr[i]) 2076 return -ENOMEM; 2077 2078 if (ring->nr_pages > 1) 2079 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); 2080 } 2081 2082 if (ring->vmem_size) { 2083 *ring->vmem = vzalloc(ring->vmem_size); 2084 if (!(*ring->vmem)) 2085 return -ENOMEM; 2086 } 2087 return 0; 2088 } 2089 2090 static void bnxt_free_rx_rings(struct bnxt *bp) 2091 { 2092 int i; 2093 2094 if (!bp->rx_ring) 2095 return; 2096 2097 for (i = 0; i < bp->rx_nr_rings; i++) { 2098 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2099 struct bnxt_ring_struct *ring; 2100 2101 if (rxr->xdp_prog) 2102 bpf_prog_put(rxr->xdp_prog); 2103 2104 kfree(rxr->rx_tpa); 2105 rxr->rx_tpa = NULL; 2106 2107 kfree(rxr->rx_agg_bmap); 2108 rxr->rx_agg_bmap = NULL; 2109 2110 ring = &rxr->rx_ring_struct; 2111 bnxt_free_ring(bp, ring); 2112 2113 ring = &rxr->rx_agg_ring_struct; 2114 bnxt_free_ring(bp, ring); 2115 } 2116 } 2117 2118 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2119 { 2120 int i, rc, agg_rings = 0, tpa_rings = 0; 2121 2122 if (!bp->rx_ring) 2123 return -ENOMEM; 2124 2125 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2126 agg_rings = 1; 2127 2128 if (bp->flags & BNXT_FLAG_TPA) 2129 tpa_rings = 1; 2130 2131 for (i = 0; i < bp->rx_nr_rings; i++) { 2132 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2133 struct bnxt_ring_struct *ring; 2134 2135 ring = &rxr->rx_ring_struct; 2136 2137 rc = bnxt_alloc_ring(bp, ring); 2138 if (rc) 2139 return rc; 2140 2141 if (agg_rings) { 2142 u16 mem_size; 2143 2144 ring = &rxr->rx_agg_ring_struct; 2145 rc = bnxt_alloc_ring(bp, ring); 2146 if (rc) 2147 return rc; 2148 2149 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2150 mem_size = rxr->rx_agg_bmap_size / 8; 2151 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2152 if (!rxr->rx_agg_bmap) 2153 return -ENOMEM; 2154 2155 if (tpa_rings) { 2156 rxr->rx_tpa = kcalloc(MAX_TPA, 2157 sizeof(struct bnxt_tpa_info), 2158 GFP_KERNEL); 2159 if (!rxr->rx_tpa) 2160 return -ENOMEM; 2161 } 2162 } 2163 } 2164 return 0; 2165 } 2166 2167 static void bnxt_free_tx_rings(struct bnxt *bp) 2168 { 2169 int i; 2170 struct pci_dev *pdev = bp->pdev; 2171 2172 if (!bp->tx_ring) 2173 return; 2174 2175 for (i = 0; i < bp->tx_nr_rings; i++) { 2176 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2177 struct bnxt_ring_struct *ring; 2178 2179 if (txr->tx_push) { 2180 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2181 txr->tx_push, txr->tx_push_mapping); 2182 txr->tx_push = NULL; 2183 } 2184 2185 ring = &txr->tx_ring_struct; 2186 2187 bnxt_free_ring(bp, ring); 2188 } 2189 } 2190 2191 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2192 { 2193 int i, j, rc; 2194 struct pci_dev *pdev = bp->pdev; 2195 2196 bp->tx_push_size = 0; 2197 if (bp->tx_push_thresh) { 2198 int push_size; 2199 2200 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2201 bp->tx_push_thresh); 2202 2203 if (push_size > 256) { 2204 push_size = 0; 2205 bp->tx_push_thresh = 0; 2206 } 2207 2208 bp->tx_push_size = push_size; 2209 } 2210 2211 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2212 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2213 struct bnxt_ring_struct *ring; 2214 2215 ring = 
&txr->tx_ring_struct; 2216 2217 rc = bnxt_alloc_ring(bp, ring); 2218 if (rc) 2219 return rc; 2220 2221 if (bp->tx_push_size) { 2222 dma_addr_t mapping; 2223 2224 /* One pre-allocated DMA buffer to backup 2225 * TX push operation 2226 */ 2227 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2228 bp->tx_push_size, 2229 &txr->tx_push_mapping, 2230 GFP_KERNEL); 2231 2232 if (!txr->tx_push) 2233 return -ENOMEM; 2234 2235 mapping = txr->tx_push_mapping + 2236 sizeof(struct tx_push_bd); 2237 txr->data_mapping = cpu_to_le64(mapping); 2238 2239 memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); 2240 } 2241 ring->queue_id = bp->q_info[j].queue_id; 2242 if (i < bp->tx_nr_rings_xdp) 2243 continue; 2244 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2245 j++; 2246 } 2247 return 0; 2248 } 2249 2250 static void bnxt_free_cp_rings(struct bnxt *bp) 2251 { 2252 int i; 2253 2254 if (!bp->bnapi) 2255 return; 2256 2257 for (i = 0; i < bp->cp_nr_rings; i++) { 2258 struct bnxt_napi *bnapi = bp->bnapi[i]; 2259 struct bnxt_cp_ring_info *cpr; 2260 struct bnxt_ring_struct *ring; 2261 2262 if (!bnapi) 2263 continue; 2264 2265 cpr = &bnapi->cp_ring; 2266 ring = &cpr->cp_ring_struct; 2267 2268 bnxt_free_ring(bp, ring); 2269 } 2270 } 2271 2272 static int bnxt_alloc_cp_rings(struct bnxt *bp) 2273 { 2274 int i, rc; 2275 2276 for (i = 0; i < bp->cp_nr_rings; i++) { 2277 struct bnxt_napi *bnapi = bp->bnapi[i]; 2278 struct bnxt_cp_ring_info *cpr; 2279 struct bnxt_ring_struct *ring; 2280 2281 if (!bnapi) 2282 continue; 2283 2284 cpr = &bnapi->cp_ring; 2285 ring = &cpr->cp_ring_struct; 2286 2287 rc = bnxt_alloc_ring(bp, ring); 2288 if (rc) 2289 return rc; 2290 } 2291 return 0; 2292 } 2293 2294 static void bnxt_init_ring_struct(struct bnxt *bp) 2295 { 2296 int i; 2297 2298 for (i = 0; i < bp->cp_nr_rings; i++) { 2299 struct bnxt_napi *bnapi = bp->bnapi[i]; 2300 struct bnxt_cp_ring_info *cpr; 2301 struct bnxt_rx_ring_info *rxr; 2302 struct bnxt_tx_ring_info *txr; 2303 struct bnxt_ring_struct *ring; 2304 2305 if (!bnapi) 2306 continue; 2307 2308 cpr = &bnapi->cp_ring; 2309 ring = &cpr->cp_ring_struct; 2310 ring->nr_pages = bp->cp_nr_pages; 2311 ring->page_size = HW_CMPD_RING_SIZE; 2312 ring->pg_arr = (void **)cpr->cp_desc_ring; 2313 ring->dma_arr = cpr->cp_desc_mapping; 2314 ring->vmem_size = 0; 2315 2316 rxr = bnapi->rx_ring; 2317 if (!rxr) 2318 goto skip_rx; 2319 2320 ring = &rxr->rx_ring_struct; 2321 ring->nr_pages = bp->rx_nr_pages; 2322 ring->page_size = HW_RXBD_RING_SIZE; 2323 ring->pg_arr = (void **)rxr->rx_desc_ring; 2324 ring->dma_arr = rxr->rx_desc_mapping; 2325 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 2326 ring->vmem = (void **)&rxr->rx_buf_ring; 2327 2328 ring = &rxr->rx_agg_ring_struct; 2329 ring->nr_pages = bp->rx_agg_nr_pages; 2330 ring->page_size = HW_RXBD_RING_SIZE; 2331 ring->pg_arr = (void **)rxr->rx_agg_desc_ring; 2332 ring->dma_arr = rxr->rx_agg_desc_mapping; 2333 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 2334 ring->vmem = (void **)&rxr->rx_agg_ring; 2335 2336 skip_rx: 2337 txr = bnapi->tx_ring; 2338 if (!txr) 2339 continue; 2340 2341 ring = &txr->tx_ring_struct; 2342 ring->nr_pages = bp->tx_nr_pages; 2343 ring->page_size = HW_RXBD_RING_SIZE; 2344 ring->pg_arr = (void **)txr->tx_desc_ring; 2345 ring->dma_arr = txr->tx_desc_mapping; 2346 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 2347 ring->vmem = (void **)&txr->tx_buf_ring; 2348 } 2349 } 2350 2351 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 2352 { 2353 int i; 2354 u32 
prod; 2355 struct rx_bd **rx_buf_ring; 2356 2357 rx_buf_ring = (struct rx_bd **)ring->pg_arr; 2358 for (i = 0, prod = 0; i < ring->nr_pages; i++) { 2359 int j; 2360 struct rx_bd *rxbd; 2361 2362 rxbd = rx_buf_ring[i]; 2363 if (!rxbd) 2364 continue; 2365 2366 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 2367 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 2368 rxbd->rx_bd_opaque = prod; 2369 } 2370 } 2371 } 2372 2373 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 2374 { 2375 struct net_device *dev = bp->dev; 2376 struct bnxt_rx_ring_info *rxr; 2377 struct bnxt_ring_struct *ring; 2378 u32 prod, type; 2379 int i; 2380 2381 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 2382 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 2383 2384 if (NET_IP_ALIGN == 2) 2385 type |= RX_BD_FLAGS_SOP; 2386 2387 rxr = &bp->rx_ring[ring_nr]; 2388 ring = &rxr->rx_ring_struct; 2389 bnxt_init_rxbd_pages(ring, type); 2390 2391 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 2392 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); 2393 if (IS_ERR(rxr->xdp_prog)) { 2394 int rc = PTR_ERR(rxr->xdp_prog); 2395 2396 rxr->xdp_prog = NULL; 2397 return rc; 2398 } 2399 } 2400 prod = rxr->rx_prod; 2401 for (i = 0; i < bp->rx_ring_size; i++) { 2402 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 2403 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 2404 ring_nr, i, bp->rx_ring_size); 2405 break; 2406 } 2407 prod = NEXT_RX(prod); 2408 } 2409 rxr->rx_prod = prod; 2410 ring->fw_ring_id = INVALID_HW_RING_ID; 2411 2412 ring = &rxr->rx_agg_ring_struct; 2413 ring->fw_ring_id = INVALID_HW_RING_ID; 2414 2415 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 2416 return 0; 2417 2418 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 2419 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 2420 2421 bnxt_init_rxbd_pages(ring, type); 2422 2423 prod = rxr->rx_agg_prod; 2424 for (i = 0; i < bp->rx_agg_ring_size; i++) { 2425 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 2426 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 2427 ring_nr, i, bp->rx_ring_size); 2428 break; 2429 } 2430 prod = NEXT_RX_AGG(prod); 2431 } 2432 rxr->rx_agg_prod = prod; 2433 2434 if (bp->flags & BNXT_FLAG_TPA) { 2435 if (rxr->rx_tpa) { 2436 u8 *data; 2437 dma_addr_t mapping; 2438 2439 for (i = 0; i < MAX_TPA; i++) { 2440 data = __bnxt_alloc_rx_data(bp, &mapping, 2441 GFP_KERNEL); 2442 if (!data) 2443 return -ENOMEM; 2444 2445 rxr->rx_tpa[i].data = data; 2446 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 2447 rxr->rx_tpa[i].mapping = mapping; 2448 } 2449 } else { 2450 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 2451 return -ENOMEM; 2452 } 2453 } 2454 2455 return 0; 2456 } 2457 2458 static int bnxt_init_rx_rings(struct bnxt *bp) 2459 { 2460 int i, rc = 0; 2461 2462 if (BNXT_RX_PAGE_MODE(bp)) { 2463 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 2464 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 2465 } else { 2466 bp->rx_offset = BNXT_RX_OFFSET; 2467 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 2468 } 2469 2470 for (i = 0; i < bp->rx_nr_rings; i++) { 2471 rc = bnxt_init_one_rx_ring(bp, i); 2472 if (rc) 2473 break; 2474 } 2475 2476 return rc; 2477 } 2478 2479 static int bnxt_init_tx_rings(struct bnxt *bp) 2480 { 2481 u16 i; 2482 2483 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 2484 MAX_SKB_FRAGS + 1); 2485 2486 for (i = 0; i < bp->tx_nr_rings; i++) { 2487 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2488 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 2489 2490 ring->fw_ring_id = INVALID_HW_RING_ID; 
2491 } 2492 2493 return 0; 2494 } 2495 2496 static void bnxt_free_ring_grps(struct bnxt *bp) 2497 { 2498 kfree(bp->grp_info); 2499 bp->grp_info = NULL; 2500 } 2501 2502 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 2503 { 2504 int i; 2505 2506 if (irq_re_init) { 2507 bp->grp_info = kcalloc(bp->cp_nr_rings, 2508 sizeof(struct bnxt_ring_grp_info), 2509 GFP_KERNEL); 2510 if (!bp->grp_info) 2511 return -ENOMEM; 2512 } 2513 for (i = 0; i < bp->cp_nr_rings; i++) { 2514 if (irq_re_init) 2515 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 2516 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 2517 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 2518 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 2519 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 2520 } 2521 return 0; 2522 } 2523 2524 static void bnxt_free_vnics(struct bnxt *bp) 2525 { 2526 kfree(bp->vnic_info); 2527 bp->vnic_info = NULL; 2528 bp->nr_vnics = 0; 2529 } 2530 2531 static int bnxt_alloc_vnics(struct bnxt *bp) 2532 { 2533 int num_vnics = 1; 2534 2535 #ifdef CONFIG_RFS_ACCEL 2536 if (bp->flags & BNXT_FLAG_RFS) 2537 num_vnics += bp->rx_nr_rings; 2538 #endif 2539 2540 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 2541 num_vnics++; 2542 2543 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 2544 GFP_KERNEL); 2545 if (!bp->vnic_info) 2546 return -ENOMEM; 2547 2548 bp->nr_vnics = num_vnics; 2549 return 0; 2550 } 2551 2552 static void bnxt_init_vnics(struct bnxt *bp) 2553 { 2554 int i; 2555 2556 for (i = 0; i < bp->nr_vnics; i++) { 2557 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2558 2559 vnic->fw_vnic_id = INVALID_HW_RING_ID; 2560 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 2561 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 2562 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 2563 2564 if (bp->vnic_info[i].rss_hash_key) { 2565 if (i == 0) 2566 prandom_bytes(vnic->rss_hash_key, 2567 HW_HASH_KEY_SIZE); 2568 else 2569 memcpy(vnic->rss_hash_key, 2570 bp->vnic_info[0].rss_hash_key, 2571 HW_HASH_KEY_SIZE); 2572 } 2573 } 2574 } 2575 2576 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 2577 { 2578 int pages; 2579 2580 pages = ring_size / desc_per_pg; 2581 2582 if (!pages) 2583 return 1; 2584 2585 pages++; 2586 2587 while (pages & (pages - 1)) 2588 pages++; 2589 2590 return pages; 2591 } 2592 2593 void bnxt_set_tpa_flags(struct bnxt *bp) 2594 { 2595 bp->flags &= ~BNXT_FLAG_TPA; 2596 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 2597 return; 2598 if (bp->dev->features & NETIF_F_LRO) 2599 bp->flags |= BNXT_FLAG_LRO; 2600 if (bp->dev->features & NETIF_F_GRO) 2601 bp->flags |= BNXT_FLAG_GRO; 2602 } 2603 2604 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 2605 * be set on entry. 
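 * From these inputs the function derives the RX buffer sizes, the
 * aggregation ring size used for TPA and jumbo frames, and a completion
 * ring sized to cover all RX, aggregation, and TX completions.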
2606 */ 2607 void bnxt_set_ring_params(struct bnxt *bp) 2608 { 2609 u32 ring_size, rx_size, rx_space; 2610 u32 agg_factor = 0, agg_ring_size = 0; 2611 2612 /* 8 for CRC and VLAN */ 2613 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 2614 2615 rx_space = rx_size + NET_SKB_PAD + 2616 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2617 2618 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 2619 ring_size = bp->rx_ring_size; 2620 bp->rx_agg_ring_size = 0; 2621 bp->rx_agg_nr_pages = 0; 2622 2623 if (bp->flags & BNXT_FLAG_TPA) 2624 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 2625 2626 bp->flags &= ~BNXT_FLAG_JUMBO; 2627 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 2628 u32 jumbo_factor; 2629 2630 bp->flags |= BNXT_FLAG_JUMBO; 2631 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 2632 if (jumbo_factor > agg_factor) 2633 agg_factor = jumbo_factor; 2634 } 2635 agg_ring_size = ring_size * agg_factor; 2636 2637 if (agg_ring_size) { 2638 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 2639 RX_DESC_CNT); 2640 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 2641 u32 tmp = agg_ring_size; 2642 2643 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 2644 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 2645 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 2646 tmp, agg_ring_size); 2647 } 2648 bp->rx_agg_ring_size = agg_ring_size; 2649 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 2650 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 2651 rx_space = rx_size + NET_SKB_PAD + 2652 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2653 } 2654 2655 bp->rx_buf_use_size = rx_size; 2656 bp->rx_buf_size = rx_space; 2657 2658 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 2659 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 2660 2661 ring_size = bp->tx_ring_size; 2662 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 2663 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 2664 2665 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 2666 bp->cp_ring_size = ring_size; 2667 2668 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 2669 if (bp->cp_nr_pages > MAX_CP_PAGES) { 2670 bp->cp_nr_pages = MAX_CP_PAGES; 2671 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 2672 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 2673 ring_size, bp->cp_ring_size); 2674 } 2675 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 2676 bp->cp_ring_mask = bp->cp_bit - 1; 2677 } 2678 2679 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 2680 { 2681 if (page_mode) { 2682 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 2683 return -EOPNOTSUPP; 2684 bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU; 2685 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 2686 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 2687 bp->dev->hw_features &= ~NETIF_F_LRO; 2688 bp->dev->features &= ~NETIF_F_LRO; 2689 bp->rx_dir = DMA_BIDIRECTIONAL; 2690 bp->rx_skb_func = bnxt_rx_page_skb; 2691 } else { 2692 bp->dev->max_mtu = BNXT_MAX_MTU; 2693 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 2694 bp->rx_dir = DMA_FROM_DEVICE; 2695 bp->rx_skb_func = bnxt_rx_skb; 2696 } 2697 return 0; 2698 } 2699 2700 static void bnxt_free_vnic_attributes(struct bnxt *bp) 2701 { 2702 int i; 2703 struct bnxt_vnic_info *vnic; 2704 struct pci_dev *pdev = bp->pdev; 2705 2706 if (!bp->vnic_info) 2707 return; 2708 2709 for (i = 0; i < bp->nr_vnics; i++) { 2710 vnic = &bp->vnic_info[i]; 2711 2712 
kfree(vnic->fw_grp_ids); 2713 vnic->fw_grp_ids = NULL; 2714 2715 kfree(vnic->uc_list); 2716 vnic->uc_list = NULL; 2717 2718 if (vnic->mc_list) { 2719 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 2720 vnic->mc_list, vnic->mc_list_mapping); 2721 vnic->mc_list = NULL; 2722 } 2723 2724 if (vnic->rss_table) { 2725 dma_free_coherent(&pdev->dev, PAGE_SIZE, 2726 vnic->rss_table, 2727 vnic->rss_table_dma_addr); 2728 vnic->rss_table = NULL; 2729 } 2730 2731 vnic->rss_hash_key = NULL; 2732 vnic->flags = 0; 2733 } 2734 } 2735 2736 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 2737 { 2738 int i, rc = 0, size; 2739 struct bnxt_vnic_info *vnic; 2740 struct pci_dev *pdev = bp->pdev; 2741 int max_rings; 2742 2743 for (i = 0; i < bp->nr_vnics; i++) { 2744 vnic = &bp->vnic_info[i]; 2745 2746 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 2747 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 2748 2749 if (mem_size > 0) { 2750 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 2751 if (!vnic->uc_list) { 2752 rc = -ENOMEM; 2753 goto out; 2754 } 2755 } 2756 } 2757 2758 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 2759 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 2760 vnic->mc_list = 2761 dma_alloc_coherent(&pdev->dev, 2762 vnic->mc_list_size, 2763 &vnic->mc_list_mapping, 2764 GFP_KERNEL); 2765 if (!vnic->mc_list) { 2766 rc = -ENOMEM; 2767 goto out; 2768 } 2769 } 2770 2771 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 2772 max_rings = bp->rx_nr_rings; 2773 else 2774 max_rings = 1; 2775 2776 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 2777 if (!vnic->fw_grp_ids) { 2778 rc = -ENOMEM; 2779 goto out; 2780 } 2781 2782 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 2783 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 2784 continue; 2785 2786 /* Allocate rss table and hash key */ 2787 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2788 &vnic->rss_table_dma_addr, 2789 GFP_KERNEL); 2790 if (!vnic->rss_table) { 2791 rc = -ENOMEM; 2792 goto out; 2793 } 2794 2795 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 2796 2797 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 2798 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 2799 } 2800 return 0; 2801 2802 out: 2803 return rc; 2804 } 2805 2806 static void bnxt_free_hwrm_resources(struct bnxt *bp) 2807 { 2808 struct pci_dev *pdev = bp->pdev; 2809 2810 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 2811 bp->hwrm_cmd_resp_dma_addr); 2812 2813 bp->hwrm_cmd_resp_addr = NULL; 2814 if (bp->hwrm_dbg_resp_addr) { 2815 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, 2816 bp->hwrm_dbg_resp_addr, 2817 bp->hwrm_dbg_resp_dma_addr); 2818 2819 bp->hwrm_dbg_resp_addr = NULL; 2820 } 2821 } 2822 2823 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 2824 { 2825 struct pci_dev *pdev = bp->pdev; 2826 2827 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2828 &bp->hwrm_cmd_resp_dma_addr, 2829 GFP_KERNEL); 2830 if (!bp->hwrm_cmd_resp_addr) 2831 return -ENOMEM; 2832 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, 2833 HWRM_DBG_REG_BUF_SIZE, 2834 &bp->hwrm_dbg_resp_dma_addr, 2835 GFP_KERNEL); 2836 if (!bp->hwrm_dbg_resp_addr) 2837 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); 2838 2839 return 0; 2840 } 2841 2842 static void bnxt_free_stats(struct bnxt *bp) 2843 { 2844 u32 size, i; 2845 struct pci_dev *pdev = bp->pdev; 2846 2847 if (bp->hw_rx_port_stats) { 2848 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 2849 bp->hw_rx_port_stats, 2850 bp->hw_rx_port_stats_map); 2851 
bp->hw_rx_port_stats = NULL; 2852 bp->flags &= ~BNXT_FLAG_PORT_STATS; 2853 } 2854 2855 if (!bp->bnapi) 2856 return; 2857 2858 size = sizeof(struct ctx_hw_stats); 2859 2860 for (i = 0; i < bp->cp_nr_rings; i++) { 2861 struct bnxt_napi *bnapi = bp->bnapi[i]; 2862 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2863 2864 if (cpr->hw_stats) { 2865 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 2866 cpr->hw_stats_map); 2867 cpr->hw_stats = NULL; 2868 } 2869 } 2870 } 2871 2872 static int bnxt_alloc_stats(struct bnxt *bp) 2873 { 2874 u32 size, i; 2875 struct pci_dev *pdev = bp->pdev; 2876 2877 size = sizeof(struct ctx_hw_stats); 2878 2879 for (i = 0; i < bp->cp_nr_rings; i++) { 2880 struct bnxt_napi *bnapi = bp->bnapi[i]; 2881 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2882 2883 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 2884 &cpr->hw_stats_map, 2885 GFP_KERNEL); 2886 if (!cpr->hw_stats) 2887 return -ENOMEM; 2888 2889 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 2890 } 2891 2892 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { 2893 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 2894 sizeof(struct tx_port_stats) + 1024; 2895 2896 bp->hw_rx_port_stats = 2897 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 2898 &bp->hw_rx_port_stats_map, 2899 GFP_KERNEL); 2900 if (!bp->hw_rx_port_stats) 2901 return -ENOMEM; 2902 2903 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 2904 512; 2905 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 2906 sizeof(struct rx_port_stats) + 512; 2907 bp->flags |= BNXT_FLAG_PORT_STATS; 2908 } 2909 return 0; 2910 } 2911 2912 static void bnxt_clear_ring_indices(struct bnxt *bp) 2913 { 2914 int i; 2915 2916 if (!bp->bnapi) 2917 return; 2918 2919 for (i = 0; i < bp->cp_nr_rings; i++) { 2920 struct bnxt_napi *bnapi = bp->bnapi[i]; 2921 struct bnxt_cp_ring_info *cpr; 2922 struct bnxt_rx_ring_info *rxr; 2923 struct bnxt_tx_ring_info *txr; 2924 2925 if (!bnapi) 2926 continue; 2927 2928 cpr = &bnapi->cp_ring; 2929 cpr->cp_raw_cons = 0; 2930 2931 txr = bnapi->tx_ring; 2932 if (txr) { 2933 txr->tx_prod = 0; 2934 txr->tx_cons = 0; 2935 } 2936 2937 rxr = bnapi->rx_ring; 2938 if (rxr) { 2939 rxr->rx_prod = 0; 2940 rxr->rx_agg_prod = 0; 2941 rxr->rx_sw_agg_prod = 0; 2942 rxr->rx_next_cons = 0; 2943 } 2944 } 2945 } 2946 2947 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 2948 { 2949 #ifdef CONFIG_RFS_ACCEL 2950 int i; 2951 2952 /* Under rtnl_lock and all our NAPIs have been disabled. It's 2953 * safe to delete the hash table. 
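 * With NAPI disabled, the RX path cannot queue new ntuple filters while
 * the lists are being torn down.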
2954 */ 2955 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 2956 struct hlist_head *head; 2957 struct hlist_node *tmp; 2958 struct bnxt_ntuple_filter *fltr; 2959 2960 head = &bp->ntp_fltr_hash_tbl[i]; 2961 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 2962 hlist_del(&fltr->hash); 2963 kfree(fltr); 2964 } 2965 } 2966 if (irq_reinit) { 2967 kfree(bp->ntp_fltr_bmap); 2968 bp->ntp_fltr_bmap = NULL; 2969 } 2970 bp->ntp_fltr_count = 0; 2971 #endif 2972 } 2973 2974 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 2975 { 2976 #ifdef CONFIG_RFS_ACCEL 2977 int i, rc = 0; 2978 2979 if (!(bp->flags & BNXT_FLAG_RFS)) 2980 return 0; 2981 2982 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 2983 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 2984 2985 bp->ntp_fltr_count = 0; 2986 bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 2987 GFP_KERNEL); 2988 2989 if (!bp->ntp_fltr_bmap) 2990 rc = -ENOMEM; 2991 2992 return rc; 2993 #else 2994 return 0; 2995 #endif 2996 } 2997 2998 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 2999 { 3000 bnxt_free_vnic_attributes(bp); 3001 bnxt_free_tx_rings(bp); 3002 bnxt_free_rx_rings(bp); 3003 bnxt_free_cp_rings(bp); 3004 bnxt_free_ntp_fltrs(bp, irq_re_init); 3005 if (irq_re_init) { 3006 bnxt_free_stats(bp); 3007 bnxt_free_ring_grps(bp); 3008 bnxt_free_vnics(bp); 3009 kfree(bp->tx_ring_map); 3010 bp->tx_ring_map = NULL; 3011 kfree(bp->tx_ring); 3012 bp->tx_ring = NULL; 3013 kfree(bp->rx_ring); 3014 bp->rx_ring = NULL; 3015 kfree(bp->bnapi); 3016 bp->bnapi = NULL; 3017 } else { 3018 bnxt_clear_ring_indices(bp); 3019 } 3020 } 3021 3022 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3023 { 3024 int i, j, rc, size, arr_size; 3025 void *bnapi; 3026 3027 if (irq_re_init) { 3028 /* Allocate bnapi mem pointer array and mem block for 3029 * all queues 3030 */ 3031 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3032 bp->cp_nr_rings); 3033 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3034 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 3035 if (!bnapi) 3036 return -ENOMEM; 3037 3038 bp->bnapi = bnapi; 3039 bnapi += arr_size; 3040 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3041 bp->bnapi[i] = bnapi; 3042 bp->bnapi[i]->index = i; 3043 bp->bnapi[i]->bp = bp; 3044 } 3045 3046 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3047 sizeof(struct bnxt_rx_ring_info), 3048 GFP_KERNEL); 3049 if (!bp->rx_ring) 3050 return -ENOMEM; 3051 3052 for (i = 0; i < bp->rx_nr_rings; i++) { 3053 bp->rx_ring[i].bnapi = bp->bnapi[i]; 3054 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 3055 } 3056 3057 bp->tx_ring = kcalloc(bp->tx_nr_rings, 3058 sizeof(struct bnxt_tx_ring_info), 3059 GFP_KERNEL); 3060 if (!bp->tx_ring) 3061 return -ENOMEM; 3062 3063 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 3064 GFP_KERNEL); 3065 3066 if (!bp->tx_ring_map) 3067 return -ENOMEM; 3068 3069 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 3070 j = 0; 3071 else 3072 j = bp->rx_nr_rings; 3073 3074 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 3075 bp->tx_ring[i].bnapi = bp->bnapi[j]; 3076 bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; 3077 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 3078 if (i >= bp->tx_nr_rings_xdp) { 3079 bp->tx_ring[i].txq_index = i - 3080 bp->tx_nr_rings_xdp; 3081 bp->bnapi[j]->tx_int = bnxt_tx_int; 3082 } else { 3083 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 3084 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 3085 } 3086 } 3087 3088 rc = bnxt_alloc_stats(bp); 3089 if (rc) 3090 goto alloc_mem_err; 3091 3092 rc = bnxt_alloc_ntp_fltrs(bp); 3093 if (rc) 
3094 goto alloc_mem_err; 3095 3096 rc = bnxt_alloc_vnics(bp); 3097 if (rc) 3098 goto alloc_mem_err; 3099 } 3100 3101 bnxt_init_ring_struct(bp); 3102 3103 rc = bnxt_alloc_rx_rings(bp); 3104 if (rc) 3105 goto alloc_mem_err; 3106 3107 rc = bnxt_alloc_tx_rings(bp); 3108 if (rc) 3109 goto alloc_mem_err; 3110 3111 rc = bnxt_alloc_cp_rings(bp); 3112 if (rc) 3113 goto alloc_mem_err; 3114 3115 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 3116 BNXT_VNIC_UCAST_FLAG; 3117 rc = bnxt_alloc_vnic_attributes(bp); 3118 if (rc) 3119 goto alloc_mem_err; 3120 return 0; 3121 3122 alloc_mem_err: 3123 bnxt_free_mem(bp, true); 3124 return rc; 3125 } 3126 3127 static void bnxt_disable_int(struct bnxt *bp) 3128 { 3129 int i; 3130 3131 if (!bp->bnapi) 3132 return; 3133 3134 for (i = 0; i < bp->cp_nr_rings; i++) { 3135 struct bnxt_napi *bnapi = bp->bnapi[i]; 3136 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3137 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3138 3139 if (ring->fw_ring_id != INVALID_HW_RING_ID) 3140 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 3141 } 3142 } 3143 3144 static void bnxt_disable_int_sync(struct bnxt *bp) 3145 { 3146 int i; 3147 3148 atomic_inc(&bp->intr_sem); 3149 3150 bnxt_disable_int(bp); 3151 for (i = 0; i < bp->cp_nr_rings; i++) 3152 synchronize_irq(bp->irq_tbl[i].vector); 3153 } 3154 3155 static void bnxt_enable_int(struct bnxt *bp) 3156 { 3157 int i; 3158 3159 atomic_set(&bp->intr_sem, 0); 3160 for (i = 0; i < bp->cp_nr_rings; i++) { 3161 struct bnxt_napi *bnapi = bp->bnapi[i]; 3162 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3163 3164 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 3165 } 3166 } 3167 3168 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 3169 u16 cmpl_ring, u16 target_id) 3170 { 3171 struct input *req = request; 3172 3173 req->req_type = cpu_to_le16(req_type); 3174 req->cmpl_ring = cpu_to_le16(cmpl_ring); 3175 req->target_id = cpu_to_le16(target_id); 3176 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 3177 } 3178 3179 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 3180 int timeout, bool silent) 3181 { 3182 int i, intr_process, rc, tmo_count; 3183 struct input *req = msg; 3184 u32 *data = msg; 3185 __le32 *resp_len, *valid; 3186 u16 cp_ring_id, len = 0; 3187 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 3188 3189 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); 3190 memset(resp, 0, PAGE_SIZE); 3191 cp_ring_id = le16_to_cpu(req->cmpl_ring); 3192 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 
0 : 1; 3193 3194 /* Write request msg to hwrm channel */ 3195 __iowrite32_copy(bp->bar0, data, msg_len / 4); 3196 3197 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4) 3198 writel(0, bp->bar0 + i); 3199 3200 /* currently supports only one outstanding message */ 3201 if (intr_process) 3202 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 3203 3204 /* Ring channel doorbell */ 3205 writel(1, bp->bar0 + 0x100); 3206 3207 if (!timeout) 3208 timeout = DFLT_HWRM_CMD_TIMEOUT; 3209 3210 i = 0; 3211 tmo_count = timeout * 40; 3212 if (intr_process) { 3213 /* Wait until hwrm response cmpl interrupt is processed */ 3214 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && 3215 i++ < tmo_count) { 3216 usleep_range(25, 40); 3217 } 3218 3219 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { 3220 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 3221 le16_to_cpu(req->req_type)); 3222 return -1; 3223 } 3224 } else { 3225 /* Check if response len is updated */ 3226 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; 3227 for (i = 0; i < tmo_count; i++) { 3228 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 3229 HWRM_RESP_LEN_SFT; 3230 if (len) 3231 break; 3232 usleep_range(25, 40); 3233 } 3234 3235 if (i >= tmo_count) { 3236 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 3237 timeout, le16_to_cpu(req->req_type), 3238 le16_to_cpu(req->seq_id), len); 3239 return -1; 3240 } 3241 3242 /* Last word of resp contains valid bit */ 3243 valid = bp->hwrm_cmd_resp_addr + len - 4; 3244 for (i = 0; i < 5; i++) { 3245 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) 3246 break; 3247 udelay(1); 3248 } 3249 3250 if (i >= 5) { 3251 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 3252 timeout, le16_to_cpu(req->req_type), 3253 le16_to_cpu(req->seq_id), len, *valid); 3254 return -1; 3255 } 3256 } 3257 3258 rc = le16_to_cpu(resp->error_code); 3259 if (rc && !silent) 3260 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 3261 le16_to_cpu(resp->req_type), 3262 le16_to_cpu(resp->seq_id), rc); 3263 return rc; 3264 } 3265 3266 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3267 { 3268 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3269 } 3270 3271 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3272 { 3273 int rc; 3274 3275 mutex_lock(&bp->hwrm_cmd_lock); 3276 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 3277 mutex_unlock(&bp->hwrm_cmd_lock); 3278 return rc; 3279 } 3280 3281 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3282 int timeout) 3283 { 3284 int rc; 3285 3286 mutex_lock(&bp->hwrm_cmd_lock); 3287 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3288 mutex_unlock(&bp->hwrm_cmd_lock); 3289 return rc; 3290 } 3291 3292 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 3293 int bmap_size) 3294 { 3295 struct hwrm_func_drv_rgtr_input req = {0}; 3296 DECLARE_BITMAP(async_events_bmap, 256); 3297 u32 *events = (u32 *)async_events_bmap; 3298 int i; 3299 3300 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3301 3302 req.enables = 3303 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 3304 3305 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 3306 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) 3307 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 3308 3309 if (bmap && bmap_size) { 3310 for (i = 0; i < bmap_size; i++) { 3311 if (test_bit(i, bmap)) 3312 __set_bit(i, 
async_events_bmap); 3313 } 3314 } 3315 3316 for (i = 0; i < 8; i++) 3317 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 3318 3319 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3320 } 3321 3322 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) 3323 { 3324 struct hwrm_func_drv_rgtr_input req = {0}; 3325 3326 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3327 3328 req.enables = 3329 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 3330 FUNC_DRV_RGTR_REQ_ENABLES_VER); 3331 3332 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 3333 req.ver_maj = DRV_VER_MAJ; 3334 req.ver_min = DRV_VER_MIN; 3335 req.ver_upd = DRV_VER_UPD; 3336 3337 if (BNXT_PF(bp)) { 3338 DECLARE_BITMAP(vf_req_snif_bmap, 256); 3339 u32 *data = (u32 *)vf_req_snif_bmap; 3340 int i; 3341 3342 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); 3343 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) 3344 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); 3345 3346 for (i = 0; i < 8; i++) 3347 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 3348 3349 req.enables |= 3350 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 3351 } 3352 3353 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3354 } 3355 3356 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 3357 { 3358 struct hwrm_func_drv_unrgtr_input req = {0}; 3359 3360 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 3361 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3362 } 3363 3364 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 3365 { 3366 u32 rc = 0; 3367 struct hwrm_tunnel_dst_port_free_input req = {0}; 3368 3369 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 3370 req.tunnel_type = tunnel_type; 3371 3372 switch (tunnel_type) { 3373 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 3374 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 3375 break; 3376 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 3377 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 3378 break; 3379 default: 3380 break; 3381 } 3382 3383 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3384 if (rc) 3385 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 3386 rc); 3387 return rc; 3388 } 3389 3390 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 3391 u8 tunnel_type) 3392 { 3393 u32 rc = 0; 3394 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 3395 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3396 3397 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 3398 3399 req.tunnel_type = tunnel_type; 3400 req.tunnel_dst_port_val = port; 3401 3402 mutex_lock(&bp->hwrm_cmd_lock); 3403 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3404 if (rc) { 3405 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 3406 rc); 3407 goto err_out; 3408 } 3409 3410 switch (tunnel_type) { 3411 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 3412 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 3413 break; 3414 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 3415 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 3416 break; 3417 default: 3418 break; 3419 } 3420 3421 err_out: 3422 mutex_unlock(&bp->hwrm_cmd_lock); 3423 return rc; 3424 } 3425 3426 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 3427 { 3428 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 3429 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3430 3431 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 3432 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3433 3434 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 3435 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 3436 req.mask = cpu_to_le32(vnic->rx_mask); 3437 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3438 } 3439 3440 #ifdef CONFIG_RFS_ACCEL 3441 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 3442 struct bnxt_ntuple_filter *fltr) 3443 { 3444 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 3445 3446 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 3447 req.ntuple_filter_id = fltr->filter_id; 3448 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3449 } 3450 3451 #define BNXT_NTP_FLTR_FLAGS \ 3452 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 3453 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 3454 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 3455 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 3456 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 3457 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 3458 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 3459 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 3460 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 3461 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 3462 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 3463 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 3464 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 3465 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 3466 3467 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 3468 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 3469 3470 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 3471 struct bnxt_ntuple_filter *fltr) 3472 { 3473 int rc = 0; 3474 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 3475 struct hwrm_cfa_ntuple_filter_alloc_output *resp = 3476 bp->hwrm_cmd_resp_addr; 3477 struct flow_keys *keys = &fltr->fkeys; 3478 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; 3479 3480 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 3481 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 3482 3483 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 3484 3485 req.ethertype = htons(ETH_P_IP); 3486 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 3487 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 3488 req.ip_protocol = keys->basic.ip_proto; 3489 3490 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 3491 int i; 3492 3493 req.ethertype = htons(ETH_P_IPV6); 3494 req.ip_addr_type = 3495 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 3496 *(struct in6_addr *)&req.src_ipaddr[0] = 3497 keys->addrs.v6addrs.src; 3498 *(struct in6_addr *)&req.dst_ipaddr[0] = 3499 keys->addrs.v6addrs.dst; 3500 for (i = 0; i < 4; i++) { 
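/* write all four 32-bit words of the source and destination masks so the
 * filter matches the full 128-bit IPv6 addresses exactly
 */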
3501 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 3502 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 3503 } 3504 } else { 3505 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 3506 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3507 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 3508 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3509 } 3510 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 3511 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 3512 req.tunnel_type = 3513 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 3514 } 3515 3516 req.src_port = keys->ports.src; 3517 req.src_port_mask = cpu_to_be16(0xffff); 3518 req.dst_port = keys->ports.dst; 3519 req.dst_port_mask = cpu_to_be16(0xffff); 3520 3521 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 3522 mutex_lock(&bp->hwrm_cmd_lock); 3523 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3524 if (!rc) 3525 fltr->filter_id = resp->ntuple_filter_id; 3526 mutex_unlock(&bp->hwrm_cmd_lock); 3527 return rc; 3528 } 3529 #endif 3530 3531 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 3532 u8 *mac_addr) 3533 { 3534 u32 rc = 0; 3535 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 3536 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3537 3538 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 3539 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 3540 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 3541 req.flags |= 3542 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 3543 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 3544 req.enables = 3545 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 3546 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 3547 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 3548 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 3549 req.l2_addr_mask[0] = 0xff; 3550 req.l2_addr_mask[1] = 0xff; 3551 req.l2_addr_mask[2] = 0xff; 3552 req.l2_addr_mask[3] = 0xff; 3553 req.l2_addr_mask[4] = 0xff; 3554 req.l2_addr_mask[5] = 0xff; 3555 3556 mutex_lock(&bp->hwrm_cmd_lock); 3557 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3558 if (!rc) 3559 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 3560 resp->l2_filter_id; 3561 mutex_unlock(&bp->hwrm_cmd_lock); 3562 return rc; 3563 } 3564 3565 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 3566 { 3567 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 3568 int rc = 0; 3569 3570 /* Any associated ntuple filters will also be cleared by firmware. 
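 * The driver therefore does not issue individual ntuple filter free
 * commands here; only the L2 filters are freed explicitly.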
*/ 3571 mutex_lock(&bp->hwrm_cmd_lock); 3572 for (i = 0; i < num_of_vnics; i++) { 3573 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3574 3575 for (j = 0; j < vnic->uc_filter_count; j++) { 3576 struct hwrm_cfa_l2_filter_free_input req = {0}; 3577 3578 bnxt_hwrm_cmd_hdr_init(bp, &req, 3579 HWRM_CFA_L2_FILTER_FREE, -1, -1); 3580 3581 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 3582 3583 rc = _hwrm_send_message(bp, &req, sizeof(req), 3584 HWRM_CMD_TIMEOUT); 3585 } 3586 vnic->uc_filter_count = 0; 3587 } 3588 mutex_unlock(&bp->hwrm_cmd_lock); 3589 3590 return rc; 3591 } 3592 3593 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 3594 { 3595 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3596 struct hwrm_vnic_tpa_cfg_input req = {0}; 3597 3598 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 3599 3600 if (tpa_flags) { 3601 u16 mss = bp->dev->mtu - 40; 3602 u32 nsegs, n, segs = 0, flags; 3603 3604 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 3605 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 3606 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 3607 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 3608 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 3609 if (tpa_flags & BNXT_FLAG_GRO) 3610 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 3611 3612 req.flags = cpu_to_le32(flags); 3613 3614 req.enables = 3615 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 3616 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 3617 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 3618 3619 /* Number of segs are log2 units, and first packet is not 3620 * included as part of this units. 3621 */ 3622 if (mss <= BNXT_RX_PAGE_SIZE) { 3623 n = BNXT_RX_PAGE_SIZE / mss; 3624 nsegs = (MAX_SKB_FRAGS - 1) * n; 3625 } else { 3626 n = mss / BNXT_RX_PAGE_SIZE; 3627 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 3628 n++; 3629 nsegs = (MAX_SKB_FRAGS - n) / n; 3630 } 3631 3632 segs = ilog2(nsegs); 3633 req.max_agg_segs = cpu_to_le16(segs); 3634 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); 3635 3636 req.min_agg_len = cpu_to_le32(512); 3637 } 3638 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 3639 3640 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3641 } 3642 3643 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 3644 { 3645 u32 i, j, max_rings; 3646 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3647 struct hwrm_vnic_rss_cfg_input req = {0}; 3648 3649 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 3650 return 0; 3651 3652 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 3653 if (set_rss) { 3654 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 3655 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 3656 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3657 max_rings = bp->rx_nr_rings - 1; 3658 else 3659 max_rings = bp->rx_nr_rings; 3660 } else { 3661 max_rings = 1; 3662 } 3663 3664 /* Fill the RSS indirection table with ring group ids */ 3665 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 3666 if (j == max_rings) 3667 j = 0; 3668 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 3669 } 3670 3671 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 3672 req.hash_key_tbl_addr = 3673 cpu_to_le64(vnic->rss_hash_key_dma_addr); 3674 } 3675 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3676 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3677 } 3678 3679 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 3680 { 3681 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3682 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 3683 3684 
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 3685 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 3686 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 3687 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 3688 req.enables = 3689 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 3690 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 3691 /* thresholds not implemented in firmware yet */ 3692 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 3693 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 3694 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3695 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3696 } 3697 3698 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 3699 u16 ctx_idx) 3700 { 3701 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 3702 3703 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 3704 req.rss_cos_lb_ctx_id = 3705 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 3706 3707 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3708 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 3709 } 3710 3711 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 3712 { 3713 int i, j; 3714 3715 for (i = 0; i < bp->nr_vnics; i++) { 3716 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3717 3718 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 3719 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 3720 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 3721 } 3722 } 3723 bp->rsscos_nr_ctxs = 0; 3724 } 3725 3726 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 3727 { 3728 int rc; 3729 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 3730 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 3731 bp->hwrm_cmd_resp_addr; 3732 3733 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 3734 -1); 3735 3736 mutex_lock(&bp->hwrm_cmd_lock); 3737 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3738 if (!rc) 3739 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 3740 le16_to_cpu(resp->rss_cos_lb_ctx_id); 3741 mutex_unlock(&bp->hwrm_cmd_lock); 3742 3743 return rc; 3744 } 3745 3746 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 3747 { 3748 unsigned int ring = 0, grp_idx; 3749 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3750 struct hwrm_vnic_cfg_input req = {0}; 3751 u16 def_vlan = 0; 3752 3753 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 3754 3755 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 3756 /* Only RSS support for now TBD: COS & LB */ 3757 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 3758 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3759 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 3760 VNIC_CFG_REQ_ENABLES_MRU); 3761 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 3762 req.rss_rule = 3763 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 3764 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 3765 VNIC_CFG_REQ_ENABLES_MRU); 3766 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 3767 } else { 3768 req.rss_rule = cpu_to_le16(0xffff); 3769 } 3770 3771 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 3772 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 3773 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 3774 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 3775 } else { 3776 req.cos_rule = cpu_to_le16(0xffff); 3777 } 3778 3779 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3780 
ring = 0; 3781 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 3782 ring = vnic_id - 1; 3783 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 3784 ring = bp->rx_nr_rings - 1; 3785 3786 grp_idx = bp->rx_ring[ring].bnapi->index; 3787 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 3788 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 3789 3790 req.lb_rule = cpu_to_le16(0xffff); 3791 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 3792 VLAN_HLEN); 3793 3794 #ifdef CONFIG_BNXT_SRIOV 3795 if (BNXT_VF(bp)) 3796 def_vlan = bp->vf.vlan; 3797 #endif 3798 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 3799 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 3800 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 3801 req.flags |= 3802 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE); 3803 3804 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3805 } 3806 3807 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 3808 { 3809 u32 rc = 0; 3810 3811 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 3812 struct hwrm_vnic_free_input req = {0}; 3813 3814 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 3815 req.vnic_id = 3816 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 3817 3818 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3819 if (rc) 3820 return rc; 3821 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 3822 } 3823 return rc; 3824 } 3825 3826 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 3827 { 3828 u16 i; 3829 3830 for (i = 0; i < bp->nr_vnics; i++) 3831 bnxt_hwrm_vnic_free_one(bp, i); 3832 } 3833 3834 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 3835 unsigned int start_rx_ring_idx, 3836 unsigned int nr_rings) 3837 { 3838 int rc = 0; 3839 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 3840 struct hwrm_vnic_alloc_input req = {0}; 3841 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3842 3843 /* map ring groups to this vnic */ 3844 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 3845 grp_idx = bp->rx_ring[i].bnapi->index; 3846 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 3847 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 3848 j, nr_rings); 3849 break; 3850 } 3851 bp->vnic_info[vnic_id].fw_grp_ids[j] = 3852 bp->grp_info[grp_idx].fw_grp_id; 3853 } 3854 3855 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 3856 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 3857 if (vnic_id == 0) 3858 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 3859 3860 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 3861 3862 mutex_lock(&bp->hwrm_cmd_lock); 3863 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3864 if (!rc) 3865 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); 3866 mutex_unlock(&bp->hwrm_cmd_lock); 3867 return rc; 3868 } 3869 3870 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 3871 { 3872 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 3873 struct hwrm_vnic_qcaps_input req = {0}; 3874 int rc; 3875 3876 if (bp->hwrm_spec_code < 0x10600) 3877 return 0; 3878 3879 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 3880 mutex_lock(&bp->hwrm_cmd_lock); 3881 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3882 if (!rc) { 3883 if (resp->flags & 3884 cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 3885 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 3886 } 3887 
mutex_unlock(&bp->hwrm_cmd_lock); 3888 return rc; 3889 } 3890 3891 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 3892 { 3893 u16 i; 3894 u32 rc = 0; 3895 3896 mutex_lock(&bp->hwrm_cmd_lock); 3897 for (i = 0; i < bp->rx_nr_rings; i++) { 3898 struct hwrm_ring_grp_alloc_input req = {0}; 3899 struct hwrm_ring_grp_alloc_output *resp = 3900 bp->hwrm_cmd_resp_addr; 3901 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 3902 3903 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 3904 3905 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 3906 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 3907 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 3908 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 3909 3910 rc = _hwrm_send_message(bp, &req, sizeof(req), 3911 HWRM_CMD_TIMEOUT); 3912 if (rc) 3913 break; 3914 3915 bp->grp_info[grp_idx].fw_grp_id = 3916 le32_to_cpu(resp->ring_group_id); 3917 } 3918 mutex_unlock(&bp->hwrm_cmd_lock); 3919 return rc; 3920 } 3921 3922 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 3923 { 3924 u16 i; 3925 u32 rc = 0; 3926 struct hwrm_ring_grp_free_input req = {0}; 3927 3928 if (!bp->grp_info) 3929 return 0; 3930 3931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 3932 3933 mutex_lock(&bp->hwrm_cmd_lock); 3934 for (i = 0; i < bp->cp_nr_rings; i++) { 3935 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 3936 continue; 3937 req.ring_group_id = 3938 cpu_to_le32(bp->grp_info[i].fw_grp_id); 3939 3940 rc = _hwrm_send_message(bp, &req, sizeof(req), 3941 HWRM_CMD_TIMEOUT); 3942 if (rc) 3943 break; 3944 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3945 } 3946 mutex_unlock(&bp->hwrm_cmd_lock); 3947 return rc; 3948 } 3949 3950 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 3951 struct bnxt_ring_struct *ring, 3952 u32 ring_type, u32 map_index, 3953 u32 stats_ctx_id) 3954 { 3955 int rc = 0, err = 0; 3956 struct hwrm_ring_alloc_input req = {0}; 3957 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3958 u16 ring_id; 3959 3960 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 3961 3962 req.enables = 0; 3963 if (ring->nr_pages > 1) { 3964 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); 3965 /* Page size is in log2 units */ 3966 req.page_size = BNXT_PAGE_SHIFT; 3967 req.page_tbl_depth = 1; 3968 } else { 3969 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); 3970 } 3971 req.fbo = 0; 3972 /* Association of ring index with doorbell index and MSIX number */ 3973 req.logical_id = cpu_to_le16(map_index); 3974 3975 switch (ring_type) { 3976 case HWRM_RING_ALLOC_TX: 3977 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 3978 /* Association of transmit ring with completion ring */ 3979 req.cmpl_ring_id = 3980 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); 3981 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 3982 req.stat_ctx_id = cpu_to_le32(stats_ctx_id); 3983 req.queue_id = cpu_to_le16(ring->queue_id); 3984 break; 3985 case HWRM_RING_ALLOC_RX: 3986 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 3987 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 3988 break; 3989 case HWRM_RING_ALLOC_AGG: 3990 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 3991 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 3992 break; 3993 case HWRM_RING_ALLOC_CMPL: 3994 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 3995 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 3996 if (bp->flags & BNXT_FLAG_USING_MSIX) 3997 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 3998 break; 3999 default: 4000 netdev_err(bp->dev, 
"hwrm alloc invalid ring type %d\n", 4001 ring_type); 4002 return -1; 4003 } 4004 4005 mutex_lock(&bp->hwrm_cmd_lock); 4006 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4007 err = le16_to_cpu(resp->error_code); 4008 ring_id = le16_to_cpu(resp->ring_id); 4009 mutex_unlock(&bp->hwrm_cmd_lock); 4010 4011 if (rc || err) { 4012 switch (ring_type) { 4013 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4014 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", 4015 rc, err); 4016 return -1; 4017 4018 case RING_FREE_REQ_RING_TYPE_RX: 4019 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", 4020 rc, err); 4021 return -1; 4022 4023 case RING_FREE_REQ_RING_TYPE_TX: 4024 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", 4025 rc, err); 4026 return -1; 4027 4028 default: 4029 netdev_err(bp->dev, "Invalid ring\n"); 4030 return -1; 4031 } 4032 } 4033 ring->fw_ring_id = ring_id; 4034 return rc; 4035 } 4036 4037 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 4038 { 4039 int rc; 4040 4041 if (BNXT_PF(bp)) { 4042 struct hwrm_func_cfg_input req = {0}; 4043 4044 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4045 req.fid = cpu_to_le16(0xffff); 4046 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4047 req.async_event_cr = cpu_to_le16(idx); 4048 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4049 } else { 4050 struct hwrm_func_vf_cfg_input req = {0}; 4051 4052 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4053 req.enables = 4054 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4055 req.async_event_cr = cpu_to_le16(idx); 4056 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4057 } 4058 return rc; 4059 } 4060 4061 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 4062 { 4063 int i, rc = 0; 4064 4065 for (i = 0; i < bp->cp_nr_rings; i++) { 4066 struct bnxt_napi *bnapi = bp->bnapi[i]; 4067 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4068 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4069 4070 cpr->cp_doorbell = bp->bar1 + i * 0x80; 4071 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 4072 INVALID_STATS_CTX_ID); 4073 if (rc) 4074 goto err_out; 4075 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 4076 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4077 4078 if (!i) { 4079 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 4080 if (rc) 4081 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 4082 } 4083 } 4084 4085 for (i = 0; i < bp->tx_nr_rings; i++) { 4086 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4087 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4088 u32 map_idx = txr->bnapi->index; 4089 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; 4090 4091 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, 4092 map_idx, fw_stats_ctx); 4093 if (rc) 4094 goto err_out; 4095 txr->tx_doorbell = bp->bar1 + map_idx * 0x80; 4096 } 4097 4098 for (i = 0; i < bp->rx_nr_rings; i++) { 4099 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4100 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4101 u32 map_idx = rxr->bnapi->index; 4102 4103 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, 4104 map_idx, INVALID_STATS_CTX_ID); 4105 if (rc) 4106 goto err_out; 4107 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; 4108 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 4109 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 4110 } 4111 4112 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 4113 for (i = 0; i < 
bp->rx_nr_rings; i++) { 4114 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4115 struct bnxt_ring_struct *ring = 4116 &rxr->rx_agg_ring_struct; 4117 u32 grp_idx = rxr->bnapi->index; 4118 u32 map_idx = grp_idx + bp->rx_nr_rings; 4119 4120 rc = hwrm_ring_alloc_send_msg(bp, ring, 4121 HWRM_RING_ALLOC_AGG, 4122 map_idx, 4123 INVALID_STATS_CTX_ID); 4124 if (rc) 4125 goto err_out; 4126 4127 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; 4128 writel(DB_KEY_RX | rxr->rx_agg_prod, 4129 rxr->rx_agg_doorbell); 4130 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 4131 } 4132 } 4133 err_out: 4134 return rc; 4135 } 4136 4137 static int hwrm_ring_free_send_msg(struct bnxt *bp, 4138 struct bnxt_ring_struct *ring, 4139 u32 ring_type, int cmpl_ring_id) 4140 { 4141 int rc; 4142 struct hwrm_ring_free_input req = {0}; 4143 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 4144 u16 error_code; 4145 4146 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 4147 req.ring_type = ring_type; 4148 req.ring_id = cpu_to_le16(ring->fw_ring_id); 4149 4150 mutex_lock(&bp->hwrm_cmd_lock); 4151 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4152 error_code = le16_to_cpu(resp->error_code); 4153 mutex_unlock(&bp->hwrm_cmd_lock); 4154 4155 if (rc || error_code) { 4156 switch (ring_type) { 4157 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4158 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", 4159 rc); 4160 return rc; 4161 case RING_FREE_REQ_RING_TYPE_RX: 4162 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", 4163 rc); 4164 return rc; 4165 case RING_FREE_REQ_RING_TYPE_TX: 4166 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", 4167 rc); 4168 return rc; 4169 default: 4170 netdev_err(bp->dev, "Invalid ring\n"); 4171 return -1; 4172 } 4173 } 4174 return 0; 4175 } 4176 4177 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 4178 { 4179 int i; 4180 4181 if (!bp->bnapi) 4182 return; 4183 4184 for (i = 0; i < bp->tx_nr_rings; i++) { 4185 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4186 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4187 u32 grp_idx = txr->bnapi->index; 4188 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4189 4190 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4191 hwrm_ring_free_send_msg(bp, ring, 4192 RING_FREE_REQ_RING_TYPE_TX, 4193 close_path ? cmpl_ring_id : 4194 INVALID_HW_RING_ID); 4195 ring->fw_ring_id = INVALID_HW_RING_ID; 4196 } 4197 } 4198 4199 for (i = 0; i < bp->rx_nr_rings; i++) { 4200 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4201 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4202 u32 grp_idx = rxr->bnapi->index; 4203 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4204 4205 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4206 hwrm_ring_free_send_msg(bp, ring, 4207 RING_FREE_REQ_RING_TYPE_RX, 4208 close_path ? cmpl_ring_id : 4209 INVALID_HW_RING_ID); 4210 ring->fw_ring_id = INVALID_HW_RING_ID; 4211 bp->grp_info[grp_idx].rx_fw_ring_id = 4212 INVALID_HW_RING_ID; 4213 } 4214 } 4215 4216 for (i = 0; i < bp->rx_nr_rings; i++) { 4217 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4218 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 4219 u32 grp_idx = rxr->bnapi->index; 4220 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4221 4222 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4223 hwrm_ring_free_send_msg(bp, ring, 4224 RING_FREE_REQ_RING_TYPE_RX, 4225 close_path ? 
cmpl_ring_id : 4226 INVALID_HW_RING_ID); 4227 ring->fw_ring_id = INVALID_HW_RING_ID; 4228 bp->grp_info[grp_idx].agg_fw_ring_id = 4229 INVALID_HW_RING_ID; 4230 } 4231 } 4232 4233 /* The completion rings are about to be freed. After that the 4234 * IRQ doorbell will not work anymore. So we need to disable 4235 * IRQ here. 4236 */ 4237 bnxt_disable_int_sync(bp); 4238 4239 for (i = 0; i < bp->cp_nr_rings; i++) { 4240 struct bnxt_napi *bnapi = bp->bnapi[i]; 4241 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4242 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4243 4244 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4245 hwrm_ring_free_send_msg(bp, ring, 4246 RING_FREE_REQ_RING_TYPE_L2_CMPL, 4247 INVALID_HW_RING_ID); 4248 ring->fw_ring_id = INVALID_HW_RING_ID; 4249 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4250 } 4251 } 4252 } 4253 4254 /* Caller must hold bp->hwrm_cmd_lock */ 4255 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 4256 { 4257 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4258 struct hwrm_func_qcfg_input req = {0}; 4259 int rc; 4260 4261 if (bp->hwrm_spec_code < 0x10601) 4262 return 0; 4263 4264 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4265 req.fid = cpu_to_le16(fid); 4266 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4267 if (!rc) 4268 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 4269 4270 return rc; 4271 } 4272 4273 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) 4274 { 4275 struct hwrm_func_cfg_input req = {0}; 4276 int rc; 4277 4278 if (bp->hwrm_spec_code < 0x10601) 4279 return 0; 4280 4281 if (BNXT_VF(bp)) 4282 return 0; 4283 4284 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4285 req.fid = cpu_to_le16(0xffff); 4286 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); 4287 req.num_tx_rings = cpu_to_le16(*tx_rings); 4288 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4289 if (rc) 4290 return rc; 4291 4292 mutex_lock(&bp->hwrm_cmd_lock); 4293 rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); 4294 mutex_unlock(&bp->hwrm_cmd_lock); 4295 return rc; 4296 } 4297 4298 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, 4299 u32 buf_tmrs, u16 flags, 4300 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 4301 { 4302 req->flags = cpu_to_le16(flags); 4303 req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs); 4304 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16); 4305 req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs); 4306 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16); 4307 /* Minimum time between 2 interrupts set to buf_tmr x 2 */ 4308 req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2); 4309 req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4); 4310 req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4); 4311 } 4312 4313 int bnxt_hwrm_set_coal(struct bnxt *bp) 4314 { 4315 int i, rc = 0; 4316 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 4317 req_tx = {0}, *req; 4318 u16 max_buf, max_buf_irq; 4319 u16 buf_tmr, buf_tmr_irq; 4320 u32 flags; 4321 4322 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 4323 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4324 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 4325 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4326 4327 /* Each rx completion (2 records) should be DMAed immediately. 4328 * DMA 1/4 of the completion buffers at a time. 
4329 */ 4330 max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2); 4331 /* max_buf must not be zero */ 4332 max_buf = clamp_t(u16, max_buf, 1, 63); 4333 max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63); 4334 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks); 4335 /* buf timer set to 1/4 of interrupt timer */ 4336 buf_tmr = max_t(u16, buf_tmr / 4, 1); 4337 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq); 4338 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); 4339 4340 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4341 4342 /* RING_IDLE generates more IRQs for lower latency. Enable it only 4343 * if coal_ticks is less than 25 us. 4344 */ 4345 if (bp->rx_coal_ticks < 25) 4346 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 4347 4348 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, 4349 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx); 4350 4351 /* max_buf must not be zero */ 4352 max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63); 4353 max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63); 4354 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks); 4355 /* buf timer set to 1/4 of interrupt timer */ 4356 buf_tmr = max_t(u16, buf_tmr / 4, 1); 4357 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq); 4358 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); 4359 4360 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4361 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, 4362 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx); 4363 4364 mutex_lock(&bp->hwrm_cmd_lock); 4365 for (i = 0; i < bp->cp_nr_rings; i++) { 4366 struct bnxt_napi *bnapi = bp->bnapi[i]; 4367 4368 req = &req_rx; 4369 if (!bnapi->rx_ring) 4370 req = &req_tx; 4371 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); 4372 4373 rc = _hwrm_send_message(bp, req, sizeof(*req), 4374 HWRM_CMD_TIMEOUT); 4375 if (rc) 4376 break; 4377 } 4378 mutex_unlock(&bp->hwrm_cmd_lock); 4379 return rc; 4380 } 4381 4382 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 4383 { 4384 int rc = 0, i; 4385 struct hwrm_stat_ctx_free_input req = {0}; 4386 4387 if (!bp->bnapi) 4388 return 0; 4389 4390 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4391 return 0; 4392 4393 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 4394 4395 mutex_lock(&bp->hwrm_cmd_lock); 4396 for (i = 0; i < bp->cp_nr_rings; i++) { 4397 struct bnxt_napi *bnapi = bp->bnapi[i]; 4398 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4399 4400 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 4401 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 4402 4403 rc = _hwrm_send_message(bp, &req, sizeof(req), 4404 HWRM_CMD_TIMEOUT); 4405 if (rc) 4406 break; 4407 4408 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4409 } 4410 } 4411 mutex_unlock(&bp->hwrm_cmd_lock); 4412 return rc; 4413 } 4414 4415 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 4416 { 4417 int rc = 0, i; 4418 struct hwrm_stat_ctx_alloc_input req = {0}; 4419 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4420 4421 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4422 return 0; 4423 4424 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 4425 4426 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 4427 4428 mutex_lock(&bp->hwrm_cmd_lock); 4429 for (i = 0; i < bp->cp_nr_rings; i++) { 4430 struct bnxt_napi *bnapi = bp->bnapi[i]; 4431 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4432 4433 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 4434 4435 rc = _hwrm_send_message(bp, &req, sizeof(req), 4436 HWRM_CMD_TIMEOUT); 4437 if 
(rc) 4438 break; 4439 4440 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 4441 4442 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 4443 } 4444 mutex_unlock(&bp->hwrm_cmd_lock); 4445 return rc; 4446 } 4447 4448 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 4449 { 4450 struct hwrm_func_qcfg_input req = {0}; 4451 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4452 int rc; 4453 4454 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4455 req.fid = cpu_to_le16(0xffff); 4456 mutex_lock(&bp->hwrm_cmd_lock); 4457 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4458 if (rc) 4459 goto func_qcfg_exit; 4460 4461 #ifdef CONFIG_BNXT_SRIOV 4462 if (BNXT_VF(bp)) { 4463 struct bnxt_vf_info *vf = &bp->vf; 4464 4465 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 4466 } 4467 #endif 4468 if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) & 4469 FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)) 4470 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; 4471 4472 switch (resp->port_partition_type) { 4473 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 4474 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 4475 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 4476 bp->port_partition_type = resp->port_partition_type; 4477 break; 4478 } 4479 4480 func_qcfg_exit: 4481 mutex_unlock(&bp->hwrm_cmd_lock); 4482 return rc; 4483 } 4484 4485 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 4486 { 4487 int rc = 0; 4488 struct hwrm_func_qcaps_input req = {0}; 4489 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4490 4491 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 4492 req.fid = cpu_to_le16(0xffff); 4493 4494 mutex_lock(&bp->hwrm_cmd_lock); 4495 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4496 if (rc) 4497 goto hwrm_func_qcaps_exit; 4498 4499 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)) 4500 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 4501 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)) 4502 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 4503 4504 bp->tx_push_thresh = 0; 4505 if (resp->flags & 4506 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) 4507 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 4508 4509 if (BNXT_PF(bp)) { 4510 struct bnxt_pf_info *pf = &bp->pf; 4511 4512 pf->fw_fid = le16_to_cpu(resp->fid); 4513 pf->port_id = le16_to_cpu(resp->port_id); 4514 bp->dev->dev_port = pf->port_id; 4515 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 4516 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); 4517 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4518 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4519 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4520 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4521 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4522 if (!pf->max_hw_ring_grps) 4523 pf->max_hw_ring_grps = pf->max_tx_rings; 4524 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4525 pf->max_vnics = le16_to_cpu(resp->max_vnics); 4526 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4527 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 4528 pf->max_vfs = le16_to_cpu(resp->max_vfs); 4529 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 4530 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 4531 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 4532 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 4533 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 4534 pf->max_rx_wm_flows = 
le32_to_cpu(resp->max_rx_wm_flows); 4535 } else { 4536 #ifdef CONFIG_BNXT_SRIOV 4537 struct bnxt_vf_info *vf = &bp->vf; 4538 4539 vf->fw_fid = le16_to_cpu(resp->fid); 4540 4541 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4542 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4543 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4544 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4545 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4546 if (!vf->max_hw_ring_grps) 4547 vf->max_hw_ring_grps = vf->max_tx_rings; 4548 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4549 vf->max_vnics = le16_to_cpu(resp->max_vnics); 4550 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4551 4552 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 4553 mutex_unlock(&bp->hwrm_cmd_lock); 4554 4555 if (is_valid_ether_addr(vf->mac_addr)) { 4556 /* overwrite netdev dev_addr with admin VF MAC */ 4557 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 4558 } else { 4559 eth_hw_addr_random(bp->dev); 4560 rc = bnxt_approve_mac(bp, bp->dev->dev_addr); 4561 } 4562 return rc; 4563 #endif 4564 } 4565 4566 hwrm_func_qcaps_exit: 4567 mutex_unlock(&bp->hwrm_cmd_lock); 4568 return rc; 4569 } 4570 4571 static int bnxt_hwrm_func_reset(struct bnxt *bp) 4572 { 4573 struct hwrm_func_reset_input req = {0}; 4574 4575 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 4576 req.enables = 0; 4577 4578 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 4579 } 4580 4581 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 4582 { 4583 int rc = 0; 4584 struct hwrm_queue_qportcfg_input req = {0}; 4585 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 4586 u8 i, *qptr; 4587 4588 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 4589 4590 mutex_lock(&bp->hwrm_cmd_lock); 4591 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4592 if (rc) 4593 goto qportcfg_exit; 4594 4595 if (!resp->max_configurable_queues) { 4596 rc = -EINVAL; 4597 goto qportcfg_exit; 4598 } 4599 bp->max_tc = resp->max_configurable_queues; 4600 bp->max_lltc = resp->max_configurable_lossless_queues; 4601 if (bp->max_tc > BNXT_MAX_QUEUE) 4602 bp->max_tc = BNXT_MAX_QUEUE; 4603 4604 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 4605 bp->max_tc = 1; 4606 4607 if (bp->max_lltc > bp->max_tc) 4608 bp->max_lltc = bp->max_tc; 4609 4610 qptr = &resp->queue_id0; 4611 for (i = 0; i < bp->max_tc; i++) { 4612 bp->q_info[i].queue_id = *qptr++; 4613 bp->q_info[i].queue_profile = *qptr++; 4614 } 4615 4616 qportcfg_exit: 4617 mutex_unlock(&bp->hwrm_cmd_lock); 4618 return rc; 4619 } 4620 4621 static int bnxt_hwrm_ver_get(struct bnxt *bp) 4622 { 4623 int rc; 4624 struct hwrm_ver_get_input req = {0}; 4625 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 4626 4627 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 4628 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 4629 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 4630 req.hwrm_intf_min = HWRM_VERSION_MINOR; 4631 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 4632 mutex_lock(&bp->hwrm_cmd_lock); 4633 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4634 if (rc) 4635 goto hwrm_ver_get_exit; 4636 4637 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 4638 4639 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | 4640 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; 4641 if (resp->hwrm_intf_maj < 1) { 4642 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 4643
resp->hwrm_intf_maj, resp->hwrm_intf_min, 4644 resp->hwrm_intf_upd); 4645 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 4646 } 4647 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d", 4648 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, 4649 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); 4650 4651 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 4652 if (!bp->hwrm_cmd_timeout) 4653 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 4654 4655 if (resp->hwrm_intf_maj >= 1) 4656 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 4657 4658 bp->chip_num = le16_to_cpu(resp->chip_num); 4659 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 4660 !resp->chip_metal) 4661 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 4662 4663 hwrm_ver_get_exit: 4664 mutex_unlock(&bp->hwrm_cmd_lock); 4665 return rc; 4666 } 4667 4668 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 4669 { 4670 #if IS_ENABLED(CONFIG_RTC_LIB) 4671 struct hwrm_fw_set_time_input req = {0}; 4672 struct rtc_time tm; 4673 struct timeval tv; 4674 4675 if (bp->hwrm_spec_code < 0x10400) 4676 return -EOPNOTSUPP; 4677 4678 do_gettimeofday(&tv); 4679 rtc_time_to_tm(tv.tv_sec, &tm); 4680 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 4681 req.year = cpu_to_le16(1900 + tm.tm_year); 4682 req.month = 1 + tm.tm_mon; 4683 req.day = tm.tm_mday; 4684 req.hour = tm.tm_hour; 4685 req.minute = tm.tm_min; 4686 req.second = tm.tm_sec; 4687 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4688 #else 4689 return -EOPNOTSUPP; 4690 #endif 4691 } 4692 4693 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 4694 { 4695 int rc; 4696 struct bnxt_pf_info *pf = &bp->pf; 4697 struct hwrm_port_qstats_input req = {0}; 4698 4699 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 4700 return 0; 4701 4702 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 4703 req.port_id = cpu_to_le16(pf->port_id); 4704 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 4705 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 4706 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4707 return rc; 4708 } 4709 4710 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 4711 { 4712 if (bp->vxlan_port_cnt) { 4713 bnxt_hwrm_tunnel_dst_port_free( 4714 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 4715 } 4716 bp->vxlan_port_cnt = 0; 4717 if (bp->nge_port_cnt) { 4718 bnxt_hwrm_tunnel_dst_port_free( 4719 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 4720 } 4721 bp->nge_port_cnt = 0; 4722 } 4723 4724 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 4725 { 4726 int rc, i; 4727 u32 tpa_flags = 0; 4728 4729 if (set_tpa) 4730 tpa_flags = bp->flags & BNXT_FLAG_TPA; 4731 for (i = 0; i < bp->nr_vnics; i++) { 4732 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4733 if (rc) { 4734 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4735 i, rc); 4736 return rc; 4737 } 4738 } 4739 return 0; 4740 } 4741 4742 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 4743 { 4744 int i; 4745 4746 for (i = 0; i < bp->nr_vnics; i++) 4747 bnxt_hwrm_vnic_set_rss(bp, i, false); 4748 } 4749 4750 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 4751 bool irq_re_init) 4752 { 4753 if (bp->vnic_info) { 4754 bnxt_hwrm_clear_vnic_filter(bp); 4755 /* clear all RSS settings before freeing the vnic ctx */ 4756 bnxt_hwrm_clear_vnic_rss(bp); 4757 bnxt_hwrm_vnic_ctx_free(bp); 4758 /* before freeing the vnic, undo the vnic tpa settings */ 4759 if
(bp->flags & BNXT_FLAG_TPA) 4760 bnxt_set_tpa(bp, false); 4761 bnxt_hwrm_vnic_free(bp); 4762 } 4763 bnxt_hwrm_ring_free(bp, close_path); 4764 bnxt_hwrm_ring_grp_free(bp); 4765 if (irq_re_init) { 4766 bnxt_hwrm_stat_ctx_free(bp); 4767 bnxt_hwrm_free_tunnel_ports(bp); 4768 } 4769 } 4770 4771 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 4772 { 4773 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 4774 int rc; 4775 4776 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 4777 goto skip_rss_ctx; 4778 4779 /* allocate context for vnic */ 4780 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 4781 if (rc) { 4782 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 4783 vnic_id, rc); 4784 goto vnic_setup_err; 4785 } 4786 bp->rsscos_nr_ctxs++; 4787 4788 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 4789 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 4790 if (rc) { 4791 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 4792 vnic_id, rc); 4793 goto vnic_setup_err; 4794 } 4795 bp->rsscos_nr_ctxs++; 4796 } 4797 4798 skip_rss_ctx: 4799 /* configure default vnic, ring grp */ 4800 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 4801 if (rc) { 4802 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 4803 vnic_id, rc); 4804 goto vnic_setup_err; 4805 } 4806 4807 /* Enable RSS hashing on vnic */ 4808 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 4809 if (rc) { 4810 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 4811 vnic_id, rc); 4812 goto vnic_setup_err; 4813 } 4814 4815 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 4816 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 4817 if (rc) { 4818 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 4819 vnic_id, rc); 4820 } 4821 } 4822 4823 vnic_setup_err: 4824 return rc; 4825 } 4826 4827 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 4828 { 4829 #ifdef CONFIG_RFS_ACCEL 4830 int i, rc = 0; 4831 4832 for (i = 0; i < bp->rx_nr_rings; i++) { 4833 struct bnxt_vnic_info *vnic; 4834 u16 vnic_id = i + 1; 4835 u16 ring_id = i; 4836 4837 if (vnic_id >= bp->nr_vnics) 4838 break; 4839 4840 vnic = &bp->vnic_info[vnic_id]; 4841 vnic->flags |= BNXT_VNIC_RFS_FLAG; 4842 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 4843 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 4844 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 4845 if (rc) { 4846 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 4847 vnic_id, rc); 4848 break; 4849 } 4850 rc = bnxt_setup_vnic(bp, vnic_id); 4851 if (rc) 4852 break; 4853 } 4854 return rc; 4855 #else 4856 return 0; 4857 #endif 4858 } 4859 4860 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 4861 static bool bnxt_promisc_ok(struct bnxt *bp) 4862 { 4863 #ifdef CONFIG_BNXT_SRIOV 4864 if (BNXT_VF(bp) && !bp->vf.vlan) 4865 return false; 4866 #endif 4867 return true; 4868 } 4869 4870 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 4871 { 4872 unsigned int rc = 0; 4873 4874 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 4875 if (rc) { 4876 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 4877 rc); 4878 return rc; 4879 } 4880 4881 rc = bnxt_hwrm_vnic_cfg(bp, 1); 4882 if (rc) { 4883 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 4884 rc); 4885 return rc; 4886 } 4887 return rc; 4888 } 4889 4890 static int bnxt_cfg_rx_mode(struct bnxt *); 4891 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 4892 4893 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 4894 { 4895 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 4896 int rc = 0; 4897 unsigned int 
rx_nr_rings = bp->rx_nr_rings; 4898 4899 if (irq_re_init) { 4900 rc = bnxt_hwrm_stat_ctx_alloc(bp); 4901 if (rc) { 4902 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 4903 rc); 4904 goto err_out; 4905 } 4906 } 4907 4908 rc = bnxt_hwrm_ring_alloc(bp); 4909 if (rc) { 4910 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 4911 goto err_out; 4912 } 4913 4914 rc = bnxt_hwrm_ring_grp_alloc(bp); 4915 if (rc) { 4916 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 4917 goto err_out; 4918 } 4919 4920 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4921 rx_nr_rings--; 4922 4923 /* default vnic 0 */ 4924 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 4925 if (rc) { 4926 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 4927 goto err_out; 4928 } 4929 4930 rc = bnxt_setup_vnic(bp, 0); 4931 if (rc) 4932 goto err_out; 4933 4934 if (bp->flags & BNXT_FLAG_RFS) { 4935 rc = bnxt_alloc_rfs_vnics(bp); 4936 if (rc) 4937 goto err_out; 4938 } 4939 4940 if (bp->flags & BNXT_FLAG_TPA) { 4941 rc = bnxt_set_tpa(bp, true); 4942 if (rc) 4943 goto err_out; 4944 } 4945 4946 if (BNXT_VF(bp)) 4947 bnxt_update_vf_mac(bp); 4948 4949 /* Filter for default vnic 0 */ 4950 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 4951 if (rc) { 4952 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 4953 goto err_out; 4954 } 4955 vnic->uc_filter_count = 1; 4956 4957 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 4958 4959 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 4960 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 4961 4962 if (bp->dev->flags & IFF_ALLMULTI) { 4963 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 4964 vnic->mc_list_count = 0; 4965 } else { 4966 u32 mask = 0; 4967 4968 bnxt_mc_list_updated(bp, &mask); 4969 vnic->rx_mask |= mask; 4970 } 4971 4972 rc = bnxt_cfg_rx_mode(bp); 4973 if (rc) 4974 goto err_out; 4975 4976 rc = bnxt_hwrm_set_coal(bp); 4977 if (rc) 4978 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 4979 rc); 4980 4981 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 4982 rc = bnxt_setup_nitroa0_vnic(bp); 4983 if (rc) 4984 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 4985 rc); 4986 } 4987 4988 if (BNXT_VF(bp)) { 4989 bnxt_hwrm_func_qcfg(bp); 4990 netdev_update_features(bp->dev); 4991 } 4992 4993 return 0; 4994 4995 err_out: 4996 bnxt_hwrm_resource_free(bp, 0, true); 4997 4998 return rc; 4999 } 5000 5001 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 5002 { 5003 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 5004 return 0; 5005 } 5006 5007 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 5008 { 5009 bnxt_init_rx_rings(bp); 5010 bnxt_init_tx_rings(bp); 5011 bnxt_init_ring_grps(bp, irq_re_init); 5012 bnxt_init_vnics(bp); 5013 5014 return bnxt_init_chip(bp, irq_re_init); 5015 } 5016 5017 static int bnxt_set_real_num_queues(struct bnxt *bp) 5018 { 5019 int rc; 5020 struct net_device *dev = bp->dev; 5021 5022 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 5023 bp->tx_nr_rings_xdp); 5024 if (rc) 5025 return rc; 5026 5027 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 5028 if (rc) 5029 return rc; 5030 5031 #ifdef CONFIG_RFS_ACCEL 5032 if (bp->flags & BNXT_FLAG_RFS) 5033 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 5034 #endif 5035 5036 return rc; 5037 } 5038 5039 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5040 bool shared) 5041 { 5042 int _rx = *rx, _tx = *tx; 5043 5044 if (shared) { 5045 *rx = min_t(int, _rx, max); 
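/* Shared rings are capped independently; e.g. assuming max = 8, _rx = 10 becomes 8 and _tx = 4 stays 4. The non-shared branch below instead shrinks rx/tx until their sum fits within max. */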
5046 *tx = min_t(int, _tx, max); 5047 } else { 5048 if (max < 2) 5049 return -ENOMEM; 5050 5051 while (_rx + _tx > max) { 5052 if (_rx > _tx && _rx > 1) 5053 _rx--; 5054 else if (_tx > 1) 5055 _tx--; 5056 } 5057 *rx = _rx; 5058 *tx = _tx; 5059 } 5060 return 0; 5061 } 5062 5063 static void bnxt_setup_msix(struct bnxt *bp) 5064 { 5065 const int len = sizeof(bp->irq_tbl[0].name); 5066 struct net_device *dev = bp->dev; 5067 int tcs, i; 5068 5069 tcs = netdev_get_num_tc(dev); 5070 if (tcs > 1) { 5071 int i, off, count; 5072 5073 for (i = 0; i < tcs; i++) { 5074 count = bp->tx_nr_rings_per_tc; 5075 off = i * count; 5076 netdev_set_tc_queue(dev, i, count, off); 5077 } 5078 } 5079 5080 for (i = 0; i < bp->cp_nr_rings; i++) { 5081 char *attr; 5082 5083 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5084 attr = "TxRx"; 5085 else if (i < bp->rx_nr_rings) 5086 attr = "rx"; 5087 else 5088 attr = "tx"; 5089 5090 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr, 5091 i); 5092 bp->irq_tbl[i].handler = bnxt_msix; 5093 } 5094 } 5095 5096 static void bnxt_setup_inta(struct bnxt *bp) 5097 { 5098 const int len = sizeof(bp->irq_tbl[0].name); 5099 5100 if (netdev_get_num_tc(bp->dev)) 5101 netdev_reset_tc(bp->dev); 5102 5103 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 5104 0); 5105 bp->irq_tbl[0].handler = bnxt_inta; 5106 } 5107 5108 static int bnxt_setup_int_mode(struct bnxt *bp) 5109 { 5110 int rc; 5111 5112 if (bp->flags & BNXT_FLAG_USING_MSIX) 5113 bnxt_setup_msix(bp); 5114 else 5115 bnxt_setup_inta(bp); 5116 5117 rc = bnxt_set_real_num_queues(bp); 5118 return rc; 5119 } 5120 5121 #ifdef CONFIG_RFS_ACCEL 5122 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 5123 { 5124 #if defined(CONFIG_BNXT_SRIOV) 5125 if (BNXT_VF(bp)) 5126 return bp->vf.max_rsscos_ctxs; 5127 #endif 5128 return bp->pf.max_rsscos_ctxs; 5129 } 5130 5131 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 5132 { 5133 #if defined(CONFIG_BNXT_SRIOV) 5134 if (BNXT_VF(bp)) 5135 return bp->vf.max_vnics; 5136 #endif 5137 return bp->pf.max_vnics; 5138 } 5139 #endif 5140 5141 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 5142 { 5143 #if defined(CONFIG_BNXT_SRIOV) 5144 if (BNXT_VF(bp)) 5145 return bp->vf.max_stat_ctxs; 5146 #endif 5147 return bp->pf.max_stat_ctxs; 5148 } 5149 5150 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) 5151 { 5152 #if defined(CONFIG_BNXT_SRIOV) 5153 if (BNXT_VF(bp)) 5154 bp->vf.max_stat_ctxs = max; 5155 else 5156 #endif 5157 bp->pf.max_stat_ctxs = max; 5158 } 5159 5160 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 5161 { 5162 #if defined(CONFIG_BNXT_SRIOV) 5163 if (BNXT_VF(bp)) 5164 return bp->vf.max_cp_rings; 5165 #endif 5166 return bp->pf.max_cp_rings; 5167 } 5168 5169 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) 5170 { 5171 #if defined(CONFIG_BNXT_SRIOV) 5172 if (BNXT_VF(bp)) 5173 bp->vf.max_cp_rings = max; 5174 else 5175 #endif 5176 bp->pf.max_cp_rings = max; 5177 } 5178 5179 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 5180 { 5181 #if defined(CONFIG_BNXT_SRIOV) 5182 if (BNXT_VF(bp)) 5183 return bp->vf.max_irqs; 5184 #endif 5185 return bp->pf.max_irqs; 5186 } 5187 5188 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 5189 { 5190 #if defined(CONFIG_BNXT_SRIOV) 5191 if (BNXT_VF(bp)) 5192 bp->vf.max_irqs = max_irqs; 5193 else 5194 #endif 5195 bp->pf.max_irqs = max_irqs; 5196 } 5197 5198 static int bnxt_init_msix(struct bnxt *bp) 5199 { 5200 int i, total_vecs, rc = 0, min = 
1; 5201 struct msix_entry *msix_ent; 5202 5203 total_vecs = bnxt_get_max_func_irqs(bp); 5204 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 5205 if (!msix_ent) 5206 return -ENOMEM; 5207 5208 for (i = 0; i < total_vecs; i++) { 5209 msix_ent[i].entry = i; 5210 msix_ent[i].vector = 0; 5211 } 5212 5213 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 5214 min = 2; 5215 5216 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 5217 if (total_vecs < 0) { 5218 rc = -ENODEV; 5219 goto msix_setup_exit; 5220 } 5221 5222 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 5223 if (bp->irq_tbl) { 5224 for (i = 0; i < total_vecs; i++) 5225 bp->irq_tbl[i].vector = msix_ent[i].vector; 5226 5227 bp->total_irqs = total_vecs; 5228 /* Trim rings based upon num of vectors allocated */ 5229 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 5230 total_vecs, min == 1); 5231 if (rc) 5232 goto msix_setup_exit; 5233 5234 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5235 bp->cp_nr_rings = (min == 1) ? 5236 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 5237 bp->tx_nr_rings + bp->rx_nr_rings; 5238 5239 } else { 5240 rc = -ENOMEM; 5241 goto msix_setup_exit; 5242 } 5243 bp->flags |= BNXT_FLAG_USING_MSIX; 5244 kfree(msix_ent); 5245 return 0; 5246 5247 msix_setup_exit: 5248 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 5249 kfree(bp->irq_tbl); 5250 bp->irq_tbl = NULL; 5251 pci_disable_msix(bp->pdev); 5252 kfree(msix_ent); 5253 return rc; 5254 } 5255 5256 static int bnxt_init_inta(struct bnxt *bp) 5257 { 5258 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 5259 if (!bp->irq_tbl) 5260 return -ENOMEM; 5261 5262 bp->total_irqs = 1; 5263 bp->rx_nr_rings = 1; 5264 bp->tx_nr_rings = 1; 5265 bp->cp_nr_rings = 1; 5266 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5267 bp->flags |= BNXT_FLAG_SHARED_RINGS; 5268 bp->irq_tbl[0].vector = bp->pdev->irq; 5269 return 0; 5270 } 5271 5272 static int bnxt_init_int_mode(struct bnxt *bp) 5273 { 5274 int rc = 0; 5275 5276 if (bp->flags & BNXT_FLAG_MSIX_CAP) 5277 rc = bnxt_init_msix(bp); 5278 5279 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 5280 /* fallback to INTA */ 5281 rc = bnxt_init_inta(bp); 5282 } 5283 return rc; 5284 } 5285 5286 static void bnxt_clear_int_mode(struct bnxt *bp) 5287 { 5288 if (bp->flags & BNXT_FLAG_USING_MSIX) 5289 pci_disable_msix(bp->pdev); 5290 5291 kfree(bp->irq_tbl); 5292 bp->irq_tbl = NULL; 5293 bp->flags &= ~BNXT_FLAG_USING_MSIX; 5294 } 5295 5296 static void bnxt_free_irq(struct bnxt *bp) 5297 { 5298 struct bnxt_irq *irq; 5299 int i; 5300 5301 #ifdef CONFIG_RFS_ACCEL 5302 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 5303 bp->dev->rx_cpu_rmap = NULL; 5304 #endif 5305 if (!bp->irq_tbl) 5306 return; 5307 5308 for (i = 0; i < bp->cp_nr_rings; i++) { 5309 irq = &bp->irq_tbl[i]; 5310 if (irq->requested) 5311 free_irq(irq->vector, bp->bnapi[i]); 5312 irq->requested = 0; 5313 } 5314 } 5315 5316 static int bnxt_request_irq(struct bnxt *bp) 5317 { 5318 int i, j, rc = 0; 5319 unsigned long flags = 0; 5320 #ifdef CONFIG_RFS_ACCEL 5321 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; 5322 #endif 5323 5324 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 5325 flags = IRQF_SHARED; 5326 5327 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 5328 struct bnxt_irq *irq = &bp->irq_tbl[i]; 5329 #ifdef CONFIG_RFS_ACCEL 5330 if (rmap && bp->bnapi[i]->rx_ring) { 5331 rc = irq_cpu_rmap_add(rmap, irq->vector); 5332 if (rc) 5333 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 5334 j); 5335 j++; 5336 
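/* j counts only the rx-capable vectors added to the reverse map above, so the rmap warning reports an rx ring index rather than the completion ring index i. */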
} 5337 #endif 5338 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 5339 bp->bnapi[i]); 5340 if (rc) 5341 break; 5342 5343 irq->requested = 1; 5344 } 5345 return rc; 5346 } 5347 5348 static void bnxt_del_napi(struct bnxt *bp) 5349 { 5350 int i; 5351 5352 if (!bp->bnapi) 5353 return; 5354 5355 for (i = 0; i < bp->cp_nr_rings; i++) { 5356 struct bnxt_napi *bnapi = bp->bnapi[i]; 5357 5358 napi_hash_del(&bnapi->napi); 5359 netif_napi_del(&bnapi->napi); 5360 } 5361 /* We called napi_hash_del() before netif_napi_del(), we need 5362 * to respect an RCU grace period before freeing napi structures. 5363 */ 5364 synchronize_net(); 5365 } 5366 5367 static void bnxt_init_napi(struct bnxt *bp) 5368 { 5369 int i; 5370 unsigned int cp_nr_rings = bp->cp_nr_rings; 5371 struct bnxt_napi *bnapi; 5372 5373 if (bp->flags & BNXT_FLAG_USING_MSIX) { 5374 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5375 cp_nr_rings--; 5376 for (i = 0; i < cp_nr_rings; i++) { 5377 bnapi = bp->bnapi[i]; 5378 netif_napi_add(bp->dev, &bnapi->napi, 5379 bnxt_poll, 64); 5380 } 5381 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5382 bnapi = bp->bnapi[cp_nr_rings]; 5383 netif_napi_add(bp->dev, &bnapi->napi, 5384 bnxt_poll_nitroa0, 64); 5385 } 5386 } else { 5387 bnapi = bp->bnapi[0]; 5388 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 5389 } 5390 } 5391 5392 static void bnxt_disable_napi(struct bnxt *bp) 5393 { 5394 int i; 5395 5396 if (!bp->bnapi) 5397 return; 5398 5399 for (i = 0; i < bp->cp_nr_rings; i++) 5400 napi_disable(&bp->bnapi[i]->napi); 5401 } 5402 5403 static void bnxt_enable_napi(struct bnxt *bp) 5404 { 5405 int i; 5406 5407 for (i = 0; i < bp->cp_nr_rings; i++) { 5408 bp->bnapi[i]->in_reset = false; 5409 napi_enable(&bp->bnapi[i]->napi); 5410 } 5411 } 5412 5413 void bnxt_tx_disable(struct bnxt *bp) 5414 { 5415 int i; 5416 struct bnxt_tx_ring_info *txr; 5417 struct netdev_queue *txq; 5418 5419 if (bp->tx_ring) { 5420 for (i = 0; i < bp->tx_nr_rings; i++) { 5421 txr = &bp->tx_ring[i]; 5422 txq = netdev_get_tx_queue(bp->dev, i); 5423 txr->dev_state = BNXT_DEV_STATE_CLOSING; 5424 } 5425 } 5426 /* Stop all TX queues */ 5427 netif_tx_disable(bp->dev); 5428 netif_carrier_off(bp->dev); 5429 } 5430 5431 void bnxt_tx_enable(struct bnxt *bp) 5432 { 5433 int i; 5434 struct bnxt_tx_ring_info *txr; 5435 struct netdev_queue *txq; 5436 5437 for (i = 0; i < bp->tx_nr_rings; i++) { 5438 txr = &bp->tx_ring[i]; 5439 txq = netdev_get_tx_queue(bp->dev, i); 5440 txr->dev_state = 0; 5441 } 5442 netif_tx_wake_all_queues(bp->dev); 5443 if (bp->link_info.link_up) 5444 netif_carrier_on(bp->dev); 5445 } 5446 5447 static void bnxt_report_link(struct bnxt *bp) 5448 { 5449 if (bp->link_info.link_up) { 5450 const char *duplex; 5451 const char *flow_ctrl; 5452 u16 speed, fec; 5453 5454 netif_carrier_on(bp->dev); 5455 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 5456 duplex = "full"; 5457 else 5458 duplex = "half"; 5459 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 5460 flow_ctrl = "ON - receive & transmit"; 5461 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 5462 flow_ctrl = "ON - transmit"; 5463 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 5464 flow_ctrl = "ON - receive"; 5465 else 5466 flow_ctrl = "none"; 5467 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 5468 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 5469 speed, duplex, flow_ctrl); 5470 if (bp->flags & BNXT_FLAG_EEE_CAP) 5471 netdev_info(bp->dev, "EEE is %s\n", 5472 bp->eee.eee_active ? 
"active" : 5473 "not active"); 5474 fec = bp->link_info.fec_cfg; 5475 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 5476 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 5477 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 5478 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 5479 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None"); 5480 } else { 5481 netif_carrier_off(bp->dev); 5482 netdev_err(bp->dev, "NIC Link is Down\n"); 5483 } 5484 } 5485 5486 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 5487 { 5488 int rc = 0; 5489 struct hwrm_port_phy_qcaps_input req = {0}; 5490 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5491 struct bnxt_link_info *link_info = &bp->link_info; 5492 5493 if (bp->hwrm_spec_code < 0x10201) 5494 return 0; 5495 5496 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 5497 5498 mutex_lock(&bp->hwrm_cmd_lock); 5499 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5500 if (rc) 5501 goto hwrm_phy_qcaps_exit; 5502 5503 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { 5504 struct ethtool_eee *eee = &bp->eee; 5505 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 5506 5507 bp->flags |= BNXT_FLAG_EEE_CAP; 5508 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5509 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 5510 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 5511 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 5512 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 5513 } 5514 if (resp->supported_speeds_auto_mode) 5515 link_info->support_auto_speeds = 5516 le16_to_cpu(resp->supported_speeds_auto_mode); 5517 5518 hwrm_phy_qcaps_exit: 5519 mutex_unlock(&bp->hwrm_cmd_lock); 5520 return rc; 5521 } 5522 5523 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 5524 { 5525 int rc = 0; 5526 struct bnxt_link_info *link_info = &bp->link_info; 5527 struct hwrm_port_phy_qcfg_input req = {0}; 5528 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5529 u8 link_up = link_info->link_up; 5530 u16 diff; 5531 5532 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 5533 5534 mutex_lock(&bp->hwrm_cmd_lock); 5535 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5536 if (rc) { 5537 mutex_unlock(&bp->hwrm_cmd_lock); 5538 return rc; 5539 } 5540 5541 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 5542 link_info->phy_link_status = resp->link; 5543 link_info->duplex = resp->duplex; 5544 link_info->pause = resp->pause; 5545 link_info->auto_mode = resp->auto_mode; 5546 link_info->auto_pause_setting = resp->auto_pause; 5547 link_info->lp_pause = resp->link_partner_adv_pause; 5548 link_info->force_pause_setting = resp->force_pause; 5549 link_info->duplex_setting = resp->duplex; 5550 if (link_info->phy_link_status == BNXT_LINK_LINK) 5551 link_info->link_speed = le16_to_cpu(resp->link_speed); 5552 else 5553 link_info->link_speed = 0; 5554 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 5555 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 5556 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 5557 link_info->lp_auto_link_speeds = 5558 le16_to_cpu(resp->link_partner_adv_speeds); 5559 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 5560 link_info->phy_ver[0] = resp->phy_maj; 5561 link_info->phy_ver[1] = resp->phy_min; 5562 link_info->phy_ver[2] = resp->phy_bld; 5563 link_info->media_type = resp->media_type; 5564 link_info->phy_type = resp->phy_type; 5565 link_info->transceiver = 
resp->xcvr_pkg_type; 5566 link_info->phy_addr = resp->eee_config_phy_addr & 5567 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 5568 link_info->module_status = resp->module_status; 5569 5570 if (bp->flags & BNXT_FLAG_EEE_CAP) { 5571 struct ethtool_eee *eee = &bp->eee; 5572 u16 fw_speeds; 5573 5574 eee->eee_active = 0; 5575 if (resp->eee_config_phy_addr & 5576 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 5577 eee->eee_active = 1; 5578 fw_speeds = le16_to_cpu( 5579 resp->link_partner_adv_eee_link_speed_mask); 5580 eee->lp_advertised = 5581 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5582 } 5583 5584 /* Pull initial EEE config */ 5585 if (!chng_link_state) { 5586 if (resp->eee_config_phy_addr & 5587 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 5588 eee->eee_enabled = 1; 5589 5590 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 5591 eee->advertised = 5592 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5593 5594 if (resp->eee_config_phy_addr & 5595 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 5596 __le32 tmr; 5597 5598 eee->tx_lpi_enabled = 1; 5599 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 5600 eee->tx_lpi_timer = le32_to_cpu(tmr) & 5601 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 5602 } 5603 } 5604 } 5605 5606 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 5607 if (bp->hwrm_spec_code >= 0x10504) 5608 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 5609 5610 /* TODO: need to add more logic to report VF link */ 5611 if (chng_link_state) { 5612 if (link_info->phy_link_status == BNXT_LINK_LINK) 5613 link_info->link_up = 1; 5614 else 5615 link_info->link_up = 0; 5616 if (link_up != link_info->link_up) 5617 bnxt_report_link(bp); 5618 } else { 5619 /* always link down if not required to update link state */ 5620 link_info->link_up = 0; 5621 } 5622 mutex_unlock(&bp->hwrm_cmd_lock); 5623 5624 diff = link_info->support_auto_speeds ^ link_info->advertising; 5625 if ((link_info->support_auto_speeds | diff) != 5626 link_info->support_auto_speeds) { 5627 /* An advertised speed is no longer supported, so we need to 5628 * update the advertisement settings. Caller holds RTNL 5629 * so we can modify link settings.
5630 */ 5631 link_info->advertising = link_info->support_auto_speeds; 5632 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 5633 bnxt_hwrm_set_link_setting(bp, true, false); 5634 } 5635 return 0; 5636 } 5637 5638 static void bnxt_get_port_module_status(struct bnxt *bp) 5639 { 5640 struct bnxt_link_info *link_info = &bp->link_info; 5641 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 5642 u8 module_status; 5643 5644 if (bnxt_update_link(bp, true)) 5645 return; 5646 5647 module_status = link_info->module_status; 5648 switch (module_status) { 5649 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 5650 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 5651 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 5652 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 5653 bp->pf.port_id); 5654 if (bp->hwrm_spec_code >= 0x10201) { 5655 netdev_warn(bp->dev, "Module part number %s\n", 5656 resp->phy_vendor_partnumber); 5657 } 5658 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 5659 netdev_warn(bp->dev, "TX is disabled\n"); 5660 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 5661 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 5662 } 5663 } 5664 5665 static void 5666 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 5667 { 5668 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 5669 if (bp->hwrm_spec_code >= 0x10201) 5670 req->auto_pause = 5671 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 5672 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5673 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 5674 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5675 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 5676 req->enables |= 5677 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5678 } else { 5679 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5680 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 5681 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5682 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 5683 req->enables |= 5684 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 5685 if (bp->hwrm_spec_code >= 0x10201) { 5686 req->auto_pause = req->force_pause; 5687 req->enables |= cpu_to_le32( 5688 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5689 } 5690 } 5691 } 5692 5693 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 5694 struct hwrm_port_phy_cfg_input *req) 5695 { 5696 u8 autoneg = bp->link_info.autoneg; 5697 u16 fw_link_speed = bp->link_info.req_link_speed; 5698 u16 advertising = bp->link_info.advertising; 5699 5700 if (autoneg & BNXT_AUTONEG_SPEED) { 5701 req->auto_mode |= 5702 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 5703 5704 req->enables |= cpu_to_le32( 5705 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 5706 req->auto_link_speed_mask = cpu_to_le16(advertising); 5707 5708 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 5709 req->flags |= 5710 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 5711 } else { 5712 req->force_link_speed = cpu_to_le16(fw_link_speed); 5713 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 5714 } 5715 5716 /* tell chimp that the setting takes effect immediately */ 5717 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 5718 } 5719 5720 int bnxt_hwrm_set_pause(struct bnxt *bp) 5721 { 5722 struct hwrm_port_phy_cfg_input req = {0}; 5723 int rc; 5724 5725 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 5726 bnxt_hwrm_set_pause_common(bp, &req); 5727 5728 if ((bp->link_info.autoneg & 
BNXT_AUTONEG_FLOW_CTRL) || 5729 bp->link_info.force_link_chng) 5730 bnxt_hwrm_set_link_common(bp, &req); 5731 5732 mutex_lock(&bp->hwrm_cmd_lock); 5733 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5734 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 5735 /* since changing the pause setting doesn't trigger any link 5736 * change event, the driver needs to update the current pause 5737 * result upon successful return of the phy_cfg command 5738 */ 5739 bp->link_info.pause = 5740 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 5741 bp->link_info.auto_pause_setting = 0; 5742 if (!bp->link_info.force_link_chng) 5743 bnxt_report_link(bp); 5744 } 5745 bp->link_info.force_link_chng = false; 5746 mutex_unlock(&bp->hwrm_cmd_lock); 5747 return rc; 5748 } 5749 5750 static void bnxt_hwrm_set_eee(struct bnxt *bp, 5751 struct hwrm_port_phy_cfg_input *req) 5752 { 5753 struct ethtool_eee *eee = &bp->eee; 5754 5755 if (eee->eee_enabled) { 5756 u16 eee_speeds; 5757 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 5758 5759 if (eee->tx_lpi_enabled) 5760 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 5761 else 5762 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 5763 5764 req->flags |= cpu_to_le32(flags); 5765 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 5766 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 5767 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 5768 } else { 5769 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 5770 } 5771 } 5772 5773 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 5774 { 5775 struct hwrm_port_phy_cfg_input req = {0}; 5776 5777 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 5778 if (set_pause) 5779 bnxt_hwrm_set_pause_common(bp, &req); 5780 5781 bnxt_hwrm_set_link_common(bp, &req); 5782 5783 if (set_eee) 5784 bnxt_hwrm_set_eee(bp, &req); 5785 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5786 } 5787 5788 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 5789 { 5790 struct hwrm_port_phy_cfg_input req = {0}; 5791 5792 if (!BNXT_SINGLE_PF(bp)) 5793 return 0; 5794 5795 if (pci_num_vf(bp->pdev)) 5796 return 0; 5797 5798 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 5799 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 5800 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5801 } 5802 5803 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 5804 { 5805 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5806 struct hwrm_port_led_qcaps_input req = {0}; 5807 struct bnxt_pf_info *pf = &bp->pf; 5808 int rc; 5809 5810 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 5811 return 0; 5812 5813 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 5814 req.port_id = cpu_to_le16(pf->port_id); 5815 mutex_lock(&bp->hwrm_cmd_lock); 5816 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5817 if (rc) { 5818 mutex_unlock(&bp->hwrm_cmd_lock); 5819 return rc; 5820 } 5821 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 5822 int i; 5823 5824 bp->num_leds = resp->num_leds; 5825 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 5826 bp->num_leds); 5827 for (i = 0; i < bp->num_leds; i++) { 5828 struct bnxt_led_info *led = &bp->leds[i]; 5829 __le16 caps = led->led_state_caps; 5830 5831 if (!led->led_group_id || 5832 !BNXT_LED_ALT_BLINK_CAP(caps)) { 5833 bp->num_leds = 0; 5834 break; 5835 } 5836 } 5837 } 5838 mutex_unlock(&bp->hwrm_cmd_lock); 5839
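/* bp->num_leds now holds the count of usable LEDs, or 0 if any reported LED lacks a group id or alternate-blink capability. */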
return 0; 5840 } 5841 5842 static bool bnxt_eee_config_ok(struct bnxt *bp) 5843 { 5844 struct ethtool_eee *eee = &bp->eee; 5845 struct bnxt_link_info *link_info = &bp->link_info; 5846 5847 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 5848 return true; 5849 5850 if (eee->eee_enabled) { 5851 u32 advertising = 5852 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 5853 5854 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 5855 eee->eee_enabled = 0; 5856 return false; 5857 } 5858 if (eee->advertised & ~advertising) { 5859 eee->advertised = advertising & eee->supported; 5860 return false; 5861 } 5862 } 5863 return true; 5864 } 5865 5866 static int bnxt_update_phy_setting(struct bnxt *bp) 5867 { 5868 int rc; 5869 bool update_link = false; 5870 bool update_pause = false; 5871 bool update_eee = false; 5872 struct bnxt_link_info *link_info = &bp->link_info; 5873 5874 rc = bnxt_update_link(bp, true); 5875 if (rc) { 5876 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 5877 rc); 5878 return rc; 5879 } 5880 if (!BNXT_SINGLE_PF(bp)) 5881 return 0; 5882 5883 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 5884 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 5885 link_info->req_flow_ctrl) 5886 update_pause = true; 5887 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 5888 link_info->force_pause_setting != link_info->req_flow_ctrl) 5889 update_pause = true; 5890 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 5891 if (BNXT_AUTO_MODE(link_info->auto_mode)) 5892 update_link = true; 5893 if (link_info->req_link_speed != link_info->force_link_speed) 5894 update_link = true; 5895 if (link_info->req_duplex != link_info->duplex_setting) 5896 update_link = true; 5897 } else { 5898 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 5899 update_link = true; 5900 if (link_info->advertising != link_info->auto_link_speeds) 5901 update_link = true; 5902 } 5903 5904 /* The last close may have shut down the link, so we need to call 5905 * PHY_CFG to bring it back up. 5906 */ 5907 if (!netif_carrier_ok(bp->dev)) 5908 update_link = true; 5909 5910 if (!bnxt_eee_config_ok(bp)) 5911 update_eee = true; 5912 5913 if (update_link) 5914 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 5915 else if (update_pause) 5916 rc = bnxt_hwrm_set_pause(bp); 5917 if (rc) { 5918 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 5919 rc); 5920 return rc; 5921 } 5922 5923 return rc; 5924 } 5925 5926 /* Common routine to pre-map certain register block to different GRC window. 5927 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 5928 * in the PF and 3 windows in the VF can be customized to map different 5929 * register blocks.
5930 */ 5931 static void bnxt_preset_reg_win(struct bnxt *bp) 5932 { 5933 if (BNXT_PF(bp)) { 5934 /* CAG registers map to GRC window #4 */ 5935 writel(BNXT_CAG_REG_BASE, 5936 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 5937 } 5938 } 5939 5940 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 5941 { 5942 int rc = 0; 5943 5944 bnxt_preset_reg_win(bp); 5945 netif_carrier_off(bp->dev); 5946 if (irq_re_init) { 5947 rc = bnxt_setup_int_mode(bp); 5948 if (rc) { 5949 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 5950 rc); 5951 return rc; 5952 } 5953 } 5954 if ((bp->flags & BNXT_FLAG_RFS) && 5955 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 5956 /* disable RFS if falling back to INTA */ 5957 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 5958 bp->flags &= ~BNXT_FLAG_RFS; 5959 } 5960 5961 rc = bnxt_alloc_mem(bp, irq_re_init); 5962 if (rc) { 5963 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 5964 goto open_err_free_mem; 5965 } 5966 5967 if (irq_re_init) { 5968 bnxt_init_napi(bp); 5969 rc = bnxt_request_irq(bp); 5970 if (rc) { 5971 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 5972 goto open_err; 5973 } 5974 } 5975 5976 bnxt_enable_napi(bp); 5977 5978 rc = bnxt_init_nic(bp, irq_re_init); 5979 if (rc) { 5980 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 5981 goto open_err; 5982 } 5983 5984 if (link_re_init) { 5985 rc = bnxt_update_phy_setting(bp); 5986 if (rc) 5987 netdev_warn(bp->dev, "failed to update phy settings\n"); 5988 } 5989 5990 if (irq_re_init) 5991 udp_tunnel_get_rx_info(bp->dev); 5992 5993 set_bit(BNXT_STATE_OPEN, &bp->state); 5994 bnxt_enable_int(bp); 5995 /* Enable TX queues */ 5996 bnxt_tx_enable(bp); 5997 mod_timer(&bp->timer, jiffies + bp->current_interval); 5998 /* Poll link status and check for SFP+ module status */ 5999 bnxt_get_port_module_status(bp); 6000 6001 return 0; 6002 6003 open_err: 6004 bnxt_disable_napi(bp); 6005 bnxt_del_napi(bp); 6006 6007 open_err_free_mem: 6008 bnxt_free_skbs(bp); 6009 bnxt_free_irq(bp); 6010 bnxt_free_mem(bp, true); 6011 return rc; 6012 } 6013 6014 /* rtnl_lock held */ 6015 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6016 { 6017 int rc = 0; 6018 6019 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 6020 if (rc) { 6021 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 6022 dev_close(bp->dev); 6023 } 6024 return rc; 6025 } 6026 6027 static int bnxt_open(struct net_device *dev) 6028 { 6029 struct bnxt *bp = netdev_priv(dev); 6030 6031 return __bnxt_open_nic(bp, true, true); 6032 } 6033 6034 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6035 { 6036 int rc = 0; 6037 6038 #ifdef CONFIG_BNXT_SRIOV 6039 if (bp->sriov_cfg) { 6040 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 6041 !bp->sriov_cfg, 6042 BNXT_SRIOV_CFG_WAIT_TMO); 6043 if (rc) 6044 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 6045 } 6046 #endif 6047 /* Change device state to avoid TX queue wake up's */ 6048 bnxt_tx_disable(bp); 6049 6050 clear_bit(BNXT_STATE_OPEN, &bp->state); 6051 smp_mb__after_atomic(); 6052 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) 6053 msleep(20); 6054 6055 /* Flush rings and and disable interrupts */ 6056 bnxt_shutdown_nic(bp, irq_re_init); 6057 6058 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 6059 6060 bnxt_disable_napi(bp); 6061 del_timer_sync(&bp->timer); 6062 bnxt_free_skbs(bp); 6063 6064 if (irq_re_init) { 6065 bnxt_free_irq(bp); 6066 bnxt_del_napi(bp); 6067 } 6068 
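	/* Finally release memory; the irq_re_init flag mirrors bnxt_alloc_mem()
	 * in the open path, so structures tied to the interrupt setup are only
	 * torn down when they will be re-created on the next open.
	 */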
bnxt_free_mem(bp, irq_re_init); 6069 return rc; 6070 } 6071 6072 static int bnxt_close(struct net_device *dev) 6073 { 6074 struct bnxt *bp = netdev_priv(dev); 6075 6076 bnxt_close_nic(bp, true, true); 6077 bnxt_hwrm_shutdown_link(bp); 6078 return 0; 6079 } 6080 6081 /* rtnl_lock held */ 6082 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 6083 { 6084 switch (cmd) { 6085 case SIOCGMIIPHY: 6086 /* fallthru */ 6087 case SIOCGMIIREG: { 6088 if (!netif_running(dev)) 6089 return -EAGAIN; 6090 6091 return 0; 6092 } 6093 6094 case SIOCSMIIREG: 6095 if (!netif_running(dev)) 6096 return -EAGAIN; 6097 6098 return 0; 6099 6100 default: 6101 /* do nothing */ 6102 break; 6103 } 6104 return -EOPNOTSUPP; 6105 } 6106 6107 static void 6108 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6109 { 6110 u32 i; 6111 struct bnxt *bp = netdev_priv(dev); 6112 6113 if (!bp->bnapi) 6114 return; 6115 6116 /* TODO check if we need to synchronize with bnxt_close path */ 6117 for (i = 0; i < bp->cp_nr_rings; i++) { 6118 struct bnxt_napi *bnapi = bp->bnapi[i]; 6119 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6120 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 6121 6122 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 6123 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 6124 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 6125 6126 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 6127 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 6128 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 6129 6130 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 6131 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 6132 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 6133 6134 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 6135 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 6136 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 6137 6138 stats->rx_missed_errors += 6139 le64_to_cpu(hw_stats->rx_discard_pkts); 6140 6141 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 6142 6143 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 6144 } 6145 6146 if (bp->flags & BNXT_FLAG_PORT_STATS) { 6147 struct rx_port_stats *rx = bp->hw_rx_port_stats; 6148 struct tx_port_stats *tx = bp->hw_tx_port_stats; 6149 6150 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 6151 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 6152 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 6153 le64_to_cpu(rx->rx_ovrsz_frames) + 6154 le64_to_cpu(rx->rx_runt_frames); 6155 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 6156 le64_to_cpu(rx->rx_jbr_frames); 6157 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 6158 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 6159 stats->tx_errors = le64_to_cpu(tx->tx_err); 6160 } 6161 } 6162 6163 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 6164 { 6165 struct net_device *dev = bp->dev; 6166 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6167 struct netdev_hw_addr *ha; 6168 u8 *haddr; 6169 int mc_count = 0; 6170 bool update = false; 6171 int off = 0; 6172 6173 netdev_for_each_mc_addr(ha, dev) { 6174 if (mc_count >= BNXT_MAX_MC_ADDRS) { 6175 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6176 vnic->mc_list_count = 0; 6177 return false; 6178 } 6179 haddr = ha->addr; 6180 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 6181 memcpy(vnic->mc_list + off, haddr, 
ETH_ALEN); 6182 update = true; 6183 } 6184 off += ETH_ALEN; 6185 mc_count++; 6186 } 6187 if (mc_count) 6188 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 6189 6190 if (mc_count != vnic->mc_list_count) { 6191 vnic->mc_list_count = mc_count; 6192 update = true; 6193 } 6194 return update; 6195 } 6196 6197 static bool bnxt_uc_list_updated(struct bnxt *bp) 6198 { 6199 struct net_device *dev = bp->dev; 6200 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6201 struct netdev_hw_addr *ha; 6202 int off = 0; 6203 6204 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 6205 return true; 6206 6207 netdev_for_each_uc_addr(ha, dev) { 6208 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 6209 return true; 6210 6211 off += ETH_ALEN; 6212 } 6213 return false; 6214 } 6215 6216 static void bnxt_set_rx_mode(struct net_device *dev) 6217 { 6218 struct bnxt *bp = netdev_priv(dev); 6219 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6220 u32 mask = vnic->rx_mask; 6221 bool mc_update = false; 6222 bool uc_update; 6223 6224 if (!netif_running(dev)) 6225 return; 6226 6227 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 6228 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 6229 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 6230 6231 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 6232 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6233 6234 uc_update = bnxt_uc_list_updated(bp); 6235 6236 if (dev->flags & IFF_ALLMULTI) { 6237 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6238 vnic->mc_list_count = 0; 6239 } else { 6240 mc_update = bnxt_mc_list_updated(bp, &mask); 6241 } 6242 6243 if (mask != vnic->rx_mask || uc_update || mc_update) { 6244 vnic->rx_mask = mask; 6245 6246 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 6247 schedule_work(&bp->sp_task); 6248 } 6249 } 6250 6251 static int bnxt_cfg_rx_mode(struct bnxt *bp) 6252 { 6253 struct net_device *dev = bp->dev; 6254 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6255 struct netdev_hw_addr *ha; 6256 int i, off = 0, rc; 6257 bool uc_update; 6258 6259 netif_addr_lock_bh(dev); 6260 uc_update = bnxt_uc_list_updated(bp); 6261 netif_addr_unlock_bh(dev); 6262 6263 if (!uc_update) 6264 goto skip_uc; 6265 6266 mutex_lock(&bp->hwrm_cmd_lock); 6267 for (i = 1; i < vnic->uc_filter_count; i++) { 6268 struct hwrm_cfa_l2_filter_free_input req = {0}; 6269 6270 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 6271 -1); 6272 6273 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 6274 6275 rc = _hwrm_send_message(bp, &req, sizeof(req), 6276 HWRM_CMD_TIMEOUT); 6277 } 6278 mutex_unlock(&bp->hwrm_cmd_lock); 6279 6280 vnic->uc_filter_count = 1; 6281 6282 netif_addr_lock_bh(dev); 6283 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 6284 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6285 } else { 6286 netdev_for_each_uc_addr(ha, dev) { 6287 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 6288 off += ETH_ALEN; 6289 vnic->uc_filter_count++; 6290 } 6291 } 6292 netif_addr_unlock_bh(dev); 6293 6294 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 6295 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 6296 if (rc) { 6297 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 6298 rc); 6299 vnic->uc_filter_count = i; 6300 return rc; 6301 } 6302 } 6303 6304 skip_uc: 6305 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 6306 if (rc) 6307 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 6308 rc); 6309 6310 return rc; 6311 } 6312 6313 /* If the chip and firmware supports RFS */ 6314 static bool bnxt_rfs_supported(struct bnxt 
*bp) 6315 { 6316 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 6317 return true; 6318 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6319 return true; 6320 return false; 6321 } 6322 6323 /* If runtime conditions support RFS */ 6324 static bool bnxt_rfs_capable(struct bnxt *bp) 6325 { 6326 #ifdef CONFIG_RFS_ACCEL 6327 int vnics, max_vnics, max_rss_ctxs; 6328 6329 if (!(bp->flags & BNXT_FLAG_MSIX_CAP)) 6330 return false; 6331 6332 vnics = 1 + bp->rx_nr_rings; 6333 max_vnics = bnxt_get_max_func_vnics(bp); 6334 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 6335 6336 /* RSS contexts not a limiting factor */ 6337 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6338 max_rss_ctxs = max_vnics; 6339 if (vnics > max_vnics || vnics > max_rss_ctxs) { 6340 netdev_warn(bp->dev, 6341 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 6342 min(max_rss_ctxs - 1, max_vnics - 1)); 6343 return false; 6344 } 6345 6346 return true; 6347 #else 6348 return false; 6349 #endif 6350 } 6351 6352 static netdev_features_t bnxt_fix_features(struct net_device *dev, 6353 netdev_features_t features) 6354 { 6355 struct bnxt *bp = netdev_priv(dev); 6356 6357 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 6358 features &= ~NETIF_F_NTUPLE; 6359 6360 /* Both CTAG and STAG VLAN accelaration on the RX side have to be 6361 * turned on or off together. 6362 */ 6363 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 6364 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 6365 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 6366 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6367 NETIF_F_HW_VLAN_STAG_RX); 6368 else 6369 features |= NETIF_F_HW_VLAN_CTAG_RX | 6370 NETIF_F_HW_VLAN_STAG_RX; 6371 } 6372 #ifdef CONFIG_BNXT_SRIOV 6373 if (BNXT_VF(bp)) { 6374 if (bp->vf.vlan) { 6375 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6376 NETIF_F_HW_VLAN_STAG_RX); 6377 } 6378 } 6379 #endif 6380 return features; 6381 } 6382 6383 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 6384 { 6385 struct bnxt *bp = netdev_priv(dev); 6386 u32 flags = bp->flags; 6387 u32 changes; 6388 int rc = 0; 6389 bool re_init = false; 6390 bool update_tpa = false; 6391 6392 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 6393 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 6394 flags |= BNXT_FLAG_GRO; 6395 if (features & NETIF_F_LRO) 6396 flags |= BNXT_FLAG_LRO; 6397 6398 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 6399 flags &= ~BNXT_FLAG_TPA; 6400 6401 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6402 flags |= BNXT_FLAG_STRIP_VLAN; 6403 6404 if (features & NETIF_F_NTUPLE) 6405 flags |= BNXT_FLAG_RFS; 6406 6407 changes = flags ^ bp->flags; 6408 if (changes & BNXT_FLAG_TPA) { 6409 update_tpa = true; 6410 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 6411 (flags & BNXT_FLAG_TPA) == 0) 6412 re_init = true; 6413 } 6414 6415 if (changes & ~BNXT_FLAG_TPA) 6416 re_init = true; 6417 6418 if (flags != bp->flags) { 6419 u32 old_flags = bp->flags; 6420 6421 bp->flags = flags; 6422 6423 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6424 if (update_tpa) 6425 bnxt_set_ring_params(bp); 6426 return rc; 6427 } 6428 6429 if (re_init) { 6430 bnxt_close_nic(bp, false, false); 6431 if (update_tpa) 6432 bnxt_set_ring_params(bp); 6433 6434 return bnxt_open_nic(bp, false, false); 6435 } 6436 if (update_tpa) { 6437 rc = bnxt_set_tpa(bp, 6438 (flags & BNXT_FLAG_TPA) ? 
6439 true : false); 6440 if (rc) 6441 bp->flags = old_flags; 6442 } 6443 } 6444 return rc; 6445 } 6446 6447 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 6448 { 6449 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 6450 int i = bnapi->index; 6451 6452 if (!txr) 6453 return; 6454 6455 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 6456 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 6457 txr->tx_cons); 6458 } 6459 6460 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 6461 { 6462 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 6463 int i = bnapi->index; 6464 6465 if (!rxr) 6466 return; 6467 6468 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 6469 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 6470 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 6471 rxr->rx_sw_agg_prod); 6472 } 6473 6474 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 6475 { 6476 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6477 int i = bnapi->index; 6478 6479 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 6480 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 6481 } 6482 6483 static void bnxt_dbg_dump_states(struct bnxt *bp) 6484 { 6485 int i; 6486 struct bnxt_napi *bnapi; 6487 6488 for (i = 0; i < bp->cp_nr_rings; i++) { 6489 bnapi = bp->bnapi[i]; 6490 if (netif_msg_drv(bp)) { 6491 bnxt_dump_tx_sw_state(bnapi); 6492 bnxt_dump_rx_sw_state(bnapi); 6493 bnxt_dump_cp_sw_state(bnapi); 6494 } 6495 } 6496 } 6497 6498 static void bnxt_reset_task(struct bnxt *bp, bool silent) 6499 { 6500 if (!silent) 6501 bnxt_dbg_dump_states(bp); 6502 if (netif_running(bp->dev)) { 6503 int rc; 6504 6505 if (!silent) 6506 bnxt_ulp_stop(bp); 6507 bnxt_close_nic(bp, false, false); 6508 rc = bnxt_open_nic(bp, false, false); 6509 if (!silent && !rc) 6510 bnxt_ulp_start(bp); 6511 } 6512 } 6513 6514 static void bnxt_tx_timeout(struct net_device *dev) 6515 { 6516 struct bnxt *bp = netdev_priv(dev); 6517 6518 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6519 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6520 schedule_work(&bp->sp_task); 6521 } 6522 6523 #ifdef CONFIG_NET_POLL_CONTROLLER 6524 static void bnxt_poll_controller(struct net_device *dev) 6525 { 6526 struct bnxt *bp = netdev_priv(dev); 6527 int i; 6528 6529 for (i = 0; i < bp->cp_nr_rings; i++) { 6530 struct bnxt_irq *irq = &bp->irq_tbl[i]; 6531 6532 disable_irq(irq->vector); 6533 irq->handler(irq->vector, bp->bnapi[i]); 6534 enable_irq(irq->vector); 6535 } 6536 } 6537 #endif 6538 6539 static void bnxt_timer(unsigned long data) 6540 { 6541 struct bnxt *bp = (struct bnxt *)data; 6542 struct net_device *dev = bp->dev; 6543 6544 if (!netif_running(dev)) 6545 return; 6546 6547 if (atomic_read(&bp->intr_sem) != 0) 6548 goto bnxt_restart_timer; 6549 6550 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) { 6551 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 6552 schedule_work(&bp->sp_task); 6553 } 6554 bnxt_restart_timer: 6555 mod_timer(&bp->timer, jiffies + bp->current_interval); 6556 } 6557 6558 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 6559 { 6560 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 6561 * set. If the device is being closed, bnxt_close() may be holding 6562 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 6563 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
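 * Otherwise this task, blocked in rtnl_lock(), and bnxt_close_nic(),
 * spinning under rtnl until BNXT_STATE_IN_SP_TASK clears, would deadlock
 * against each other.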
6564 */ 6565 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6566 rtnl_lock(); 6567 } 6568 6569 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 6570 { 6571 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6572 rtnl_unlock(); 6573 } 6574 6575 /* Only called from bnxt_sp_task() */ 6576 static void bnxt_reset(struct bnxt *bp, bool silent) 6577 { 6578 bnxt_rtnl_lock_sp(bp); 6579 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 6580 bnxt_reset_task(bp, silent); 6581 bnxt_rtnl_unlock_sp(bp); 6582 } 6583 6584 static void bnxt_cfg_ntp_filters(struct bnxt *); 6585 6586 static void bnxt_sp_task(struct work_struct *work) 6587 { 6588 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 6589 6590 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6591 smp_mb__after_atomic(); 6592 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6593 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6594 return; 6595 } 6596 6597 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 6598 bnxt_cfg_rx_mode(bp); 6599 6600 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 6601 bnxt_cfg_ntp_filters(bp); 6602 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 6603 bnxt_hwrm_exec_fwd_req(bp); 6604 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 6605 bnxt_hwrm_tunnel_dst_port_alloc( 6606 bp, bp->vxlan_port, 6607 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 6608 } 6609 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 6610 bnxt_hwrm_tunnel_dst_port_free( 6611 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 6612 } 6613 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 6614 bnxt_hwrm_tunnel_dst_port_alloc( 6615 bp, bp->nge_port, 6616 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 6617 } 6618 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 6619 bnxt_hwrm_tunnel_dst_port_free( 6620 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 6621 } 6622 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 6623 bnxt_hwrm_port_qstats(bp); 6624 6625 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 6626 * must be the last functions to be called before exiting. 
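 * They all go through bnxt_rtnl_lock_sp(), which momentarily drops
 * BNXT_STATE_IN_SP_TASK so that a pending close can make progress; any
 * work done after that point would no longer be covered by the flag.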
6627 */ 6628 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 6629 int rc = 0; 6630 6631 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 6632 &bp->sp_event)) 6633 bnxt_hwrm_phy_qcaps(bp); 6634 6635 bnxt_rtnl_lock_sp(bp); 6636 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 6637 rc = bnxt_update_link(bp, true); 6638 bnxt_rtnl_unlock_sp(bp); 6639 if (rc) 6640 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 6641 rc); 6642 } 6643 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 6644 bnxt_rtnl_lock_sp(bp); 6645 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 6646 bnxt_get_port_module_status(bp); 6647 bnxt_rtnl_unlock_sp(bp); 6648 } 6649 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 6650 bnxt_reset(bp, false); 6651 6652 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 6653 bnxt_reset(bp, true); 6654 6655 smp_mb__before_atomic(); 6656 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6657 } 6658 6659 /* Under rtnl_lock */ 6660 int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp) 6661 { 6662 int max_rx, max_tx, tx_sets = 1; 6663 int tx_rings_needed; 6664 bool sh = true; 6665 int rc; 6666 6667 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 6668 sh = false; 6669 6670 if (tcs) 6671 tx_sets = tcs; 6672 6673 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 6674 if (rc) 6675 return rc; 6676 6677 if (max_rx < rx) 6678 return -ENOMEM; 6679 6680 tx_rings_needed = tx * tx_sets + tx_xdp; 6681 if (max_tx < tx_rings_needed) 6682 return -ENOMEM; 6683 6684 if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) || 6685 tx_rings_needed < (tx * tx_sets + tx_xdp)) 6686 return -ENOMEM; 6687 return 0; 6688 } 6689 6690 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 6691 { 6692 if (bp->bar2) { 6693 pci_iounmap(pdev, bp->bar2); 6694 bp->bar2 = NULL; 6695 } 6696 6697 if (bp->bar1) { 6698 pci_iounmap(pdev, bp->bar1); 6699 bp->bar1 = NULL; 6700 } 6701 6702 if (bp->bar0) { 6703 pci_iounmap(pdev, bp->bar0); 6704 bp->bar0 = NULL; 6705 } 6706 } 6707 6708 static void bnxt_cleanup_pci(struct bnxt *bp) 6709 { 6710 bnxt_unmap_bars(bp, bp->pdev); 6711 pci_release_regions(bp->pdev); 6712 pci_disable_device(bp->pdev); 6713 } 6714 6715 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 6716 { 6717 int rc; 6718 struct bnxt *bp = netdev_priv(dev); 6719 6720 SET_NETDEV_DEV(dev, &pdev->dev); 6721 6722 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 6723 rc = pci_enable_device(pdev); 6724 if (rc) { 6725 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 6726 goto init_err; 6727 } 6728 6729 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 6730 dev_err(&pdev->dev, 6731 "Cannot find PCI device base address, aborting\n"); 6732 rc = -ENODEV; 6733 goto init_err_disable; 6734 } 6735 6736 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 6737 if (rc) { 6738 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 6739 goto init_err_disable; 6740 } 6741 6742 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 6743 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 6744 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 6745 goto init_err_disable; 6746 } 6747 6748 pci_set_master(pdev); 6749 6750 bp->dev = dev; 6751 bp->pdev = pdev; 6752 6753 bp->bar0 = pci_ioremap_bar(pdev, 0); 6754 if (!bp->bar0) { 6755 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 6756 rc = -ENOMEM; 6757 goto init_err_release; 6758 } 6759 6760 bp->bar1 = pci_ioremap_bar(pdev, 2); 6761 if (!bp->bar1) { 6762 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 6763 rc = -ENOMEM; 6764 goto init_err_release; 6765 } 6766 6767 bp->bar2 = pci_ioremap_bar(pdev, 4); 6768 if (!bp->bar2) { 6769 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 6770 rc = -ENOMEM; 6771 goto init_err_release; 6772 } 6773 6774 pci_enable_pcie_error_reporting(pdev); 6775 6776 INIT_WORK(&bp->sp_task, bnxt_sp_task); 6777 6778 spin_lock_init(&bp->ntp_fltr_lock); 6779 6780 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 6781 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 6782 6783 /* tick values in micro seconds */ 6784 bp->rx_coal_ticks = 12; 6785 bp->rx_coal_bufs = 30; 6786 bp->rx_coal_ticks_irq = 1; 6787 bp->rx_coal_bufs_irq = 2; 6788 6789 bp->tx_coal_ticks = 25; 6790 bp->tx_coal_bufs = 30; 6791 bp->tx_coal_ticks_irq = 2; 6792 bp->tx_coal_bufs_irq = 2; 6793 6794 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 6795 6796 init_timer(&bp->timer); 6797 bp->timer.data = (unsigned long)bp; 6798 bp->timer.function = bnxt_timer; 6799 bp->current_interval = BNXT_TIMER_INTERVAL; 6800 6801 clear_bit(BNXT_STATE_OPEN, &bp->state); 6802 return 0; 6803 6804 init_err_release: 6805 bnxt_unmap_bars(bp, pdev); 6806 pci_release_regions(pdev); 6807 6808 init_err_disable: 6809 pci_disable_device(pdev); 6810 6811 init_err: 6812 return rc; 6813 } 6814 6815 /* rtnl_lock held */ 6816 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 6817 { 6818 struct sockaddr *addr = p; 6819 struct bnxt *bp = netdev_priv(dev); 6820 int rc = 0; 6821 6822 if (!is_valid_ether_addr(addr->sa_data)) 6823 return -EADDRNOTAVAIL; 6824 6825 rc = bnxt_approve_mac(bp, addr->sa_data); 6826 if (rc) 6827 return rc; 6828 6829 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 6830 return 0; 6831 6832 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 6833 if (netif_running(dev)) { 6834 bnxt_close_nic(bp, false, false); 6835 rc = bnxt_open_nic(bp, false, false); 6836 } 6837 6838 return rc; 6839 } 6840 6841 /* rtnl_lock held */ 6842 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 6843 { 6844 struct bnxt *bp = netdev_priv(dev); 6845 6846 if (netif_running(dev)) 6847 bnxt_close_nic(bp, false, false); 6848 6849 dev->mtu = new_mtu; 6850 bnxt_set_ring_params(bp); 6851 6852 if (netif_running(dev)) 6853 return bnxt_open_nic(bp, false, false); 6854 6855 return 0; 6856 } 6857 6858 int 
bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 6859 { 6860 struct bnxt *bp = netdev_priv(dev); 6861 bool sh = false; 6862 int rc; 6863 6864 if (tc > bp->max_tc) { 6865 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 6866 tc, bp->max_tc); 6867 return -EINVAL; 6868 } 6869 6870 if (netdev_get_num_tc(dev) == tc) 6871 return 0; 6872 6873 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 6874 sh = true; 6875 6876 rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 6877 tc, bp->tx_nr_rings_xdp); 6878 if (rc) 6879 return rc; 6880 6881 /* Needs to close the device and do hw resource re-allocations */ 6882 if (netif_running(bp->dev)) 6883 bnxt_close_nic(bp, true, false); 6884 6885 if (tc) { 6886 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 6887 netdev_set_num_tc(dev, tc); 6888 } else { 6889 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 6890 netdev_reset_tc(dev); 6891 } 6892 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 6893 bp->tx_nr_rings + bp->rx_nr_rings; 6894 bp->num_stat_ctxs = bp->cp_nr_rings; 6895 6896 if (netif_running(bp->dev)) 6897 return bnxt_open_nic(bp, true, false); 6898 6899 return 0; 6900 } 6901 6902 static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 6903 struct tc_to_netdev *ntc) 6904 { 6905 if (ntc->type != TC_SETUP_MQPRIO) 6906 return -EINVAL; 6907 6908 return bnxt_setup_mq_tc(dev, ntc->tc); 6909 } 6910 6911 #ifdef CONFIG_RFS_ACCEL 6912 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 6913 struct bnxt_ntuple_filter *f2) 6914 { 6915 struct flow_keys *keys1 = &f1->fkeys; 6916 struct flow_keys *keys2 = &f2->fkeys; 6917 6918 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && 6919 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && 6920 keys1->ports.ports == keys2->ports.ports && 6921 keys1->basic.ip_proto == keys2->basic.ip_proto && 6922 keys1->basic.n_proto == keys2->basic.n_proto && 6923 keys1->control.flags == keys2->control.flags && 6924 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 6925 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 6926 return true; 6927 6928 return false; 6929 } 6930 6931 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 6932 u16 rxq_index, u32 flow_id) 6933 { 6934 struct bnxt *bp = netdev_priv(dev); 6935 struct bnxt_ntuple_filter *fltr, *new_fltr; 6936 struct flow_keys *fkeys; 6937 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 6938 int rc = 0, idx, bit_id, l2_idx = 0; 6939 struct hlist_head *head; 6940 6941 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 6942 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6943 int off = 0, j; 6944 6945 netif_addr_lock_bh(dev); 6946 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 6947 if (ether_addr_equal(eth->h_dest, 6948 vnic->uc_list + off)) { 6949 l2_idx = j + 1; 6950 break; 6951 } 6952 } 6953 netif_addr_unlock_bh(dev); 6954 if (!l2_idx) 6955 return -EINVAL; 6956 } 6957 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 6958 if (!new_fltr) 6959 return -ENOMEM; 6960 6961 fkeys = &new_fltr->fkeys; 6962 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 6963 rc = -EPROTONOSUPPORT; 6964 goto err_free; 6965 } 6966 6967 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 6968 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 6969 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 6970 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 6971 rc = -EPROTONOSUPPORT; 6972 goto err_free; 6973 } 6974 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 6975 
bp->hwrm_spec_code < 0x10601) { 6976 rc = -EPROTONOSUPPORT; 6977 goto err_free; 6978 } 6979 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && 6980 bp->hwrm_spec_code < 0x10601) { 6981 rc = -EPROTONOSUPPORT; 6982 goto err_free; 6983 } 6984 6985 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 6986 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 6987 6988 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 6989 head = &bp->ntp_fltr_hash_tbl[idx]; 6990 rcu_read_lock(); 6991 hlist_for_each_entry_rcu(fltr, head, hash) { 6992 if (bnxt_fltr_match(fltr, new_fltr)) { 6993 rcu_read_unlock(); 6994 rc = 0; 6995 goto err_free; 6996 } 6997 } 6998 rcu_read_unlock(); 6999 7000 spin_lock_bh(&bp->ntp_fltr_lock); 7001 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 7002 BNXT_NTP_FLTR_MAX_FLTR, 0); 7003 if (bit_id < 0) { 7004 spin_unlock_bh(&bp->ntp_fltr_lock); 7005 rc = -ENOMEM; 7006 goto err_free; 7007 } 7008 7009 new_fltr->sw_id = (u16)bit_id; 7010 new_fltr->flow_id = flow_id; 7011 new_fltr->l2_fltr_idx = l2_idx; 7012 new_fltr->rxq = rxq_index; 7013 hlist_add_head_rcu(&new_fltr->hash, head); 7014 bp->ntp_fltr_count++; 7015 spin_unlock_bh(&bp->ntp_fltr_lock); 7016 7017 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7018 schedule_work(&bp->sp_task); 7019 7020 return new_fltr->sw_id; 7021 7022 err_free: 7023 kfree(new_fltr); 7024 return rc; 7025 } 7026 7027 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7028 { 7029 int i; 7030 7031 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 7032 struct hlist_head *head; 7033 struct hlist_node *tmp; 7034 struct bnxt_ntuple_filter *fltr; 7035 int rc; 7036 7037 head = &bp->ntp_fltr_hash_tbl[i]; 7038 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 7039 bool del = false; 7040 7041 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 7042 if (rps_may_expire_flow(bp->dev, fltr->rxq, 7043 fltr->flow_id, 7044 fltr->sw_id)) { 7045 bnxt_hwrm_cfa_ntuple_filter_free(bp, 7046 fltr); 7047 del = true; 7048 } 7049 } else { 7050 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 7051 fltr); 7052 if (rc) 7053 del = true; 7054 else 7055 set_bit(BNXT_FLTR_VALID, &fltr->state); 7056 } 7057 7058 if (del) { 7059 spin_lock_bh(&bp->ntp_fltr_lock); 7060 hlist_del_rcu(&fltr->hash); 7061 bp->ntp_fltr_count--; 7062 spin_unlock_bh(&bp->ntp_fltr_lock); 7063 synchronize_rcu(); 7064 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 7065 kfree(fltr); 7066 } 7067 } 7068 } 7069 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 7070 netdev_info(bp->dev, "Receive PF driver unload event!"); 7071 } 7072 7073 #else 7074 7075 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7076 { 7077 } 7078 7079 #endif /* CONFIG_RFS_ACCEL */ 7080 7081 static void bnxt_udp_tunnel_add(struct net_device *dev, 7082 struct udp_tunnel_info *ti) 7083 { 7084 struct bnxt *bp = netdev_priv(dev); 7085 7086 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7087 return; 7088 7089 if (!netif_running(dev)) 7090 return; 7091 7092 switch (ti->type) { 7093 case UDP_TUNNEL_TYPE_VXLAN: 7094 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 7095 return; 7096 7097 bp->vxlan_port_cnt++; 7098 if (bp->vxlan_port_cnt == 1) { 7099 bp->vxlan_port = ti->port; 7100 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7101 schedule_work(&bp->sp_task); 7102 } 7103 break; 7104 case UDP_TUNNEL_TYPE_GENEVE: 7105 if (bp->nge_port_cnt && bp->nge_port != ti->port) 7106 return; 7107 7108 bp->nge_port_cnt++; 7109 if (bp->nge_port_cnt == 1) { 7110 bp->nge_port = ti->port; 7111 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, 
&bp->sp_event); 7112 } 7113 break; 7114 default: 7115 return; 7116 } 7117 7118 schedule_work(&bp->sp_task); 7119 } 7120 7121 static void bnxt_udp_tunnel_del(struct net_device *dev, 7122 struct udp_tunnel_info *ti) 7123 { 7124 struct bnxt *bp = netdev_priv(dev); 7125 7126 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7127 return; 7128 7129 if (!netif_running(dev)) 7130 return; 7131 7132 switch (ti->type) { 7133 case UDP_TUNNEL_TYPE_VXLAN: 7134 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 7135 return; 7136 bp->vxlan_port_cnt--; 7137 7138 if (bp->vxlan_port_cnt != 0) 7139 return; 7140 7141 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 7142 break; 7143 case UDP_TUNNEL_TYPE_GENEVE: 7144 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 7145 return; 7146 bp->nge_port_cnt--; 7147 7148 if (bp->nge_port_cnt != 0) 7149 return; 7150 7151 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 7152 break; 7153 default: 7154 return; 7155 } 7156 7157 schedule_work(&bp->sp_task); 7158 } 7159 7160 static const struct net_device_ops bnxt_netdev_ops = { 7161 .ndo_open = bnxt_open, 7162 .ndo_start_xmit = bnxt_start_xmit, 7163 .ndo_stop = bnxt_close, 7164 .ndo_get_stats64 = bnxt_get_stats64, 7165 .ndo_set_rx_mode = bnxt_set_rx_mode, 7166 .ndo_do_ioctl = bnxt_ioctl, 7167 .ndo_validate_addr = eth_validate_addr, 7168 .ndo_set_mac_address = bnxt_change_mac_addr, 7169 .ndo_change_mtu = bnxt_change_mtu, 7170 .ndo_fix_features = bnxt_fix_features, 7171 .ndo_set_features = bnxt_set_features, 7172 .ndo_tx_timeout = bnxt_tx_timeout, 7173 #ifdef CONFIG_BNXT_SRIOV 7174 .ndo_get_vf_config = bnxt_get_vf_config, 7175 .ndo_set_vf_mac = bnxt_set_vf_mac, 7176 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 7177 .ndo_set_vf_rate = bnxt_set_vf_bw, 7178 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 7179 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 7180 #endif 7181 #ifdef CONFIG_NET_POLL_CONTROLLER 7182 .ndo_poll_controller = bnxt_poll_controller, 7183 #endif 7184 .ndo_setup_tc = bnxt_setup_tc, 7185 #ifdef CONFIG_RFS_ACCEL 7186 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 7187 #endif 7188 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 7189 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 7190 .ndo_xdp = bnxt_xdp, 7191 }; 7192 7193 static void bnxt_remove_one(struct pci_dev *pdev) 7194 { 7195 struct net_device *dev = pci_get_drvdata(pdev); 7196 struct bnxt *bp = netdev_priv(dev); 7197 7198 if (BNXT_PF(bp)) 7199 bnxt_sriov_disable(bp); 7200 7201 pci_disable_pcie_error_reporting(pdev); 7202 unregister_netdev(dev); 7203 cancel_work_sync(&bp->sp_task); 7204 bp->sp_event = 0; 7205 7206 bnxt_clear_int_mode(bp); 7207 bnxt_hwrm_func_drv_unrgtr(bp); 7208 bnxt_free_hwrm_resources(bp); 7209 bnxt_dcb_free(bp); 7210 kfree(bp->edev); 7211 bp->edev = NULL; 7212 if (bp->xdp_prog) 7213 bpf_prog_put(bp->xdp_prog); 7214 bnxt_cleanup_pci(bp); 7215 free_netdev(dev); 7216 } 7217 7218 static int bnxt_probe_phy(struct bnxt *bp) 7219 { 7220 int rc = 0; 7221 struct bnxt_link_info *link_info = &bp->link_info; 7222 7223 rc = bnxt_hwrm_phy_qcaps(bp); 7224 if (rc) { 7225 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 7226 rc); 7227 return rc; 7228 } 7229 7230 rc = bnxt_update_link(bp, false); 7231 if (rc) { 7232 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 7233 rc); 7234 return rc; 7235 } 7236 7237 /* Older firmware does not have supported_auto_speeds, so assume 7238 * that all supported speeds can be autonegotiated. 
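 * (i.e. fall back to the full support_speeds mask below when PHY_QCAPS
 * reported no supported_auto_speeds).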
7239 */ 7240 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 7241 link_info->support_auto_speeds = link_info->support_speeds; 7242 7243 /*initialize the ethool setting copy with NVM settings */ 7244 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 7245 link_info->autoneg = BNXT_AUTONEG_SPEED; 7246 if (bp->hwrm_spec_code >= 0x10201) { 7247 if (link_info->auto_pause_setting & 7248 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 7249 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7250 } else { 7251 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7252 } 7253 link_info->advertising = link_info->auto_link_speeds; 7254 } else { 7255 link_info->req_link_speed = link_info->force_link_speed; 7256 link_info->req_duplex = link_info->duplex_setting; 7257 } 7258 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 7259 link_info->req_flow_ctrl = 7260 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 7261 else 7262 link_info->req_flow_ctrl = link_info->force_pause_setting; 7263 return rc; 7264 } 7265 7266 static int bnxt_get_max_irq(struct pci_dev *pdev) 7267 { 7268 u16 ctrl; 7269 7270 if (!pdev->msix_cap) 7271 return 1; 7272 7273 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 7274 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 7275 } 7276 7277 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7278 int *max_cp) 7279 { 7280 int max_ring_grps = 0; 7281 7282 #ifdef CONFIG_BNXT_SRIOV 7283 if (!BNXT_PF(bp)) { 7284 *max_tx = bp->vf.max_tx_rings; 7285 *max_rx = bp->vf.max_rx_rings; 7286 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); 7287 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); 7288 max_ring_grps = bp->vf.max_hw_ring_grps; 7289 } else 7290 #endif 7291 { 7292 *max_tx = bp->pf.max_tx_rings; 7293 *max_rx = bp->pf.max_rx_rings; 7294 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); 7295 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); 7296 max_ring_grps = bp->pf.max_hw_ring_grps; 7297 } 7298 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 7299 *max_cp -= 1; 7300 *max_rx -= 2; 7301 } 7302 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7303 *max_rx >>= 1; 7304 *max_rx = min_t(int, *max_rx, max_ring_grps); 7305 } 7306 7307 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 7308 { 7309 int rx, tx, cp; 7310 7311 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 7312 if (!rx || !tx || !cp) 7313 return -ENOMEM; 7314 7315 *max_rx = rx; 7316 *max_tx = tx; 7317 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 7318 } 7319 7320 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7321 bool shared) 7322 { 7323 int rc; 7324 7325 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 7326 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 7327 /* Not enough rings, try disabling agg rings. 
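 * Without aggregation rings each RX ring needs only one hardware ring,
 * at the cost of having to turn LRO off below.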
*/ 7328 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 7329 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 7330 if (rc) 7331 return rc; 7332 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 7333 bp->dev->hw_features &= ~NETIF_F_LRO; 7334 bp->dev->features &= ~NETIF_F_LRO; 7335 bnxt_set_ring_params(bp); 7336 } 7337 7338 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 7339 int max_cp, max_stat, max_irq; 7340 7341 /* Reserve minimum resources for RoCE */ 7342 max_cp = bnxt_get_max_func_cp_rings(bp); 7343 max_stat = bnxt_get_max_func_stat_ctxs(bp); 7344 max_irq = bnxt_get_max_func_irqs(bp); 7345 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 7346 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 7347 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 7348 return 0; 7349 7350 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 7351 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 7352 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 7353 max_cp = min_t(int, max_cp, max_irq); 7354 max_cp = min_t(int, max_cp, max_stat); 7355 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 7356 if (rc) 7357 rc = 0; 7358 } 7359 return rc; 7360 } 7361 7362 static int bnxt_set_dflt_rings(struct bnxt *bp) 7363 { 7364 int dflt_rings, max_rx_rings, max_tx_rings, rc; 7365 bool sh = true; 7366 7367 if (sh) 7368 bp->flags |= BNXT_FLAG_SHARED_RINGS; 7369 dflt_rings = netif_get_num_default_rss_queues(); 7370 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 7371 if (rc) 7372 return rc; 7373 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 7374 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 7375 7376 rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc); 7377 if (rc) 7378 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 7379 7380 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 7381 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 7382 bp->tx_nr_rings + bp->rx_nr_rings; 7383 bp->num_stat_ctxs = bp->cp_nr_rings; 7384 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 7385 bp->rx_nr_rings++; 7386 bp->cp_nr_rings++; 7387 } 7388 return rc; 7389 } 7390 7391 void bnxt_restore_pf_fw_resources(struct bnxt *bp) 7392 { 7393 ASSERT_RTNL(); 7394 bnxt_hwrm_func_qcaps(bp); 7395 bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP); 7396 } 7397 7398 static void bnxt_parse_log_pcie_link(struct bnxt *bp) 7399 { 7400 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 7401 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; 7402 7403 if (pcie_get_minimum_link(bp->pdev, &speed, &width) || 7404 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) 7405 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); 7406 else 7407 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n", 7408 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : 7409 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : 7410 speed == PCIE_SPEED_8_0GT ? 
"8.0GT/s" : 7411 "Unknown", width); 7412 } 7413 7414 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 7415 { 7416 static int version_printed; 7417 struct net_device *dev; 7418 struct bnxt *bp; 7419 int rc, max_irqs; 7420 7421 if (pci_is_bridge(pdev)) 7422 return -ENODEV; 7423 7424 if (version_printed++ == 0) 7425 pr_info("%s", version); 7426 7427 max_irqs = bnxt_get_max_irq(pdev); 7428 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); 7429 if (!dev) 7430 return -ENOMEM; 7431 7432 bp = netdev_priv(dev); 7433 7434 if (bnxt_vf_pciid(ent->driver_data)) 7435 bp->flags |= BNXT_FLAG_VF; 7436 7437 if (pdev->msix_cap) 7438 bp->flags |= BNXT_FLAG_MSIX_CAP; 7439 7440 rc = bnxt_init_board(pdev, dev); 7441 if (rc < 0) 7442 goto init_err_free; 7443 7444 dev->netdev_ops = &bnxt_netdev_ops; 7445 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 7446 dev->ethtool_ops = &bnxt_ethtool_ops; 7447 pci_set_drvdata(pdev, dev); 7448 7449 rc = bnxt_alloc_hwrm_resources(bp); 7450 if (rc) 7451 goto init_err_pci_clean; 7452 7453 mutex_init(&bp->hwrm_cmd_lock); 7454 rc = bnxt_hwrm_ver_get(bp); 7455 if (rc) 7456 goto init_err_pci_clean; 7457 7458 rc = bnxt_hwrm_func_reset(bp); 7459 if (rc) 7460 goto init_err_pci_clean; 7461 7462 bnxt_hwrm_fw_set_time(bp); 7463 7464 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 7465 NETIF_F_TSO | NETIF_F_TSO6 | 7466 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 7467 NETIF_F_GSO_IPXIP4 | 7468 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 7469 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 7470 NETIF_F_RXCSUM | NETIF_F_GRO; 7471 7472 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 7473 dev->hw_features |= NETIF_F_LRO; 7474 7475 dev->hw_enc_features = 7476 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 7477 NETIF_F_TSO | NETIF_F_TSO6 | 7478 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 7479 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 7480 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 7481 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 7482 NETIF_F_GSO_GRE_CSUM; 7483 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 7484 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | 7485 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; 7486 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 7487 dev->priv_flags |= IFF_UNICAST_FLT; 7488 7489 /* MTU range: 60 - 9500 */ 7490 dev->min_mtu = ETH_ZLEN; 7491 dev->max_mtu = BNXT_MAX_MTU; 7492 7493 bnxt_dcb_init(bp); 7494 7495 #ifdef CONFIG_BNXT_SRIOV 7496 init_waitqueue_head(&bp->sriov_cfg_wait); 7497 #endif 7498 bp->gro_func = bnxt_gro_func_5730x; 7499 if (BNXT_CHIP_NUM_57X1X(bp->chip_num)) 7500 bp->gro_func = bnxt_gro_func_5731x; 7501 7502 rc = bnxt_hwrm_func_drv_rgtr(bp); 7503 if (rc) 7504 goto init_err_pci_clean; 7505 7506 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); 7507 if (rc) 7508 goto init_err_pci_clean; 7509 7510 bp->ulp_probe = bnxt_ulp_probe; 7511 7512 /* Get the MAX capabilities for this function */ 7513 rc = bnxt_hwrm_func_qcaps(bp); 7514 if (rc) { 7515 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 7516 rc); 7517 rc = -1; 7518 goto init_err_pci_clean; 7519 } 7520 7521 rc = bnxt_hwrm_queue_qportcfg(bp); 7522 if (rc) { 7523 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", 7524 rc); 7525 rc = -1; 7526 goto init_err_pci_clean; 7527 } 7528 7529 bnxt_hwrm_func_qcfg(bp); 7530 bnxt_hwrm_port_led_qcaps(bp); 7531 7532 bnxt_set_rx_skb_mode(bp, false); 7533 bnxt_set_tpa_flags(bp); 7534 bnxt_set_ring_params(bp); 7535 bnxt_set_max_func_irqs(bp, max_irqs); 
7536 rc = bnxt_set_dflt_rings(bp); 7537 if (rc) { 7538 netdev_err(bp->dev, "Not enough rings available.\n"); 7539 rc = -ENOMEM; 7540 goto init_err_pci_clean; 7541 } 7542 7543 /* Default RSS hash cfg. */ 7544 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 7545 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 7546 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 7547 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 7548 if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) && 7549 !BNXT_CHIP_TYPE_NITRO_A0(bp) && 7550 bp->hwrm_spec_code >= 0x10501) { 7551 bp->flags |= BNXT_FLAG_UDP_RSS_CAP; 7552 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 7553 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 7554 } 7555 7556 bnxt_hwrm_vnic_qcaps(bp); 7557 if (bnxt_rfs_supported(bp)) { 7558 dev->hw_features |= NETIF_F_NTUPLE; 7559 if (bnxt_rfs_capable(bp)) { 7560 bp->flags |= BNXT_FLAG_RFS; 7561 dev->features |= NETIF_F_NTUPLE; 7562 } 7563 } 7564 7565 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) 7566 bp->flags |= BNXT_FLAG_STRIP_VLAN; 7567 7568 rc = bnxt_probe_phy(bp); 7569 if (rc) 7570 goto init_err_pci_clean; 7571 7572 rc = bnxt_init_int_mode(bp); 7573 if (rc) 7574 goto init_err_pci_clean; 7575 7576 rc = register_netdev(dev); 7577 if (rc) 7578 goto init_err_clr_int; 7579 7580 netdev_info(dev, "%s found at mem %lx, node addr %pM\n", 7581 board_info[ent->driver_data].name, 7582 (long)pci_resource_start(pdev, 0), dev->dev_addr); 7583 7584 bnxt_parse_log_pcie_link(bp); 7585 7586 return 0; 7587 7588 init_err_clr_int: 7589 bnxt_clear_int_mode(bp); 7590 7591 init_err_pci_clean: 7592 bnxt_cleanup_pci(bp); 7593 7594 init_err_free: 7595 free_netdev(dev); 7596 return rc; 7597 } 7598 7599 /** 7600 * bnxt_io_error_detected - called when PCI error is detected 7601 * @pdev: Pointer to PCI device 7602 * @state: The current pci connection state 7603 * 7604 * This function is called after a PCI bus error affecting 7605 * this device has been detected. 7606 */ 7607 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, 7608 pci_channel_state_t state) 7609 { 7610 struct net_device *netdev = pci_get_drvdata(pdev); 7611 struct bnxt *bp = netdev_priv(netdev); 7612 7613 netdev_info(netdev, "PCI I/O error detected\n"); 7614 7615 rtnl_lock(); 7616 netif_device_detach(netdev); 7617 7618 bnxt_ulp_stop(bp); 7619 7620 if (state == pci_channel_io_perm_failure) { 7621 rtnl_unlock(); 7622 return PCI_ERS_RESULT_DISCONNECT; 7623 } 7624 7625 if (netif_running(netdev)) 7626 bnxt_close(netdev); 7627 7628 pci_disable_device(pdev); 7629 rtnl_unlock(); 7630 7631 /* Request a slot slot reset. */ 7632 return PCI_ERS_RESULT_NEED_RESET; 7633 } 7634 7635 /** 7636 * bnxt_io_slot_reset - called after the pci bus has been reset. 7637 * @pdev: Pointer to PCI device 7638 * 7639 * Restart the card from scratch, as if from a cold-boot. 7640 * At this point, the card has exprienced a hard reset, 7641 * followed by fixups by BIOS, and has its config space 7642 * set up identically to what it was at cold boot. 
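 * The handler below re-enables the device, restores bus mastering and
 * issues a fresh firmware reset before reopening the netdev.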
7643 */ 7644 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) 7645 { 7646 struct net_device *netdev = pci_get_drvdata(pdev); 7647 struct bnxt *bp = netdev_priv(netdev); 7648 int err = 0; 7649 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; 7650 7651 netdev_info(bp->dev, "PCI Slot Reset\n"); 7652 7653 rtnl_lock(); 7654 7655 if (pci_enable_device(pdev)) { 7656 dev_err(&pdev->dev, 7657 "Cannot re-enable PCI device after reset.\n"); 7658 } else { 7659 pci_set_master(pdev); 7660 7661 err = bnxt_hwrm_func_reset(bp); 7662 if (!err && netif_running(netdev)) 7663 err = bnxt_open(netdev); 7664 7665 if (!err) { 7666 result = PCI_ERS_RESULT_RECOVERED; 7667 bnxt_ulp_start(bp); 7668 } 7669 } 7670 7671 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) 7672 dev_close(netdev); 7673 7674 rtnl_unlock(); 7675 7676 err = pci_cleanup_aer_uncorrect_error_status(pdev); 7677 if (err) { 7678 dev_err(&pdev->dev, 7679 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", 7680 err); /* non-fatal, continue */ 7681 } 7682 7683 return PCI_ERS_RESULT_RECOVERED; 7684 } 7685 7686 /** 7687 * bnxt_io_resume - called when traffic can start flowing again. 7688 * @pdev: Pointer to PCI device 7689 * 7690 * This callback is called when the error recovery driver tells 7691 * us that its OK to resume normal operation. 7692 */ 7693 static void bnxt_io_resume(struct pci_dev *pdev) 7694 { 7695 struct net_device *netdev = pci_get_drvdata(pdev); 7696 7697 rtnl_lock(); 7698 7699 netif_device_attach(netdev); 7700 7701 rtnl_unlock(); 7702 } 7703 7704 static const struct pci_error_handlers bnxt_err_handler = { 7705 .error_detected = bnxt_io_error_detected, 7706 .slot_reset = bnxt_io_slot_reset, 7707 .resume = bnxt_io_resume 7708 }; 7709 7710 static struct pci_driver bnxt_pci_driver = { 7711 .name = DRV_MODULE_NAME, 7712 .id_table = bnxt_pci_tbl, 7713 .probe = bnxt_init_one, 7714 .remove = bnxt_remove_one, 7715 .err_handler = &bnxt_err_handler, 7716 #if defined(CONFIG_BNXT_SRIOV) 7717 .sriov_configure = bnxt_sriov_configure, 7718 #endif 7719 }; 7720 7721 module_pci_driver(bnxt_pci_driver); 7722
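/* For reference only: module_pci_driver() above expands to roughly the
 * following init/exit boilerplate (a sketch of the generic macro, not extra
 * driver code):
 *
 *	static int __init bnxt_pci_driver_init(void)
 *	{
 *		return pci_register_driver(&bnxt_pci_driver);
 *	}
 *	module_init(bnxt_pci_driver_init);
 *
 *	static void __exit bnxt_pci_driver_exit(void)
 *	{
 *		pci_unregister_driver(&bnxt_pci_driver);
 *	}
 *	module_exit(bnxt_pci_driver_exit);
 */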