/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
10GBase-T Ethernet" }, 126 { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 127 { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 128 { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 129 { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 130 { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 131 { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 132 { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 133 { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 134 { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 135 { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 136 { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 137 { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 138 }; 139 140 static const struct pci_device_id bnxt_pci_tbl[] = { 141 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 142 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 143 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 144 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, 145 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 146 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 147 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 148 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 149 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 150 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 151 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 152 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 153 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 154 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 155 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 156 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 157 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 158 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 159 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 160 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 161 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 162 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 163 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 164 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 165 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 166 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 167 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 168 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 169 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 170 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 171 #ifdef CONFIG_BNXT_SRIOV 172 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 173 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 174 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 175 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 176 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 177 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 178 #endif 179 { 0 } 180 }; 181 182 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 183 184 static const u16 bnxt_vf_req_snif[] = { 185 HWRM_FUNC_CFG, 186 HWRM_PORT_PHY_QCFG, 187 HWRM_CFA_L2_FILTER_ALLOC, 188 }; 189 190 static const u16 bnxt_async_events_arr[] = { 191 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
	writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
	writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = txr - &bp->tx_ring[0];
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner iPv4/ipv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tcp_gro_complete(skb);

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
				   agg_event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			skb_mark_napi_id(skb, &bnapi->napi);
			if (bnxt_busy_polling(bnapi))
				netif_receive_skb(skb);
			else
				napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data);

	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
				RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*agg_event = true;
	}

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = dma_unmap_addr(rx_buf, mapping);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	skb_mark_napi_id(skb, &bnapi->napi);
	if (bnxt_busy_polling(bnapi))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;
		if (data1 & 0x20000) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
				    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
				(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);

	default:
		break;
	}

	return 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}

static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	bool rx_event = false;
	bool agg_event = false;
	struct tx_cmp *txcmp;

	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			else if (rc == -EBUSY)	/* partial completion */
				break;
			rx_event = true;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);

	if (tx_pkts)
		bnxt_tx_int(bp, bnapi, tx_pkts);

	if (rx_event) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		if (agg_event) {
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
		}
	}
	return rx_pkts;
}

static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	bool agg_event = false;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
			if (likely(rc == -EIO))
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);

	if (agg_event) {
		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
	}

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete(napi);
		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
	return rx_pkts;
}

static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	if (!bnxt_lock_napi(bnapi))
		return budget;

	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

		if (work_done >= budget)
			break;

		if (!bnxt_has_work(bp, cpr)) {
			napi_complete(napi);
			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
			break;
		}
	}
	mmiowb();
	bnxt_unlock_napi(bnapi);
	return work_done;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int bnxt_busy_poll(struct napi_struct *napi)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int rx_work, budget = 4;

	if (atomic_read(&bp->intr_sem) != 0)
		return LL_FLUSH_FAILED;

	if (!bp->link_info.link_up)
		return LL_FLUSH_FAILED;

	if (!bnxt_lock_poll(bnapi))
		return LL_FLUSH_BUSY;

	rx_work = bnxt_poll_work(bp, bnapi, budget);

	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);

	bnxt_unlock_poll(bnapi);
	return rx_work;
}
#endif

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single(
					&pdev->dev,
					dma_unmap_addr(tpa_info, mapping),
					bp->rx_buf_use_size,
					PCI_DMA_FROMDEVICE);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(rx_agg_buf, mapping),
				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
		if (rxr->rx_page) {
			__free_page(rxr->rx_page);
			rxr->rx_page = NULL;
		}
	}
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

1966 1967 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 1968 { 1969 struct pci_dev *pdev = bp->pdev; 1970 int i; 1971 1972 for (i = 0; i < ring->nr_pages; i++) { 1973 if (!ring->pg_arr[i]) 1974 continue; 1975 1976 dma_free_coherent(&pdev->dev, ring->page_size, 1977 ring->pg_arr[i], ring->dma_arr[i]); 1978 1979 ring->pg_arr[i] = NULL; 1980 } 1981 if (ring->pg_tbl) { 1982 dma_free_coherent(&pdev->dev, ring->nr_pages * 8, 1983 ring->pg_tbl, ring->pg_tbl_map); 1984 ring->pg_tbl = NULL; 1985 } 1986 if (ring->vmem_size && *ring->vmem) { 1987 vfree(*ring->vmem); 1988 *ring->vmem = NULL; 1989 } 1990 } 1991 1992 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 1993 { 1994 int i; 1995 struct pci_dev *pdev = bp->pdev; 1996 1997 if (ring->nr_pages > 1) { 1998 ring->pg_tbl = dma_alloc_coherent(&pdev->dev, 1999 ring->nr_pages * 8, 2000 &ring->pg_tbl_map, 2001 GFP_KERNEL); 2002 if (!ring->pg_tbl) 2003 return -ENOMEM; 2004 } 2005 2006 for (i = 0; i < ring->nr_pages; i++) { 2007 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2008 ring->page_size, 2009 &ring->dma_arr[i], 2010 GFP_KERNEL); 2011 if (!ring->pg_arr[i]) 2012 return -ENOMEM; 2013 2014 if (ring->nr_pages > 1) 2015 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); 2016 } 2017 2018 if (ring->vmem_size) { 2019 *ring->vmem = vzalloc(ring->vmem_size); 2020 if (!(*ring->vmem)) 2021 return -ENOMEM; 2022 } 2023 return 0; 2024 } 2025 2026 static void bnxt_free_rx_rings(struct bnxt *bp) 2027 { 2028 int i; 2029 2030 if (!bp->rx_ring) 2031 return; 2032 2033 for (i = 0; i < bp->rx_nr_rings; i++) { 2034 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2035 struct bnxt_ring_struct *ring; 2036 2037 kfree(rxr->rx_tpa); 2038 rxr->rx_tpa = NULL; 2039 2040 kfree(rxr->rx_agg_bmap); 2041 rxr->rx_agg_bmap = NULL; 2042 2043 ring = &rxr->rx_ring_struct; 2044 bnxt_free_ring(bp, ring); 2045 2046 ring = &rxr->rx_agg_ring_struct; 2047 bnxt_free_ring(bp, ring); 2048 } 2049 } 2050 2051 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2052 { 2053 int i, rc, agg_rings = 0, tpa_rings = 0; 2054 2055 if (!bp->rx_ring) 2056 return -ENOMEM; 2057 2058 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2059 agg_rings = 1; 2060 2061 if (bp->flags & BNXT_FLAG_TPA) 2062 tpa_rings = 1; 2063 2064 for (i = 0; i < bp->rx_nr_rings; i++) { 2065 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2066 struct bnxt_ring_struct *ring; 2067 2068 ring = &rxr->rx_ring_struct; 2069 2070 rc = bnxt_alloc_ring(bp, ring); 2071 if (rc) 2072 return rc; 2073 2074 if (agg_rings) { 2075 u16 mem_size; 2076 2077 ring = &rxr->rx_agg_ring_struct; 2078 rc = bnxt_alloc_ring(bp, ring); 2079 if (rc) 2080 return rc; 2081 2082 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2083 mem_size = rxr->rx_agg_bmap_size / 8; 2084 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2085 if (!rxr->rx_agg_bmap) 2086 return -ENOMEM; 2087 2088 if (tpa_rings) { 2089 rxr->rx_tpa = kcalloc(MAX_TPA, 2090 sizeof(struct bnxt_tpa_info), 2091 GFP_KERNEL); 2092 if (!rxr->rx_tpa) 2093 return -ENOMEM; 2094 } 2095 } 2096 } 2097 return 0; 2098 } 2099 2100 static void bnxt_free_tx_rings(struct bnxt *bp) 2101 { 2102 int i; 2103 struct pci_dev *pdev = bp->pdev; 2104 2105 if (!bp->tx_ring) 2106 return; 2107 2108 for (i = 0; i < bp->tx_nr_rings; i++) { 2109 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2110 struct bnxt_ring_struct *ring; 2111 2112 if (txr->tx_push) { 2113 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2114 txr->tx_push, txr->tx_push_mapping); 2115 txr->tx_push = NULL; 2116 } 2117 2118 ring = 
&txr->tx_ring_struct; 2119 2120 bnxt_free_ring(bp, ring); 2121 } 2122 } 2123 2124 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2125 { 2126 int i, j, rc; 2127 struct pci_dev *pdev = bp->pdev; 2128 2129 bp->tx_push_size = 0; 2130 if (bp->tx_push_thresh) { 2131 int push_size; 2132 2133 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2134 bp->tx_push_thresh); 2135 2136 if (push_size > 256) { 2137 push_size = 0; 2138 bp->tx_push_thresh = 0; 2139 } 2140 2141 bp->tx_push_size = push_size; 2142 } 2143 2144 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2145 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2146 struct bnxt_ring_struct *ring; 2147 2148 ring = &txr->tx_ring_struct; 2149 2150 rc = bnxt_alloc_ring(bp, ring); 2151 if (rc) 2152 return rc; 2153 2154 if (bp->tx_push_size) { 2155 dma_addr_t mapping; 2156 2157 /* One pre-allocated DMA buffer to backup 2158 * TX push operation 2159 */ 2160 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2161 bp->tx_push_size, 2162 &txr->tx_push_mapping, 2163 GFP_KERNEL); 2164 2165 if (!txr->tx_push) 2166 return -ENOMEM; 2167 2168 mapping = txr->tx_push_mapping + 2169 sizeof(struct tx_push_bd); 2170 txr->data_mapping = cpu_to_le64(mapping); 2171 2172 memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); 2173 } 2174 ring->queue_id = bp->q_info[j].queue_id; 2175 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2176 j++; 2177 } 2178 return 0; 2179 } 2180 2181 static void bnxt_free_cp_rings(struct bnxt *bp) 2182 { 2183 int i; 2184 2185 if (!bp->bnapi) 2186 return; 2187 2188 for (i = 0; i < bp->cp_nr_rings; i++) { 2189 struct bnxt_napi *bnapi = bp->bnapi[i]; 2190 struct bnxt_cp_ring_info *cpr; 2191 struct bnxt_ring_struct *ring; 2192 2193 if (!bnapi) 2194 continue; 2195 2196 cpr = &bnapi->cp_ring; 2197 ring = &cpr->cp_ring_struct; 2198 2199 bnxt_free_ring(bp, ring); 2200 } 2201 } 2202 2203 static int bnxt_alloc_cp_rings(struct bnxt *bp) 2204 { 2205 int i, rc; 2206 2207 for (i = 0; i < bp->cp_nr_rings; i++) { 2208 struct bnxt_napi *bnapi = bp->bnapi[i]; 2209 struct bnxt_cp_ring_info *cpr; 2210 struct bnxt_ring_struct *ring; 2211 2212 if (!bnapi) 2213 continue; 2214 2215 cpr = &bnapi->cp_ring; 2216 ring = &cpr->cp_ring_struct; 2217 2218 rc = bnxt_alloc_ring(bp, ring); 2219 if (rc) 2220 return rc; 2221 } 2222 return 0; 2223 } 2224 2225 static void bnxt_init_ring_struct(struct bnxt *bp) 2226 { 2227 int i; 2228 2229 for (i = 0; i < bp->cp_nr_rings; i++) { 2230 struct bnxt_napi *bnapi = bp->bnapi[i]; 2231 struct bnxt_cp_ring_info *cpr; 2232 struct bnxt_rx_ring_info *rxr; 2233 struct bnxt_tx_ring_info *txr; 2234 struct bnxt_ring_struct *ring; 2235 2236 if (!bnapi) 2237 continue; 2238 2239 cpr = &bnapi->cp_ring; 2240 ring = &cpr->cp_ring_struct; 2241 ring->nr_pages = bp->cp_nr_pages; 2242 ring->page_size = HW_CMPD_RING_SIZE; 2243 ring->pg_arr = (void **)cpr->cp_desc_ring; 2244 ring->dma_arr = cpr->cp_desc_mapping; 2245 ring->vmem_size = 0; 2246 2247 rxr = bnapi->rx_ring; 2248 if (!rxr) 2249 goto skip_rx; 2250 2251 ring = &rxr->rx_ring_struct; 2252 ring->nr_pages = bp->rx_nr_pages; 2253 ring->page_size = HW_RXBD_RING_SIZE; 2254 ring->pg_arr = (void **)rxr->rx_desc_ring; 2255 ring->dma_arr = rxr->rx_desc_mapping; 2256 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 2257 ring->vmem = (void **)&rxr->rx_buf_ring; 2258 2259 ring = &rxr->rx_agg_ring_struct; 2260 ring->nr_pages = bp->rx_agg_nr_pages; 2261 ring->page_size = HW_RXBD_RING_SIZE; 2262 ring->pg_arr = (void **)rxr->rx_agg_desc_ring; 2263 ring->dma_arr = rxr->rx_agg_desc_mapping; 2264 
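		/* The vmem fields set up next describe the host-only software
		 * state for the aggregation ring (rxr->rx_agg_ring, an array
		 * of struct bnxt_sw_rx_agg_bd).  bnxt_alloc_ring() vzalloc()s
		 * this area rather than DMA-mapping it, since only the driver
		 * ever looks at it.
		 */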
ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 2265 ring->vmem = (void **)&rxr->rx_agg_ring; 2266 2267 skip_rx: 2268 txr = bnapi->tx_ring; 2269 if (!txr) 2270 continue; 2271 2272 ring = &txr->tx_ring_struct; 2273 ring->nr_pages = bp->tx_nr_pages; 2274 ring->page_size = HW_RXBD_RING_SIZE; 2275 ring->pg_arr = (void **)txr->tx_desc_ring; 2276 ring->dma_arr = txr->tx_desc_mapping; 2277 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 2278 ring->vmem = (void **)&txr->tx_buf_ring; 2279 } 2280 } 2281 2282 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 2283 { 2284 int i; 2285 u32 prod; 2286 struct rx_bd **rx_buf_ring; 2287 2288 rx_buf_ring = (struct rx_bd **)ring->pg_arr; 2289 for (i = 0, prod = 0; i < ring->nr_pages; i++) { 2290 int j; 2291 struct rx_bd *rxbd; 2292 2293 rxbd = rx_buf_ring[i]; 2294 if (!rxbd) 2295 continue; 2296 2297 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 2298 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 2299 rxbd->rx_bd_opaque = prod; 2300 } 2301 } 2302 } 2303 2304 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 2305 { 2306 struct net_device *dev = bp->dev; 2307 struct bnxt_rx_ring_info *rxr; 2308 struct bnxt_ring_struct *ring; 2309 u32 prod, type; 2310 int i; 2311 2312 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 2313 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 2314 2315 if (NET_IP_ALIGN == 2) 2316 type |= RX_BD_FLAGS_SOP; 2317 2318 rxr = &bp->rx_ring[ring_nr]; 2319 ring = &rxr->rx_ring_struct; 2320 bnxt_init_rxbd_pages(ring, type); 2321 2322 prod = rxr->rx_prod; 2323 for (i = 0; i < bp->rx_ring_size; i++) { 2324 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 2325 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 2326 ring_nr, i, bp->rx_ring_size); 2327 break; 2328 } 2329 prod = NEXT_RX(prod); 2330 } 2331 rxr->rx_prod = prod; 2332 ring->fw_ring_id = INVALID_HW_RING_ID; 2333 2334 ring = &rxr->rx_agg_ring_struct; 2335 ring->fw_ring_id = INVALID_HW_RING_ID; 2336 2337 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 2338 return 0; 2339 2340 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 2341 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 2342 2343 bnxt_init_rxbd_pages(ring, type); 2344 2345 prod = rxr->rx_agg_prod; 2346 for (i = 0; i < bp->rx_agg_ring_size; i++) { 2347 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 2348 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 2349 ring_nr, i, bp->rx_ring_size); 2350 break; 2351 } 2352 prod = NEXT_RX_AGG(prod); 2353 } 2354 rxr->rx_agg_prod = prod; 2355 2356 if (bp->flags & BNXT_FLAG_TPA) { 2357 if (rxr->rx_tpa) { 2358 u8 *data; 2359 dma_addr_t mapping; 2360 2361 for (i = 0; i < MAX_TPA; i++) { 2362 data = __bnxt_alloc_rx_data(bp, &mapping, 2363 GFP_KERNEL); 2364 if (!data) 2365 return -ENOMEM; 2366 2367 rxr->rx_tpa[i].data = data; 2368 rxr->rx_tpa[i].mapping = mapping; 2369 } 2370 } else { 2371 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 2372 return -ENOMEM; 2373 } 2374 } 2375 2376 return 0; 2377 } 2378 2379 static int bnxt_init_rx_rings(struct bnxt *bp) 2380 { 2381 int i, rc = 0; 2382 2383 for (i = 0; i < bp->rx_nr_rings; i++) { 2384 rc = bnxt_init_one_rx_ring(bp, i); 2385 if (rc) 2386 break; 2387 } 2388 2389 return rc; 2390 } 2391 2392 static int bnxt_init_tx_rings(struct bnxt *bp) 2393 { 2394 u16 i; 2395 2396 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 2397 MAX_SKB_FRAGS + 1); 2398 2399 for (i = 0; i < bp->tx_nr_rings; i++) { 2400 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2401 struct 
bnxt_ring_struct *ring = &txr->tx_ring_struct; 2402 2403 ring->fw_ring_id = INVALID_HW_RING_ID; 2404 } 2405 2406 return 0; 2407 } 2408 2409 static void bnxt_free_ring_grps(struct bnxt *bp) 2410 { 2411 kfree(bp->grp_info); 2412 bp->grp_info = NULL; 2413 } 2414 2415 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 2416 { 2417 int i; 2418 2419 if (irq_re_init) { 2420 bp->grp_info = kcalloc(bp->cp_nr_rings, 2421 sizeof(struct bnxt_ring_grp_info), 2422 GFP_KERNEL); 2423 if (!bp->grp_info) 2424 return -ENOMEM; 2425 } 2426 for (i = 0; i < bp->cp_nr_rings; i++) { 2427 if (irq_re_init) 2428 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 2429 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 2430 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 2431 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 2432 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 2433 } 2434 return 0; 2435 } 2436 2437 static void bnxt_free_vnics(struct bnxt *bp) 2438 { 2439 kfree(bp->vnic_info); 2440 bp->vnic_info = NULL; 2441 bp->nr_vnics = 0; 2442 } 2443 2444 static int bnxt_alloc_vnics(struct bnxt *bp) 2445 { 2446 int num_vnics = 1; 2447 2448 #ifdef CONFIG_RFS_ACCEL 2449 if (bp->flags & BNXT_FLAG_RFS) 2450 num_vnics += bp->rx_nr_rings; 2451 #endif 2452 2453 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 2454 num_vnics++; 2455 2456 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 2457 GFP_KERNEL); 2458 if (!bp->vnic_info) 2459 return -ENOMEM; 2460 2461 bp->nr_vnics = num_vnics; 2462 return 0; 2463 } 2464 2465 static void bnxt_init_vnics(struct bnxt *bp) 2466 { 2467 int i; 2468 2469 for (i = 0; i < bp->nr_vnics; i++) { 2470 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2471 2472 vnic->fw_vnic_id = INVALID_HW_RING_ID; 2473 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 2474 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 2475 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 2476 2477 if (bp->vnic_info[i].rss_hash_key) { 2478 if (i == 0) 2479 prandom_bytes(vnic->rss_hash_key, 2480 HW_HASH_KEY_SIZE); 2481 else 2482 memcpy(vnic->rss_hash_key, 2483 bp->vnic_info[0].rss_hash_key, 2484 HW_HASH_KEY_SIZE); 2485 } 2486 } 2487 } 2488 2489 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 2490 { 2491 int pages; 2492 2493 pages = ring_size / desc_per_pg; 2494 2495 if (!pages) 2496 return 1; 2497 2498 pages++; 2499 2500 while (pages & (pages - 1)) 2501 pages++; 2502 2503 return pages; 2504 } 2505 2506 static void bnxt_set_tpa_flags(struct bnxt *bp) 2507 { 2508 bp->flags &= ~BNXT_FLAG_TPA; 2509 if (bp->dev->features & NETIF_F_LRO) 2510 bp->flags |= BNXT_FLAG_LRO; 2511 if (bp->dev->features & NETIF_F_GRO) 2512 bp->flags |= BNXT_FLAG_GRO; 2513 } 2514 2515 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 2516 * be set on entry. 
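 *
 * Everything else is derived here: the copy-break threshold, whether an
 * aggregation ring is needed (TPA and/or jumbo MTU), and the number of
 * descriptor pages for the rx, agg, tx and completion rings.  As a rough
 * worked example (values picked purely for illustration), with
 * rx_ring_size = 1024, tx_ring_size = 512 and TPA enabled on a 4K
 * BNXT_RX_PAGE_SIZE: agg_factor = min(4, 65536 / 4096) = 4, so the
 * completion ring is sized for 1024 * (2 + 4) + 512 = 6656 entries before
 * bnxt_calc_nr_ring_pages() rounds it up to whole pages.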
2517 */ 2518 void bnxt_set_ring_params(struct bnxt *bp) 2519 { 2520 u32 ring_size, rx_size, rx_space; 2521 u32 agg_factor = 0, agg_ring_size = 0; 2522 2523 /* 8 for CRC and VLAN */ 2524 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 2525 2526 rx_space = rx_size + NET_SKB_PAD + 2527 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2528 2529 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 2530 ring_size = bp->rx_ring_size; 2531 bp->rx_agg_ring_size = 0; 2532 bp->rx_agg_nr_pages = 0; 2533 2534 if (bp->flags & BNXT_FLAG_TPA) 2535 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 2536 2537 bp->flags &= ~BNXT_FLAG_JUMBO; 2538 if (rx_space > PAGE_SIZE) { 2539 u32 jumbo_factor; 2540 2541 bp->flags |= BNXT_FLAG_JUMBO; 2542 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 2543 if (jumbo_factor > agg_factor) 2544 agg_factor = jumbo_factor; 2545 } 2546 agg_ring_size = ring_size * agg_factor; 2547 2548 if (agg_ring_size) { 2549 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 2550 RX_DESC_CNT); 2551 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 2552 u32 tmp = agg_ring_size; 2553 2554 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 2555 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 2556 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 2557 tmp, agg_ring_size); 2558 } 2559 bp->rx_agg_ring_size = agg_ring_size; 2560 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 2561 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 2562 rx_space = rx_size + NET_SKB_PAD + 2563 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2564 } 2565 2566 bp->rx_buf_use_size = rx_size; 2567 bp->rx_buf_size = rx_space; 2568 2569 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 2570 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 2571 2572 ring_size = bp->tx_ring_size; 2573 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 2574 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 2575 2576 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 2577 bp->cp_ring_size = ring_size; 2578 2579 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 2580 if (bp->cp_nr_pages > MAX_CP_PAGES) { 2581 bp->cp_nr_pages = MAX_CP_PAGES; 2582 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 2583 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 2584 ring_size, bp->cp_ring_size); 2585 } 2586 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 2587 bp->cp_ring_mask = bp->cp_bit - 1; 2588 } 2589 2590 static void bnxt_free_vnic_attributes(struct bnxt *bp) 2591 { 2592 int i; 2593 struct bnxt_vnic_info *vnic; 2594 struct pci_dev *pdev = bp->pdev; 2595 2596 if (!bp->vnic_info) 2597 return; 2598 2599 for (i = 0; i < bp->nr_vnics; i++) { 2600 vnic = &bp->vnic_info[i]; 2601 2602 kfree(vnic->fw_grp_ids); 2603 vnic->fw_grp_ids = NULL; 2604 2605 kfree(vnic->uc_list); 2606 vnic->uc_list = NULL; 2607 2608 if (vnic->mc_list) { 2609 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 2610 vnic->mc_list, vnic->mc_list_mapping); 2611 vnic->mc_list = NULL; 2612 } 2613 2614 if (vnic->rss_table) { 2615 dma_free_coherent(&pdev->dev, PAGE_SIZE, 2616 vnic->rss_table, 2617 vnic->rss_table_dma_addr); 2618 vnic->rss_table = NULL; 2619 } 2620 2621 vnic->rss_hash_key = NULL; 2622 vnic->flags = 0; 2623 } 2624 } 2625 2626 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 2627 { 2628 int i, rc = 0, size; 2629 struct bnxt_vnic_info *vnic; 2630 struct pci_dev *pdev = bp->pdev; 2631 int max_rings; 2632 2633 for (i = 0; i < 
bp->nr_vnics; i++) { 2634 vnic = &bp->vnic_info[i]; 2635 2636 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 2637 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 2638 2639 if (mem_size > 0) { 2640 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 2641 if (!vnic->uc_list) { 2642 rc = -ENOMEM; 2643 goto out; 2644 } 2645 } 2646 } 2647 2648 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 2649 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 2650 vnic->mc_list = 2651 dma_alloc_coherent(&pdev->dev, 2652 vnic->mc_list_size, 2653 &vnic->mc_list_mapping, 2654 GFP_KERNEL); 2655 if (!vnic->mc_list) { 2656 rc = -ENOMEM; 2657 goto out; 2658 } 2659 } 2660 2661 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 2662 max_rings = bp->rx_nr_rings; 2663 else 2664 max_rings = 1; 2665 2666 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 2667 if (!vnic->fw_grp_ids) { 2668 rc = -ENOMEM; 2669 goto out; 2670 } 2671 2672 /* Allocate rss table and hash key */ 2673 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2674 &vnic->rss_table_dma_addr, 2675 GFP_KERNEL); 2676 if (!vnic->rss_table) { 2677 rc = -ENOMEM; 2678 goto out; 2679 } 2680 2681 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 2682 2683 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 2684 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 2685 } 2686 return 0; 2687 2688 out: 2689 return rc; 2690 } 2691 2692 static void bnxt_free_hwrm_resources(struct bnxt *bp) 2693 { 2694 struct pci_dev *pdev = bp->pdev; 2695 2696 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 2697 bp->hwrm_cmd_resp_dma_addr); 2698 2699 bp->hwrm_cmd_resp_addr = NULL; 2700 if (bp->hwrm_dbg_resp_addr) { 2701 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, 2702 bp->hwrm_dbg_resp_addr, 2703 bp->hwrm_dbg_resp_dma_addr); 2704 2705 bp->hwrm_dbg_resp_addr = NULL; 2706 } 2707 } 2708 2709 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 2710 { 2711 struct pci_dev *pdev = bp->pdev; 2712 2713 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2714 &bp->hwrm_cmd_resp_dma_addr, 2715 GFP_KERNEL); 2716 if (!bp->hwrm_cmd_resp_addr) 2717 return -ENOMEM; 2718 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, 2719 HWRM_DBG_REG_BUF_SIZE, 2720 &bp->hwrm_dbg_resp_dma_addr, 2721 GFP_KERNEL); 2722 if (!bp->hwrm_dbg_resp_addr) 2723 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); 2724 2725 return 0; 2726 } 2727 2728 static void bnxt_free_stats(struct bnxt *bp) 2729 { 2730 u32 size, i; 2731 struct pci_dev *pdev = bp->pdev; 2732 2733 if (bp->hw_rx_port_stats) { 2734 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 2735 bp->hw_rx_port_stats, 2736 bp->hw_rx_port_stats_map); 2737 bp->hw_rx_port_stats = NULL; 2738 bp->flags &= ~BNXT_FLAG_PORT_STATS; 2739 } 2740 2741 if (!bp->bnapi) 2742 return; 2743 2744 size = sizeof(struct ctx_hw_stats); 2745 2746 for (i = 0; i < bp->cp_nr_rings; i++) { 2747 struct bnxt_napi *bnapi = bp->bnapi[i]; 2748 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2749 2750 if (cpr->hw_stats) { 2751 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 2752 cpr->hw_stats_map); 2753 cpr->hw_stats = NULL; 2754 } 2755 } 2756 } 2757 2758 static int bnxt_alloc_stats(struct bnxt *bp) 2759 { 2760 u32 size, i; 2761 struct pci_dev *pdev = bp->pdev; 2762 2763 size = sizeof(struct ctx_hw_stats); 2764 2765 for (i = 0; i < bp->cp_nr_rings; i++) { 2766 struct bnxt_napi *bnapi = bp->bnapi[i]; 2767 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2768 2769 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 2770 
						   &cpr->hw_stats_map,
						   GFP_KERNEL);
		if (!cpr->hw_stats)
			return -ENOMEM;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
					 sizeof(struct tx_port_stats) + 1024;

		bp->hw_rx_port_stats =
			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
					   &bp->hw_rx_port_stats_map,
					   GFP_KERNEL);
		if (!bp->hw_rx_port_stats)
			return -ENOMEM;

		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
				       512;
		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
					   sizeof(struct rx_port_stats) + 512;
		bp->flags |= BNXT_FLAG_PORT_STATS;
	}
	return 0;
}

static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;
		}
	}
}

static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* Under rtnl_lock and all our NAPIs have been disabled. It's
	 * safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		kfree(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	/* BITS_TO_LONGS() returns a count of longs, not bytes, so scale by
	 * sizeof(long) to get the bitmap's byte size.
	 */
	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
				    sizeof(long), GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}

static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}

static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
bp->cp_nr_rings); 2917 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 2918 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 2919 if (!bnapi) 2920 return -ENOMEM; 2921 2922 bp->bnapi = bnapi; 2923 bnapi += arr_size; 2924 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 2925 bp->bnapi[i] = bnapi; 2926 bp->bnapi[i]->index = i; 2927 bp->bnapi[i]->bp = bp; 2928 } 2929 2930 bp->rx_ring = kcalloc(bp->rx_nr_rings, 2931 sizeof(struct bnxt_rx_ring_info), 2932 GFP_KERNEL); 2933 if (!bp->rx_ring) 2934 return -ENOMEM; 2935 2936 for (i = 0; i < bp->rx_nr_rings; i++) { 2937 bp->rx_ring[i].bnapi = bp->bnapi[i]; 2938 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 2939 } 2940 2941 bp->tx_ring = kcalloc(bp->tx_nr_rings, 2942 sizeof(struct bnxt_tx_ring_info), 2943 GFP_KERNEL); 2944 if (!bp->tx_ring) 2945 return -ENOMEM; 2946 2947 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 2948 j = 0; 2949 else 2950 j = bp->rx_nr_rings; 2951 2952 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 2953 bp->tx_ring[i].bnapi = bp->bnapi[j]; 2954 bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; 2955 } 2956 2957 rc = bnxt_alloc_stats(bp); 2958 if (rc) 2959 goto alloc_mem_err; 2960 2961 rc = bnxt_alloc_ntp_fltrs(bp); 2962 if (rc) 2963 goto alloc_mem_err; 2964 2965 rc = bnxt_alloc_vnics(bp); 2966 if (rc) 2967 goto alloc_mem_err; 2968 } 2969 2970 bnxt_init_ring_struct(bp); 2971 2972 rc = bnxt_alloc_rx_rings(bp); 2973 if (rc) 2974 goto alloc_mem_err; 2975 2976 rc = bnxt_alloc_tx_rings(bp); 2977 if (rc) 2978 goto alloc_mem_err; 2979 2980 rc = bnxt_alloc_cp_rings(bp); 2981 if (rc) 2982 goto alloc_mem_err; 2983 2984 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 2985 BNXT_VNIC_UCAST_FLAG; 2986 rc = bnxt_alloc_vnic_attributes(bp); 2987 if (rc) 2988 goto alloc_mem_err; 2989 return 0; 2990 2991 alloc_mem_err: 2992 bnxt_free_mem(bp, true); 2993 return rc; 2994 } 2995 2996 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 2997 u16 cmpl_ring, u16 target_id) 2998 { 2999 struct input *req = request; 3000 3001 req->req_type = cpu_to_le16(req_type); 3002 req->cmpl_ring = cpu_to_le16(cmpl_ring); 3003 req->target_id = cpu_to_le16(target_id); 3004 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 3005 } 3006 3007 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 3008 int timeout, bool silent) 3009 { 3010 int i, intr_process, rc, tmo_count; 3011 struct input *req = msg; 3012 u32 *data = msg; 3013 __le32 *resp_len, *valid; 3014 u16 cp_ring_id, len = 0; 3015 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 3016 3017 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); 3018 memset(resp, 0, PAGE_SIZE); 3019 cp_ring_id = le16_to_cpu(req->cmpl_ring); 3020 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 
0 : 1; 3021 3022 /* Write request msg to hwrm channel */ 3023 __iowrite32_copy(bp->bar0, data, msg_len / 4); 3024 3025 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4) 3026 writel(0, bp->bar0 + i); 3027 3028 /* currently supports only one outstanding message */ 3029 if (intr_process) 3030 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 3031 3032 /* Ring channel doorbell */ 3033 writel(1, bp->bar0 + 0x100); 3034 3035 if (!timeout) 3036 timeout = DFLT_HWRM_CMD_TIMEOUT; 3037 3038 i = 0; 3039 tmo_count = timeout * 40; 3040 if (intr_process) { 3041 /* Wait until hwrm response cmpl interrupt is processed */ 3042 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && 3043 i++ < tmo_count) { 3044 usleep_range(25, 40); 3045 } 3046 3047 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { 3048 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 3049 le16_to_cpu(req->req_type)); 3050 return -1; 3051 } 3052 } else { 3053 /* Check if response len is updated */ 3054 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; 3055 for (i = 0; i < tmo_count; i++) { 3056 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 3057 HWRM_RESP_LEN_SFT; 3058 if (len) 3059 break; 3060 usleep_range(25, 40); 3061 } 3062 3063 if (i >= tmo_count) { 3064 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 3065 timeout, le16_to_cpu(req->req_type), 3066 le16_to_cpu(req->seq_id), len); 3067 return -1; 3068 } 3069 3070 /* Last word of resp contains valid bit */ 3071 valid = bp->hwrm_cmd_resp_addr + len - 4; 3072 for (i = 0; i < 5; i++) { 3073 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) 3074 break; 3075 udelay(1); 3076 } 3077 3078 if (i >= 5) { 3079 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 3080 timeout, le16_to_cpu(req->req_type), 3081 le16_to_cpu(req->seq_id), len, *valid); 3082 return -1; 3083 } 3084 } 3085 3086 rc = le16_to_cpu(resp->error_code); 3087 if (rc && !silent) 3088 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 3089 le16_to_cpu(resp->req_type), 3090 le16_to_cpu(resp->seq_id), rc); 3091 return rc; 3092 } 3093 3094 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3095 { 3096 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3097 } 3098 3099 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3100 { 3101 int rc; 3102 3103 mutex_lock(&bp->hwrm_cmd_lock); 3104 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 3105 mutex_unlock(&bp->hwrm_cmd_lock); 3106 return rc; 3107 } 3108 3109 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3110 int timeout) 3111 { 3112 int rc; 3113 3114 mutex_lock(&bp->hwrm_cmd_lock); 3115 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3116 mutex_unlock(&bp->hwrm_cmd_lock); 3117 return rc; 3118 } 3119 3120 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 3121 int bmap_size) 3122 { 3123 struct hwrm_func_drv_rgtr_input req = {0}; 3124 DECLARE_BITMAP(async_events_bmap, 256); 3125 u32 *events = (u32 *)async_events_bmap; 3126 int i; 3127 3128 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3129 3130 req.enables = 3131 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 3132 3133 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 3134 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) 3135 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 3136 3137 if (bmap && bmap_size) { 3138 for (i = 0; i < bmap_size; i++) { 3139 if (test_bit(i, bmap)) 3140 __set_bit(i, 
async_events_bmap); 3141 } 3142 } 3143 3144 for (i = 0; i < 8; i++) 3145 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 3146 3147 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3148 } 3149 3150 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) 3151 { 3152 struct hwrm_func_drv_rgtr_input req = {0}; 3153 3154 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3155 3156 req.enables = 3157 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 3158 FUNC_DRV_RGTR_REQ_ENABLES_VER); 3159 3160 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 3161 req.ver_maj = DRV_VER_MAJ; 3162 req.ver_min = DRV_VER_MIN; 3163 req.ver_upd = DRV_VER_UPD; 3164 3165 if (BNXT_PF(bp)) { 3166 DECLARE_BITMAP(vf_req_snif_bmap, 256); 3167 u32 *data = (u32 *)vf_req_snif_bmap; 3168 int i; 3169 3170 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); 3171 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) 3172 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); 3173 3174 for (i = 0; i < 8; i++) 3175 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 3176 3177 req.enables |= 3178 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 3179 } 3180 3181 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3182 } 3183 3184 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 3185 { 3186 struct hwrm_func_drv_unrgtr_input req = {0}; 3187 3188 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 3189 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3190 } 3191 3192 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 3193 { 3194 u32 rc = 0; 3195 struct hwrm_tunnel_dst_port_free_input req = {0}; 3196 3197 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 3198 req.tunnel_type = tunnel_type; 3199 3200 switch (tunnel_type) { 3201 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 3202 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 3203 break; 3204 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 3205 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 3206 break; 3207 default: 3208 break; 3209 } 3210 3211 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3212 if (rc) 3213 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", 3214 rc); 3215 return rc; 3216 } 3217 3218 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 3219 u8 tunnel_type) 3220 { 3221 u32 rc = 0; 3222 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 3223 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3224 3225 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 3226 3227 req.tunnel_type = tunnel_type; 3228 req.tunnel_dst_port_val = port; 3229 3230 mutex_lock(&bp->hwrm_cmd_lock); 3231 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3232 if (rc) { 3233 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", 3234 rc); 3235 goto err_out; 3236 } 3237 3238 switch (tunnel_type) { 3239 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 3240 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 3241 break; 3242 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 3243 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 3244 break; 3245 default: 3246 break; 3247 } 3248 3249 err_out: 3250 mutex_unlock(&bp->hwrm_cmd_lock); 3251 return rc; 3252 } 3253 3254 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 3255 { 3256 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 3257 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3258 3259 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 3260 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3261 3262 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 3263 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 3264 req.mask = cpu_to_le32(vnic->rx_mask); 3265 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3266 } 3267 3268 #ifdef CONFIG_RFS_ACCEL 3269 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 3270 struct bnxt_ntuple_filter *fltr) 3271 { 3272 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 3273 3274 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 3275 req.ntuple_filter_id = fltr->filter_id; 3276 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3277 } 3278 3279 #define BNXT_NTP_FLTR_FLAGS \ 3280 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 3281 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 3282 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 3283 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 3284 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 3285 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 3286 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 3287 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 3288 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 3289 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 3290 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 3291 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 3292 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 3293 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 3294 3295 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 3296 struct bnxt_ntuple_filter *fltr) 3297 { 3298 int rc = 0; 3299 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 3300 struct hwrm_cfa_ntuple_filter_alloc_output *resp = 3301 bp->hwrm_cmd_resp_addr; 3302 struct flow_keys *keys = &fltr->fkeys; 3303 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; 3304 3305 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 3306 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 3307 3308 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 3309 3310 req.ethertype = htons(ETH_P_IP); 3311 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 3312 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 3313 req.ip_protocol = keys->basic.ip_proto; 3314 3315 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 3316 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3317 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 3318 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3319 3320 req.src_port = keys->ports.src; 3321 req.src_port_mask = cpu_to_be16(0xffff); 3322 req.dst_port = keys->ports.dst; 3323 req.dst_port_mask = cpu_to_be16(0xffff); 3324 3325 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 3326 
mutex_lock(&bp->hwrm_cmd_lock); 3327 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3328 if (!rc) 3329 fltr->filter_id = resp->ntuple_filter_id; 3330 mutex_unlock(&bp->hwrm_cmd_lock); 3331 return rc; 3332 } 3333 #endif 3334 3335 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 3336 u8 *mac_addr) 3337 { 3338 u32 rc = 0; 3339 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 3340 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3341 3342 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 3343 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 3344 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 3345 req.flags |= 3346 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 3347 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 3348 req.enables = 3349 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 3350 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 3351 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 3352 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 3353 req.l2_addr_mask[0] = 0xff; 3354 req.l2_addr_mask[1] = 0xff; 3355 req.l2_addr_mask[2] = 0xff; 3356 req.l2_addr_mask[3] = 0xff; 3357 req.l2_addr_mask[4] = 0xff; 3358 req.l2_addr_mask[5] = 0xff; 3359 3360 mutex_lock(&bp->hwrm_cmd_lock); 3361 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3362 if (!rc) 3363 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 3364 resp->l2_filter_id; 3365 mutex_unlock(&bp->hwrm_cmd_lock); 3366 return rc; 3367 } 3368 3369 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 3370 { 3371 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 3372 int rc = 0; 3373 3374 /* Any associated ntuple filters will also be cleared by firmware. */ 3375 mutex_lock(&bp->hwrm_cmd_lock); 3376 for (i = 0; i < num_of_vnics; i++) { 3377 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3378 3379 for (j = 0; j < vnic->uc_filter_count; j++) { 3380 struct hwrm_cfa_l2_filter_free_input req = {0}; 3381 3382 bnxt_hwrm_cmd_hdr_init(bp, &req, 3383 HWRM_CFA_L2_FILTER_FREE, -1, -1); 3384 3385 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 3386 3387 rc = _hwrm_send_message(bp, &req, sizeof(req), 3388 HWRM_CMD_TIMEOUT); 3389 } 3390 vnic->uc_filter_count = 0; 3391 } 3392 mutex_unlock(&bp->hwrm_cmd_lock); 3393 3394 return rc; 3395 } 3396 3397 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 3398 { 3399 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3400 struct hwrm_vnic_tpa_cfg_input req = {0}; 3401 3402 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 3403 3404 if (tpa_flags) { 3405 u16 mss = bp->dev->mtu - 40; 3406 u32 nsegs, n, segs = 0, flags; 3407 3408 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 3409 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 3410 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 3411 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 3412 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 3413 if (tpa_flags & BNXT_FLAG_GRO) 3414 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 3415 3416 req.flags = cpu_to_le32(flags); 3417 3418 req.enables = 3419 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 3420 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 3421 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 3422 3423 /* Number of segs are log2 units, and first packet is not 3424 * included as part of this units. 
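		 *
		 * For example, assuming a 4K BNXT_RX_PAGE_SIZE and
		 * MAX_SKB_FRAGS == 17: an MTU of 1500 gives mss = 1460, so
		 * n = 4096 / 1460 = 2 and nsegs = (17 - 1) * 2 = 32, and
		 * ilog2(32) = 5 is what ends up in max_agg_segs, i.e. up to
		 * 2^5 = 32 aggregated segments.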
3425 */ 3426 if (mss <= BNXT_RX_PAGE_SIZE) { 3427 n = BNXT_RX_PAGE_SIZE / mss; 3428 nsegs = (MAX_SKB_FRAGS - 1) * n; 3429 } else { 3430 n = mss / BNXT_RX_PAGE_SIZE; 3431 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 3432 n++; 3433 nsegs = (MAX_SKB_FRAGS - n) / n; 3434 } 3435 3436 segs = ilog2(nsegs); 3437 req.max_agg_segs = cpu_to_le16(segs); 3438 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); 3439 3440 req.min_agg_len = cpu_to_le32(512); 3441 } 3442 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 3443 3444 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3445 } 3446 3447 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 3448 { 3449 u32 i, j, max_rings; 3450 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3451 struct hwrm_vnic_rss_cfg_input req = {0}; 3452 3453 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 3454 return 0; 3455 3456 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 3457 if (set_rss) { 3458 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 3459 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 3460 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3461 max_rings = bp->rx_nr_rings - 1; 3462 else 3463 max_rings = bp->rx_nr_rings; 3464 } else { 3465 max_rings = 1; 3466 } 3467 3468 /* Fill the RSS indirection table with ring group ids */ 3469 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 3470 if (j == max_rings) 3471 j = 0; 3472 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 3473 } 3474 3475 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 3476 req.hash_key_tbl_addr = 3477 cpu_to_le64(vnic->rss_hash_key_dma_addr); 3478 } 3479 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3480 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3481 } 3482 3483 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 3484 { 3485 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3486 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 3487 3488 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 3489 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 3490 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 3491 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 3492 req.enables = 3493 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 3494 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 3495 /* thresholds not implemented in firmware yet */ 3496 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 3497 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 3498 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3499 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3500 } 3501 3502 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 3503 u16 ctx_idx) 3504 { 3505 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 3506 3507 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 3508 req.rss_cos_lb_ctx_id = 3509 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 3510 3511 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3512 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 3513 } 3514 3515 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 3516 { 3517 int i, j; 3518 3519 for (i = 0; i < bp->nr_vnics; i++) { 3520 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3521 3522 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 3523 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 3524 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 3525 } 3526 } 3527 bp->rsscos_nr_ctxs = 0; 3528 } 3529 3530 
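/* A sketch of the usual pairing for these context helpers, assuming the
 * standard vnic setup path (the call sites live elsewhere in the driver):
 *
 *	bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);   // obtain fw ctx id
 *	bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);  // point RSS at the ctx
 *	bnxt_hwrm_vnic_cfg(bp, vnic_id);            // attach ctx to the vnic
 *	...
 *	bnxt_hwrm_vnic_ctx_free(bp);                // teardown
 *
 * The allocated id lives in vnic->fw_rss_cos_lb_ctx[] and is reset to
 * INVALID_HW_RING_ID once freed.
 */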
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 3531 { 3532 int rc; 3533 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 3534 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 3535 bp->hwrm_cmd_resp_addr; 3536 3537 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 3538 -1); 3539 3540 mutex_lock(&bp->hwrm_cmd_lock); 3541 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3542 if (!rc) 3543 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 3544 le16_to_cpu(resp->rss_cos_lb_ctx_id); 3545 mutex_unlock(&bp->hwrm_cmd_lock); 3546 3547 return rc; 3548 } 3549 3550 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 3551 { 3552 unsigned int ring = 0, grp_idx; 3553 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3554 struct hwrm_vnic_cfg_input req = {0}; 3555 u16 def_vlan = 0; 3556 3557 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 3558 3559 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 3560 /* Only RSS support for now TBD: COS & LB */ 3561 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 3562 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3563 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 3564 VNIC_CFG_REQ_ENABLES_MRU); 3565 } else { 3566 req.rss_rule = cpu_to_le16(0xffff); 3567 } 3568 3569 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 3570 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 3571 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 3572 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 3573 } else { 3574 req.cos_rule = cpu_to_le16(0xffff); 3575 } 3576 3577 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3578 ring = 0; 3579 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 3580 ring = vnic_id - 1; 3581 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 3582 ring = bp->rx_nr_rings - 1; 3583 3584 grp_idx = bp->rx_ring[ring].bnapi->index; 3585 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 3586 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 3587 3588 req.lb_rule = cpu_to_le16(0xffff); 3589 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 3590 VLAN_HLEN); 3591 3592 #ifdef CONFIG_BNXT_SRIOV 3593 if (BNXT_VF(bp)) 3594 def_vlan = bp->vf.vlan; 3595 #endif 3596 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 3597 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 3598 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 3599 req.flags |= 3600 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE); 3601 3602 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3603 } 3604 3605 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 3606 { 3607 u32 rc = 0; 3608 3609 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 3610 struct hwrm_vnic_free_input req = {0}; 3611 3612 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 3613 req.vnic_id = 3614 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 3615 3616 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3617 if (rc) 3618 return rc; 3619 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 3620 } 3621 return rc; 3622 } 3623 3624 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 3625 { 3626 u16 i; 3627 3628 for (i = 0; i < bp->nr_vnics; i++) 3629 bnxt_hwrm_vnic_free_one(bp, i); 3630 } 3631 3632 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 3633 unsigned int start_rx_ring_idx, 3634 unsigned int nr_rings) 3635 { 3636 int rc = 0; 3637 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + 
nr_rings; 3638 struct hwrm_vnic_alloc_input req = {0}; 3639 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3640 3641 /* map ring groups to this vnic */ 3642 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 3643 grp_idx = bp->rx_ring[i].bnapi->index; 3644 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 3645 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 3646 j, nr_rings); 3647 break; 3648 } 3649 bp->vnic_info[vnic_id].fw_grp_ids[j] = 3650 bp->grp_info[grp_idx].fw_grp_id; 3651 } 3652 3653 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 3654 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 3655 if (vnic_id == 0) 3656 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 3657 3658 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 3659 3660 mutex_lock(&bp->hwrm_cmd_lock); 3661 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3662 if (!rc) 3663 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); 3664 mutex_unlock(&bp->hwrm_cmd_lock); 3665 return rc; 3666 } 3667 3668 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 3669 { 3670 u16 i; 3671 u32 rc = 0; 3672 3673 mutex_lock(&bp->hwrm_cmd_lock); 3674 for (i = 0; i < bp->rx_nr_rings; i++) { 3675 struct hwrm_ring_grp_alloc_input req = {0}; 3676 struct hwrm_ring_grp_alloc_output *resp = 3677 bp->hwrm_cmd_resp_addr; 3678 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 3679 3680 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 3681 3682 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 3683 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 3684 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 3685 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 3686 3687 rc = _hwrm_send_message(bp, &req, sizeof(req), 3688 HWRM_CMD_TIMEOUT); 3689 if (rc) 3690 break; 3691 3692 bp->grp_info[grp_idx].fw_grp_id = 3693 le32_to_cpu(resp->ring_group_id); 3694 } 3695 mutex_unlock(&bp->hwrm_cmd_lock); 3696 return rc; 3697 } 3698 3699 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 3700 { 3701 u16 i; 3702 u32 rc = 0; 3703 struct hwrm_ring_grp_free_input req = {0}; 3704 3705 if (!bp->grp_info) 3706 return 0; 3707 3708 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 3709 3710 mutex_lock(&bp->hwrm_cmd_lock); 3711 for (i = 0; i < bp->cp_nr_rings; i++) { 3712 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 3713 continue; 3714 req.ring_group_id = 3715 cpu_to_le32(bp->grp_info[i].fw_grp_id); 3716 3717 rc = _hwrm_send_message(bp, &req, sizeof(req), 3718 HWRM_CMD_TIMEOUT); 3719 if (rc) 3720 break; 3721 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 3722 } 3723 mutex_unlock(&bp->hwrm_cmd_lock); 3724 return rc; 3725 } 3726 3727 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 3728 struct bnxt_ring_struct *ring, 3729 u32 ring_type, u32 map_index, 3730 u32 stats_ctx_id) 3731 { 3732 int rc = 0, err = 0; 3733 struct hwrm_ring_alloc_input req = {0}; 3734 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3735 u16 ring_id; 3736 3737 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 3738 3739 req.enables = 0; 3740 if (ring->nr_pages > 1) { 3741 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); 3742 /* Page size is in log2 units */ 3743 req.page_size = BNXT_PAGE_SHIFT; 3744 req.page_tbl_depth = 1; 3745 } else { 3746 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); 3747 } 3748 req.fbo = 0; 3749 /* Association of ring index with doorbell index and MSIX number */ 
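	/* map_index also determines where the caller points the ring's
	 * doorbell: bnxt_hwrm_ring_alloc() below uses
	 * bp->bar1 + map_index * 0x80 for cp, tx, rx and agg rings, with agg
	 * rings offset by rx_nr_rings so each ring gets its own doorbell
	 * slot.
	 */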
3750 req.logical_id = cpu_to_le16(map_index); 3751 3752 switch (ring_type) { 3753 case HWRM_RING_ALLOC_TX: 3754 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 3755 /* Association of transmit ring with completion ring */ 3756 req.cmpl_ring_id = 3757 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); 3758 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 3759 req.stat_ctx_id = cpu_to_le32(stats_ctx_id); 3760 req.queue_id = cpu_to_le16(ring->queue_id); 3761 break; 3762 case HWRM_RING_ALLOC_RX: 3763 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 3764 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 3765 break; 3766 case HWRM_RING_ALLOC_AGG: 3767 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 3768 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 3769 break; 3770 case HWRM_RING_ALLOC_CMPL: 3771 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL; 3772 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 3773 if (bp->flags & BNXT_FLAG_USING_MSIX) 3774 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 3775 break; 3776 default: 3777 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 3778 ring_type); 3779 return -1; 3780 } 3781 3782 mutex_lock(&bp->hwrm_cmd_lock); 3783 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3784 err = le16_to_cpu(resp->error_code); 3785 ring_id = le16_to_cpu(resp->ring_id); 3786 mutex_unlock(&bp->hwrm_cmd_lock); 3787 3788 if (rc || err) { 3789 switch (ring_type) { 3790 case RING_FREE_REQ_RING_TYPE_CMPL: 3791 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", 3792 rc, err); 3793 return -1; 3794 3795 case RING_FREE_REQ_RING_TYPE_RX: 3796 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", 3797 rc, err); 3798 return -1; 3799 3800 case RING_FREE_REQ_RING_TYPE_TX: 3801 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. 
rc:%x err:%x\n", 3802 rc, err); 3803 return -1; 3804 3805 default: 3806 netdev_err(bp->dev, "Invalid ring\n"); 3807 return -1; 3808 } 3809 } 3810 ring->fw_ring_id = ring_id; 3811 return rc; 3812 } 3813 3814 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 3815 { 3816 int i, rc = 0; 3817 3818 for (i = 0; i < bp->cp_nr_rings; i++) { 3819 struct bnxt_napi *bnapi = bp->bnapi[i]; 3820 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3821 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3822 3823 cpr->cp_doorbell = bp->bar1 + i * 0x80; 3824 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 3825 INVALID_STATS_CTX_ID); 3826 if (rc) 3827 goto err_out; 3828 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 3829 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 3830 } 3831 3832 for (i = 0; i < bp->tx_nr_rings; i++) { 3833 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3834 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3835 u32 map_idx = txr->bnapi->index; 3836 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; 3837 3838 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, 3839 map_idx, fw_stats_ctx); 3840 if (rc) 3841 goto err_out; 3842 txr->tx_doorbell = bp->bar1 + map_idx * 0x80; 3843 } 3844 3845 for (i = 0; i < bp->rx_nr_rings; i++) { 3846 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3847 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 3848 u32 map_idx = rxr->bnapi->index; 3849 3850 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, 3851 map_idx, INVALID_STATS_CTX_ID); 3852 if (rc) 3853 goto err_out; 3854 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; 3855 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 3856 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 3857 } 3858 3859 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 3860 for (i = 0; i < bp->rx_nr_rings; i++) { 3861 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3862 struct bnxt_ring_struct *ring = 3863 &rxr->rx_agg_ring_struct; 3864 u32 grp_idx = rxr->bnapi->index; 3865 u32 map_idx = grp_idx + bp->rx_nr_rings; 3866 3867 rc = hwrm_ring_alloc_send_msg(bp, ring, 3868 HWRM_RING_ALLOC_AGG, 3869 map_idx, 3870 INVALID_STATS_CTX_ID); 3871 if (rc) 3872 goto err_out; 3873 3874 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; 3875 writel(DB_KEY_RX | rxr->rx_agg_prod, 3876 rxr->rx_agg_doorbell); 3877 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 3878 } 3879 } 3880 err_out: 3881 return rc; 3882 } 3883 3884 static int hwrm_ring_free_send_msg(struct bnxt *bp, 3885 struct bnxt_ring_struct *ring, 3886 u32 ring_type, int cmpl_ring_id) 3887 { 3888 int rc; 3889 struct hwrm_ring_free_input req = {0}; 3890 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 3891 u16 error_code; 3892 3893 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 3894 req.ring_type = ring_type; 3895 req.ring_id = cpu_to_le16(ring->fw_ring_id); 3896 3897 mutex_lock(&bp->hwrm_cmd_lock); 3898 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3899 error_code = le16_to_cpu(resp->error_code); 3900 mutex_unlock(&bp->hwrm_cmd_lock); 3901 3902 if (rc || error_code) { 3903 switch (ring_type) { 3904 case RING_FREE_REQ_RING_TYPE_CMPL: 3905 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", 3906 rc); 3907 return rc; 3908 case RING_FREE_REQ_RING_TYPE_RX: 3909 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", 3910 rc); 3911 return rc; 3912 case RING_FREE_REQ_RING_TYPE_TX: 3913 netdev_err(bp->dev, "hwrm_ring_free tx failed. 
rc:%d\n", 3914 rc); 3915 return rc; 3916 default: 3917 netdev_err(bp->dev, "Invalid ring\n"); 3918 return -1; 3919 } 3920 } 3921 return 0; 3922 } 3923 3924 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 3925 { 3926 int i; 3927 3928 if (!bp->bnapi) 3929 return; 3930 3931 for (i = 0; i < bp->tx_nr_rings; i++) { 3932 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3933 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 3934 u32 grp_idx = txr->bnapi->index; 3935 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 3936 3937 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 3938 hwrm_ring_free_send_msg(bp, ring, 3939 RING_FREE_REQ_RING_TYPE_TX, 3940 close_path ? cmpl_ring_id : 3941 INVALID_HW_RING_ID); 3942 ring->fw_ring_id = INVALID_HW_RING_ID; 3943 } 3944 } 3945 3946 for (i = 0; i < bp->rx_nr_rings; i++) { 3947 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3948 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 3949 u32 grp_idx = rxr->bnapi->index; 3950 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 3951 3952 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 3953 hwrm_ring_free_send_msg(bp, ring, 3954 RING_FREE_REQ_RING_TYPE_RX, 3955 close_path ? cmpl_ring_id : 3956 INVALID_HW_RING_ID); 3957 ring->fw_ring_id = INVALID_HW_RING_ID; 3958 bp->grp_info[grp_idx].rx_fw_ring_id = 3959 INVALID_HW_RING_ID; 3960 } 3961 } 3962 3963 for (i = 0; i < bp->rx_nr_rings; i++) { 3964 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3965 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 3966 u32 grp_idx = rxr->bnapi->index; 3967 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 3968 3969 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 3970 hwrm_ring_free_send_msg(bp, ring, 3971 RING_FREE_REQ_RING_TYPE_RX, 3972 close_path ? cmpl_ring_id : 3973 INVALID_HW_RING_ID); 3974 ring->fw_ring_id = INVALID_HW_RING_ID; 3975 bp->grp_info[grp_idx].agg_fw_ring_id = 3976 INVALID_HW_RING_ID; 3977 } 3978 } 3979 3980 for (i = 0; i < bp->cp_nr_rings; i++) { 3981 struct bnxt_napi *bnapi = bp->bnapi[i]; 3982 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3983 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3984 3985 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 3986 hwrm_ring_free_send_msg(bp, ring, 3987 RING_FREE_REQ_RING_TYPE_CMPL, 3988 INVALID_HW_RING_ID); 3989 ring->fw_ring_id = INVALID_HW_RING_ID; 3990 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 3991 } 3992 } 3993 } 3994 3995 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, 3996 u32 buf_tmrs, u16 flags, 3997 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 3998 { 3999 req->flags = cpu_to_le16(flags); 4000 req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs); 4001 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16); 4002 req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs); 4003 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16); 4004 /* Minimum time between 2 interrupts set to buf_tmr x 2 */ 4005 req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2); 4006 req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4); 4007 req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4); 4008 } 4009 4010 int bnxt_hwrm_set_coal(struct bnxt *bp) 4011 { 4012 int i, rc = 0; 4013 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 4014 req_tx = {0}, *req; 4015 u16 max_buf, max_buf_irq; 4016 u16 buf_tmr, buf_tmr_irq; 4017 u32 flags; 4018 4019 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 4020 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4021 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 4022 
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4023 4024 /* Each rx completion (2 records) should be DMAed immediately. 4025 * DMA 1/4 of the completion buffers at a time. 4026 */ 4027 max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2); 4028 /* max_buf must not be zero */ 4029 max_buf = clamp_t(u16, max_buf, 1, 63); 4030 max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63); 4031 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks); 4032 /* buf timer set to 1/4 of interrupt timer */ 4033 buf_tmr = max_t(u16, buf_tmr / 4, 1); 4034 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq); 4035 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); 4036 4037 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4038 4039 /* RING_IDLE generates more IRQs for lower latency. Enable it only 4040 * if coal_ticks is less than 25 us. 4041 */ 4042 if (bp->rx_coal_ticks < 25) 4043 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 4044 4045 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, 4046 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx); 4047 4048 /* max_buf must not be zero */ 4049 max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63); 4050 max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63); 4051 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks); 4052 /* buf timer set to 1/4 of interrupt timer */ 4053 buf_tmr = max_t(u16, buf_tmr / 4, 1); 4054 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq); 4055 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); 4056 4057 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4058 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, 4059 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx); 4060 4061 mutex_lock(&bp->hwrm_cmd_lock); 4062 for (i = 0; i < bp->cp_nr_rings; i++) { 4063 struct bnxt_napi *bnapi = bp->bnapi[i]; 4064 4065 req = &req_rx; 4066 if (!bnapi->rx_ring) 4067 req = &req_tx; 4068 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); 4069 4070 rc = _hwrm_send_message(bp, req, sizeof(*req), 4071 HWRM_CMD_TIMEOUT); 4072 if (rc) 4073 break; 4074 } 4075 mutex_unlock(&bp->hwrm_cmd_lock); 4076 return rc; 4077 } 4078 4079 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 4080 { 4081 int rc = 0, i; 4082 struct hwrm_stat_ctx_free_input req = {0}; 4083 4084 if (!bp->bnapi) 4085 return 0; 4086 4087 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4088 return 0; 4089 4090 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 4091 4092 mutex_lock(&bp->hwrm_cmd_lock); 4093 for (i = 0; i < bp->cp_nr_rings; i++) { 4094 struct bnxt_napi *bnapi = bp->bnapi[i]; 4095 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4096 4097 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 4098 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 4099 4100 rc = _hwrm_send_message(bp, &req, sizeof(req), 4101 HWRM_CMD_TIMEOUT); 4102 if (rc) 4103 break; 4104 4105 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4106 } 4107 } 4108 mutex_unlock(&bp->hwrm_cmd_lock); 4109 return rc; 4110 } 4111 4112 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 4113 { 4114 int rc = 0, i; 4115 struct hwrm_stat_ctx_alloc_input req = {0}; 4116 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4117 4118 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4119 return 0; 4120 4121 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 4122 4123 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 4124 4125 mutex_lock(&bp->hwrm_cmd_lock); 4126 for (i = 0; i < bp->cp_nr_rings; i++) { 4127 struct bnxt_napi *bnapi = bp->bnapi[i]; 4128 struct bnxt_cp_ring_info *cpr = 
&bnapi->cp_ring; 4129 4130 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 4131 4132 rc = _hwrm_send_message(bp, &req, sizeof(req), 4133 HWRM_CMD_TIMEOUT); 4134 if (rc) 4135 break; 4136 4137 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 4138 4139 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 4140 } 4141 mutex_unlock(&bp->hwrm_cmd_lock); 4142 return rc; 4143 } 4144 4145 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 4146 { 4147 struct hwrm_func_qcfg_input req = {0}; 4148 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4149 int rc; 4150 4151 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4152 req.fid = cpu_to_le16(0xffff); 4153 mutex_lock(&bp->hwrm_cmd_lock); 4154 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4155 if (rc) 4156 goto func_qcfg_exit; 4157 4158 #ifdef CONFIG_BNXT_SRIOV 4159 if (BNXT_VF(bp)) { 4160 struct bnxt_vf_info *vf = &bp->vf; 4161 4162 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 4163 } 4164 #endif 4165 switch (resp->port_partition_type) { 4166 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 4167 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 4168 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 4169 bp->port_partition_type = resp->port_partition_type; 4170 break; 4171 } 4172 4173 func_qcfg_exit: 4174 mutex_unlock(&bp->hwrm_cmd_lock); 4175 return rc; 4176 } 4177 4178 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 4179 { 4180 int rc = 0; 4181 struct hwrm_func_qcaps_input req = {0}; 4182 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4183 4184 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 4185 req.fid = cpu_to_le16(0xffff); 4186 4187 mutex_lock(&bp->hwrm_cmd_lock); 4188 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4189 if (rc) 4190 goto hwrm_func_qcaps_exit; 4191 4192 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)) 4193 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 4194 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)) 4195 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 4196 4197 bp->tx_push_thresh = 0; 4198 if (resp->flags & 4199 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) 4200 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 4201 4202 if (BNXT_PF(bp)) { 4203 struct bnxt_pf_info *pf = &bp->pf; 4204 4205 pf->fw_fid = le16_to_cpu(resp->fid); 4206 pf->port_id = le16_to_cpu(resp->port_id); 4207 bp->dev->dev_port = pf->port_id; 4208 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 4209 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); 4210 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4211 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4212 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4213 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4214 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4215 if (!pf->max_hw_ring_grps) 4216 pf->max_hw_ring_grps = pf->max_tx_rings; 4217 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4218 pf->max_vnics = le16_to_cpu(resp->max_vnics); 4219 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4220 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 4221 pf->max_vfs = le16_to_cpu(resp->max_vfs); 4222 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 4223 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 4224 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 4225 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 4226 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 4227 pf->max_rx_wm_flows = 
le32_to_cpu(resp->max_rx_wm_flows); 4228 } else { 4229 #ifdef CONFIG_BNXT_SRIOV 4230 struct bnxt_vf_info *vf = &bp->vf; 4231 4232 vf->fw_fid = le16_to_cpu(resp->fid); 4233 4234 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4235 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4236 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4237 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4238 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4239 if (!vf->max_hw_ring_grps) 4240 vf->max_hw_ring_grps = vf->max_tx_rings; 4241 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4242 vf->max_vnics = le16_to_cpu(resp->max_vnics); 4243 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4244 4245 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 4246 mutex_unlock(&bp->hwrm_cmd_lock); 4247 4248 if (is_valid_ether_addr(vf->mac_addr)) { 4249 /* overwrite netdev dev_addr with the admin VF MAC */ 4250 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 4251 } else { 4252 random_ether_addr(bp->dev->dev_addr); 4253 rc = bnxt_approve_mac(bp, bp->dev->dev_addr); 4254 } 4255 return rc; 4256 #endif 4257 } 4258 4259 hwrm_func_qcaps_exit: 4260 mutex_unlock(&bp->hwrm_cmd_lock); 4261 return rc; 4262 } 4263 4264 static int bnxt_hwrm_func_reset(struct bnxt *bp) 4265 { 4266 struct hwrm_func_reset_input req = {0}; 4267 4268 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 4269 req.enables = 0; 4270 4271 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 4272 } 4273 4274 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 4275 { 4276 int rc = 0; 4277 struct hwrm_queue_qportcfg_input req = {0}; 4278 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 4279 u8 i, *qptr; 4280 4281 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 4282 4283 mutex_lock(&bp->hwrm_cmd_lock); 4284 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4285 if (rc) 4286 goto qportcfg_exit; 4287 4288 if (!resp->max_configurable_queues) { 4289 rc = -EINVAL; 4290 goto qportcfg_exit; 4291 } 4292 bp->max_tc = resp->max_configurable_queues; 4293 bp->max_lltc = resp->max_configurable_lossless_queues; 4294 if (bp->max_tc > BNXT_MAX_QUEUE) 4295 bp->max_tc = BNXT_MAX_QUEUE; 4296 4297 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 4298 bp->max_tc = 1; 4299 4300 if (bp->max_lltc > bp->max_tc) 4301 bp->max_lltc = bp->max_tc; 4302 4303 qptr = &resp->queue_id0; 4304 for (i = 0; i < bp->max_tc; i++) { 4305 bp->q_info[i].queue_id = *qptr++; 4306 bp->q_info[i].queue_profile = *qptr++; 4307 } 4308 4309 qportcfg_exit: 4310 mutex_unlock(&bp->hwrm_cmd_lock); 4311 return rc; 4312 } 4313 4314 static int bnxt_hwrm_ver_get(struct bnxt *bp) 4315 { 4316 int rc; 4317 struct hwrm_ver_get_input req = {0}; 4318 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 4319 4320 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 4321 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 4322 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 4323 req.hwrm_intf_min = HWRM_VERSION_MINOR; 4324 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 4325 mutex_lock(&bp->hwrm_cmd_lock); 4326 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4327 if (rc) 4328 goto hwrm_ver_get_exit; 4329 4330 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 4331 4332 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | 4333 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; 4334 if (resp->hwrm_intf_maj < 1) { 4335 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 4336
resp->hwrm_intf_maj, resp->hwrm_intf_min, 4337 resp->hwrm_intf_upd); 4338 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 4339 } 4340 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d", 4341 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, 4342 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); 4343 4344 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 4345 if (!bp->hwrm_cmd_timeout) 4346 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 4347 4348 if (resp->hwrm_intf_maj >= 1) 4349 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 4350 4351 bp->chip_num = le16_to_cpu(resp->chip_num); 4352 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 4353 !resp->chip_metal) 4354 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 4355 4356 hwrm_ver_get_exit: 4357 mutex_unlock(&bp->hwrm_cmd_lock); 4358 return rc; 4359 } 4360 4361 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 4362 { 4363 #if IS_ENABLED(CONFIG_RTC_LIB) 4364 struct hwrm_fw_set_time_input req = {0}; 4365 struct rtc_time tm; 4366 struct timeval tv; 4367 4368 if (bp->hwrm_spec_code < 0x10400) 4369 return -EOPNOTSUPP; 4370 4371 do_gettimeofday(&tv); 4372 rtc_time_to_tm(tv.tv_sec, &tm); 4373 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 4374 req.year = cpu_to_le16(1900 + tm.tm_year); 4375 req.month = 1 + tm.tm_mon; 4376 req.day = tm.tm_mday; 4377 req.hour = tm.tm_hour; 4378 req.minute = tm.tm_min; 4379 req.second = tm.tm_sec; 4380 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4381 #else 4382 return -EOPNOTSUPP; 4383 #endif 4384 } 4385 4386 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 4387 { 4388 int rc; 4389 struct bnxt_pf_info *pf = &bp->pf; 4390 struct hwrm_port_qstats_input req = {0}; 4391 4392 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 4393 return 0; 4394 4395 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 4396 req.port_id = cpu_to_le16(pf->port_id); 4397 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 4398 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 4399 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4400 return rc; 4401 } 4402 4403 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 4404 { 4405 if (bp->vxlan_port_cnt) { 4406 bnxt_hwrm_tunnel_dst_port_free( 4407 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 4408 } 4409 bp->vxlan_port_cnt = 0; 4410 if (bp->nge_port_cnt) { 4411 bnxt_hwrm_tunnel_dst_port_free( 4412 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 4413 } 4414 bp->nge_port_cnt = 0; 4415 } 4416 4417 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 4418 { 4419 int rc, i; 4420 u32 tpa_flags = 0; 4421 4422 if (set_tpa) 4423 tpa_flags = bp->flags & BNXT_FLAG_TPA; 4424 for (i = 0; i < bp->nr_vnics; i++) { 4425 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4426 if (rc) { 4427 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4428 i, rc); 4429 return rc; 4430 } 4431 } 4432 return 0; 4433 } 4434 4435 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 4436 { 4437 int i; 4438 4439 for (i = 0; i < bp->nr_vnics; i++) 4440 bnxt_hwrm_vnic_set_rss(bp, i, false); 4441 } 4442 4443 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 4444 bool irq_re_init) 4445 { 4446 if (bp->vnic_info) { 4447 bnxt_hwrm_clear_vnic_filter(bp); 4448 /* clear all RSS settings before freeing the vnic ctx */ 4449 bnxt_hwrm_clear_vnic_rss(bp); 4450 bnxt_hwrm_vnic_ctx_free(bp); 4451 /* before freeing the vnic, undo the vnic tpa settings */ 4452 if
(bp->flags & BNXT_FLAG_TPA) 4453 bnxt_set_tpa(bp, false); 4454 bnxt_hwrm_vnic_free(bp); 4455 } 4456 bnxt_hwrm_ring_free(bp, close_path); 4457 bnxt_hwrm_ring_grp_free(bp); 4458 if (irq_re_init) { 4459 bnxt_hwrm_stat_ctx_free(bp); 4460 bnxt_hwrm_free_tunnel_ports(bp); 4461 } 4462 } 4463 4464 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 4465 { 4466 int rc; 4467 4468 /* allocate context for vnic */ 4469 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 4470 if (rc) { 4471 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 4472 vnic_id, rc); 4473 goto vnic_setup_err; 4474 } 4475 bp->rsscos_nr_ctxs++; 4476 4477 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 4478 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 4479 if (rc) { 4480 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 4481 vnic_id, rc); 4482 goto vnic_setup_err; 4483 } 4484 bp->rsscos_nr_ctxs++; 4485 } 4486 4487 /* configure default vnic, ring grp */ 4488 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 4489 if (rc) { 4490 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 4491 vnic_id, rc); 4492 goto vnic_setup_err; 4493 } 4494 4495 /* Enable RSS hashing on vnic */ 4496 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 4497 if (rc) { 4498 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 4499 vnic_id, rc); 4500 goto vnic_setup_err; 4501 } 4502 4503 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 4504 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 4505 if (rc) { 4506 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 4507 vnic_id, rc); 4508 } 4509 } 4510 4511 vnic_setup_err: 4512 return rc; 4513 } 4514 4515 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 4516 { 4517 #ifdef CONFIG_RFS_ACCEL 4518 int i, rc = 0; 4519 4520 for (i = 0; i < bp->rx_nr_rings; i++) { 4521 u16 vnic_id = i + 1; 4522 u16 ring_id = i; 4523 4524 if (vnic_id >= bp->nr_vnics) 4525 break; 4526 4527 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; 4528 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 4529 if (rc) { 4530 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 4531 vnic_id, rc); 4532 break; 4533 } 4534 rc = bnxt_setup_vnic(bp, vnic_id); 4535 if (rc) 4536 break; 4537 } 4538 return rc; 4539 #else 4540 return 0; 4541 #endif 4542 } 4543 4544 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 4545 static bool bnxt_promisc_ok(struct bnxt *bp) 4546 { 4547 #ifdef CONFIG_BNXT_SRIOV 4548 if (BNXT_VF(bp) && !bp->vf.vlan) 4549 return false; 4550 #endif 4551 return true; 4552 } 4553 4554 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 4555 { 4556 unsigned int rc = 0; 4557 4558 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 4559 if (rc) { 4560 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 4561 rc); 4562 return rc; 4563 } 4564 4565 rc = bnxt_hwrm_vnic_cfg(bp, 1); 4566 if (rc) { 4567 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 4568 rc); 4569 return rc; 4570 } 4571 return rc; 4572 } 4573 4574 static int bnxt_cfg_rx_mode(struct bnxt *); 4575 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 4576 4577 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 4578 { 4579 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 4580 int rc = 0; 4581 unsigned int rx_nr_rings = bp->rx_nr_rings; 4582 4583 if (irq_re_init) { 4584 rc = bnxt_hwrm_stat_ctx_alloc(bp); 4585 if (rc) { 4586 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 4587 rc); 4588 goto err_out; 4589 } 4590 } 4591 4592 rc = bnxt_hwrm_ring_alloc(bp); 4593 if (rc) { 4594 netdev_err(bp->dev, "hwrm 
ring alloc failure rc: %x\n", rc); 4595 goto err_out; 4596 } 4597 4598 rc = bnxt_hwrm_ring_grp_alloc(bp); 4599 if (rc) { 4600 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 4601 goto err_out; 4602 } 4603 4604 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4605 rx_nr_rings--; 4606 4607 /* default vnic 0 */ 4608 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 4609 if (rc) { 4610 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 4611 goto err_out; 4612 } 4613 4614 rc = bnxt_setup_vnic(bp, 0); 4615 if (rc) 4616 goto err_out; 4617 4618 if (bp->flags & BNXT_FLAG_RFS) { 4619 rc = bnxt_alloc_rfs_vnics(bp); 4620 if (rc) 4621 goto err_out; 4622 } 4623 4624 if (bp->flags & BNXT_FLAG_TPA) { 4625 rc = bnxt_set_tpa(bp, true); 4626 if (rc) 4627 goto err_out; 4628 } 4629 4630 if (BNXT_VF(bp)) 4631 bnxt_update_vf_mac(bp); 4632 4633 /* Filter for default vnic 0 */ 4634 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 4635 if (rc) { 4636 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 4637 goto err_out; 4638 } 4639 vnic->uc_filter_count = 1; 4640 4641 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 4642 4643 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 4644 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 4645 4646 if (bp->dev->flags & IFF_ALLMULTI) { 4647 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 4648 vnic->mc_list_count = 0; 4649 } else { 4650 u32 mask = 0; 4651 4652 bnxt_mc_list_updated(bp, &mask); 4653 vnic->rx_mask |= mask; 4654 } 4655 4656 rc = bnxt_cfg_rx_mode(bp); 4657 if (rc) 4658 goto err_out; 4659 4660 rc = bnxt_hwrm_set_coal(bp); 4661 if (rc) 4662 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 4663 rc); 4664 4665 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 4666 rc = bnxt_setup_nitroa0_vnic(bp); 4667 if (rc) 4668 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 4669 rc); 4670 } 4671 4672 if (BNXT_VF(bp)) { 4673 bnxt_hwrm_func_qcfg(bp); 4674 netdev_update_features(bp->dev); 4675 } 4676 4677 return 0; 4678 4679 err_out: 4680 bnxt_hwrm_resource_free(bp, 0, true); 4681 4682 return rc; 4683 } 4684 4685 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 4686 { 4687 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 4688 return 0; 4689 } 4690 4691 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 4692 { 4693 bnxt_init_rx_rings(bp); 4694 bnxt_init_tx_rings(bp); 4695 bnxt_init_ring_grps(bp, irq_re_init); 4696 bnxt_init_vnics(bp); 4697 4698 return bnxt_init_chip(bp, irq_re_init); 4699 } 4700 4701 static void bnxt_disable_int(struct bnxt *bp) 4702 { 4703 int i; 4704 4705 if (!bp->bnapi) 4706 return; 4707 4708 for (i = 0; i < bp->cp_nr_rings; i++) { 4709 struct bnxt_napi *bnapi = bp->bnapi[i]; 4710 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4711 4712 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 4713 } 4714 } 4715 4716 static void bnxt_enable_int(struct bnxt *bp) 4717 { 4718 int i; 4719 4720 atomic_set(&bp->intr_sem, 0); 4721 for (i = 0; i < bp->cp_nr_rings; i++) { 4722 struct bnxt_napi *bnapi = bp->bnapi[i]; 4723 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4724 4725 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 4726 } 4727 } 4728 4729 static int bnxt_set_real_num_queues(struct bnxt *bp) 4730 { 4731 int rc; 4732 struct net_device *dev = bp->dev; 4733 4734 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); 4735 if (rc) 4736 return rc; 4737 4738 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 4739 if (rc) 4740 return rc; 4741 4742 #ifdef CONFIG_RFS_ACCEL 
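	/* Descriptive note on the reverse CPU map allocated just below: it backs
	 * accelerated RFS.  Each rx ring's MSI-X vector is registered with the
	 * map in bnxt_request_irq() via irq_cpu_rmap_add(), and the map is torn
	 * down again in bnxt_free_irq().  The networking core consults
	 * dev->rx_cpu_rmap to steer a flow to the rx queue whose interrupt is
	 * affined to the CPU that is consuming the flow.
	 */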
4743 if (bp->flags & BNXT_FLAG_RFS) 4744 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 4745 #endif 4746 4747 return rc; 4748 } 4749 4750 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 4751 bool shared) 4752 { 4753 int _rx = *rx, _tx = *tx; 4754 4755 if (shared) { 4756 *rx = min_t(int, _rx, max); 4757 *tx = min_t(int, _tx, max); 4758 } else { 4759 if (max < 2) 4760 return -ENOMEM; 4761 4762 while (_rx + _tx > max) { 4763 if (_rx > _tx && _rx > 1) 4764 _rx--; 4765 else if (_tx > 1) 4766 _tx--; 4767 } 4768 *rx = _rx; 4769 *tx = _tx; 4770 } 4771 return 0; 4772 } 4773 4774 static void bnxt_setup_msix(struct bnxt *bp) 4775 { 4776 const int len = sizeof(bp->irq_tbl[0].name); 4777 struct net_device *dev = bp->dev; 4778 int tcs, i; 4779 4780 tcs = netdev_get_num_tc(dev); 4781 if (tcs > 1) { 4782 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; 4783 if (bp->tx_nr_rings_per_tc == 0) { 4784 netdev_reset_tc(dev); 4785 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 4786 } else { 4787 int i, off, count; 4788 4789 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; 4790 for (i = 0; i < tcs; i++) { 4791 count = bp->tx_nr_rings_per_tc; 4792 off = i * count; 4793 netdev_set_tc_queue(dev, i, count, off); 4794 } 4795 } 4796 } 4797 4798 for (i = 0; i < bp->cp_nr_rings; i++) { 4799 char *attr; 4800 4801 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4802 attr = "TxRx"; 4803 else if (i < bp->rx_nr_rings) 4804 attr = "rx"; 4805 else 4806 attr = "tx"; 4807 4808 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr, 4809 i); 4810 bp->irq_tbl[i].handler = bnxt_msix; 4811 } 4812 } 4813 4814 static void bnxt_setup_inta(struct bnxt *bp) 4815 { 4816 const int len = sizeof(bp->irq_tbl[0].name); 4817 4818 if (netdev_get_num_tc(bp->dev)) 4819 netdev_reset_tc(bp->dev); 4820 4821 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 4822 0); 4823 bp->irq_tbl[0].handler = bnxt_inta; 4824 } 4825 4826 static int bnxt_setup_int_mode(struct bnxt *bp) 4827 { 4828 int rc; 4829 4830 if (bp->flags & BNXT_FLAG_USING_MSIX) 4831 bnxt_setup_msix(bp); 4832 else 4833 bnxt_setup_inta(bp); 4834 4835 rc = bnxt_set_real_num_queues(bp); 4836 return rc; 4837 } 4838 4839 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 4840 { 4841 #if defined(CONFIG_BNXT_SRIOV) 4842 if (BNXT_VF(bp)) 4843 return bp->vf.max_stat_ctxs; 4844 #endif 4845 return bp->pf.max_stat_ctxs; 4846 } 4847 4848 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) 4849 { 4850 #if defined(CONFIG_BNXT_SRIOV) 4851 if (BNXT_VF(bp)) 4852 bp->vf.max_stat_ctxs = max; 4853 else 4854 #endif 4855 bp->pf.max_stat_ctxs = max; 4856 } 4857 4858 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 4859 { 4860 #if defined(CONFIG_BNXT_SRIOV) 4861 if (BNXT_VF(bp)) 4862 return bp->vf.max_cp_rings; 4863 #endif 4864 return bp->pf.max_cp_rings; 4865 } 4866 4867 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) 4868 { 4869 #if defined(CONFIG_BNXT_SRIOV) 4870 if (BNXT_VF(bp)) 4871 bp->vf.max_cp_rings = max; 4872 else 4873 #endif 4874 bp->pf.max_cp_rings = max; 4875 } 4876 4877 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 4878 { 4879 #if defined(CONFIG_BNXT_SRIOV) 4880 if (BNXT_VF(bp)) 4881 return bp->vf.max_irqs; 4882 #endif 4883 return bp->pf.max_irqs; 4884 } 4885 4886 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 4887 { 4888 #if defined(CONFIG_BNXT_SRIOV) 4889 if (BNXT_VF(bp)) 4890 bp->vf.max_irqs = max_irqs; 4891 else 4892 #endif 4893 bp->pf.max_irqs = max_irqs; 4894 } 4895 4896 
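/* Illustrative sketch (a hypothetical helper, not part of the driver, kept
 * under "#if 0" so it is never compiled): how bnxt_trim_rings() above fits
 * the requested rx/tx ring counts into the available completion ring /
 * vector budget, and how bnxt_init_msix() below derives cp_nr_rings from
 * the trimmed counts.
 */
#if 0
static void bnxt_trim_rings_example(struct bnxt *bp)
{
	int rx = 8, tx = 8;

	/* Shared completion rings: one vector can service both an rx and a
	 * tx ring, so each count is simply capped at the budget.
	 */
	bnxt_trim_rings(bp, &rx, &tx, 6, true);		/* rx == 6, tx == 6 */

	rx = 8;
	tx = 8;
	/* Dedicated completion rings: rx + tx must fit within the budget,
	 * so the larger of the two counts is decremented until the sum
	 * fits.
	 */
	bnxt_trim_rings(bp, &rx, &tx, 12, false);	/* rx == 6, tx == 6 */

	/* bnxt_init_msix() then sets cp_nr_rings to max(rx, tx) when rings
	 * are shared, or to rx + tx when they are dedicated.
	 */
}
#endif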
static int bnxt_init_msix(struct bnxt *bp) 4897 { 4898 int i, total_vecs, rc = 0, min = 1; 4899 struct msix_entry *msix_ent; 4900 4901 total_vecs = bnxt_get_max_func_irqs(bp); 4902 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 4903 if (!msix_ent) 4904 return -ENOMEM; 4905 4906 for (i = 0; i < total_vecs; i++) { 4907 msix_ent[i].entry = i; 4908 msix_ent[i].vector = 0; 4909 } 4910 4911 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 4912 min = 2; 4913 4914 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 4915 if (total_vecs < 0) { 4916 rc = -ENODEV; 4917 goto msix_setup_exit; 4918 } 4919 4920 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 4921 if (bp->irq_tbl) { 4922 for (i = 0; i < total_vecs; i++) 4923 bp->irq_tbl[i].vector = msix_ent[i].vector; 4924 4925 bp->total_irqs = total_vecs; 4926 /* Trim rings based upon num of vectors allocated */ 4927 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 4928 total_vecs, min == 1); 4929 if (rc) 4930 goto msix_setup_exit; 4931 4932 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 4933 bp->cp_nr_rings = (min == 1) ? 4934 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 4935 bp->tx_nr_rings + bp->rx_nr_rings; 4936 4937 } else { 4938 rc = -ENOMEM; 4939 goto msix_setup_exit; 4940 } 4941 bp->flags |= BNXT_FLAG_USING_MSIX; 4942 kfree(msix_ent); 4943 return 0; 4944 4945 msix_setup_exit: 4946 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 4947 kfree(bp->irq_tbl); 4948 bp->irq_tbl = NULL; 4949 pci_disable_msix(bp->pdev); 4950 kfree(msix_ent); 4951 return rc; 4952 } 4953 4954 static int bnxt_init_inta(struct bnxt *bp) 4955 { 4956 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 4957 if (!bp->irq_tbl) 4958 return -ENOMEM; 4959 4960 bp->total_irqs = 1; 4961 bp->rx_nr_rings = 1; 4962 bp->tx_nr_rings = 1; 4963 bp->cp_nr_rings = 1; 4964 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 4965 bp->flags |= BNXT_FLAG_SHARED_RINGS; 4966 bp->irq_tbl[0].vector = bp->pdev->irq; 4967 return 0; 4968 } 4969 4970 static int bnxt_init_int_mode(struct bnxt *bp) 4971 { 4972 int rc = 0; 4973 4974 if (bp->flags & BNXT_FLAG_MSIX_CAP) 4975 rc = bnxt_init_msix(bp); 4976 4977 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 4978 /* fallback to INTA */ 4979 rc = bnxt_init_inta(bp); 4980 } 4981 return rc; 4982 } 4983 4984 static void bnxt_clear_int_mode(struct bnxt *bp) 4985 { 4986 if (bp->flags & BNXT_FLAG_USING_MSIX) 4987 pci_disable_msix(bp->pdev); 4988 4989 kfree(bp->irq_tbl); 4990 bp->irq_tbl = NULL; 4991 bp->flags &= ~BNXT_FLAG_USING_MSIX; 4992 } 4993 4994 static void bnxt_free_irq(struct bnxt *bp) 4995 { 4996 struct bnxt_irq *irq; 4997 int i; 4998 4999 #ifdef CONFIG_RFS_ACCEL 5000 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 5001 bp->dev->rx_cpu_rmap = NULL; 5002 #endif 5003 if (!bp->irq_tbl) 5004 return; 5005 5006 for (i = 0; i < bp->cp_nr_rings; i++) { 5007 irq = &bp->irq_tbl[i]; 5008 if (irq->requested) 5009 free_irq(irq->vector, bp->bnapi[i]); 5010 irq->requested = 0; 5011 } 5012 } 5013 5014 static int bnxt_request_irq(struct bnxt *bp) 5015 { 5016 int i, j, rc = 0; 5017 unsigned long flags = 0; 5018 #ifdef CONFIG_RFS_ACCEL 5019 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; 5020 #endif 5021 5022 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 5023 flags = IRQF_SHARED; 5024 5025 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 5026 struct bnxt_irq *irq = &bp->irq_tbl[i]; 5027 #ifdef CONFIG_RFS_ACCEL 5028 if (rmap && bp->bnapi[i]->rx_ring) { 5029 rc = irq_cpu_rmap_add(rmap, irq->vector); 5030 if (rc) 
5031 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 5032 j); 5033 j++; 5034 } 5035 #endif 5036 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 5037 bp->bnapi[i]); 5038 if (rc) 5039 break; 5040 5041 irq->requested = 1; 5042 } 5043 return rc; 5044 } 5045 5046 static void bnxt_del_napi(struct bnxt *bp) 5047 { 5048 int i; 5049 5050 if (!bp->bnapi) 5051 return; 5052 5053 for (i = 0; i < bp->cp_nr_rings; i++) { 5054 struct bnxt_napi *bnapi = bp->bnapi[i]; 5055 5056 napi_hash_del(&bnapi->napi); 5057 netif_napi_del(&bnapi->napi); 5058 } 5059 /* We called napi_hash_del() before netif_napi_del(), we need 5060 * to respect an RCU grace period before freeing napi structures. 5061 */ 5062 synchronize_net(); 5063 } 5064 5065 static void bnxt_init_napi(struct bnxt *bp) 5066 { 5067 int i; 5068 unsigned int cp_nr_rings = bp->cp_nr_rings; 5069 struct bnxt_napi *bnapi; 5070 5071 if (bp->flags & BNXT_FLAG_USING_MSIX) { 5072 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5073 cp_nr_rings--; 5074 for (i = 0; i < cp_nr_rings; i++) { 5075 bnapi = bp->bnapi[i]; 5076 netif_napi_add(bp->dev, &bnapi->napi, 5077 bnxt_poll, 64); 5078 } 5079 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5080 bnapi = bp->bnapi[cp_nr_rings]; 5081 netif_napi_add(bp->dev, &bnapi->napi, 5082 bnxt_poll_nitroa0, 64); 5083 } 5084 } else { 5085 bnapi = bp->bnapi[0]; 5086 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 5087 } 5088 } 5089 5090 static void bnxt_disable_napi(struct bnxt *bp) 5091 { 5092 int i; 5093 5094 if (!bp->bnapi) 5095 return; 5096 5097 for (i = 0; i < bp->cp_nr_rings; i++) { 5098 napi_disable(&bp->bnapi[i]->napi); 5099 bnxt_disable_poll(bp->bnapi[i]); 5100 } 5101 } 5102 5103 static void bnxt_enable_napi(struct bnxt *bp) 5104 { 5105 int i; 5106 5107 for (i = 0; i < bp->cp_nr_rings; i++) { 5108 bp->bnapi[i]->in_reset = false; 5109 bnxt_enable_poll(bp->bnapi[i]); 5110 napi_enable(&bp->bnapi[i]->napi); 5111 } 5112 } 5113 5114 void bnxt_tx_disable(struct bnxt *bp) 5115 { 5116 int i; 5117 struct bnxt_tx_ring_info *txr; 5118 struct netdev_queue *txq; 5119 5120 if (bp->tx_ring) { 5121 for (i = 0; i < bp->tx_nr_rings; i++) { 5122 txr = &bp->tx_ring[i]; 5123 txq = netdev_get_tx_queue(bp->dev, i); 5124 txr->dev_state = BNXT_DEV_STATE_CLOSING; 5125 } 5126 } 5127 /* Stop all TX queues */ 5128 netif_tx_disable(bp->dev); 5129 netif_carrier_off(bp->dev); 5130 } 5131 5132 void bnxt_tx_enable(struct bnxt *bp) 5133 { 5134 int i; 5135 struct bnxt_tx_ring_info *txr; 5136 struct netdev_queue *txq; 5137 5138 for (i = 0; i < bp->tx_nr_rings; i++) { 5139 txr = &bp->tx_ring[i]; 5140 txq = netdev_get_tx_queue(bp->dev, i); 5141 txr->dev_state = 0; 5142 } 5143 netif_tx_wake_all_queues(bp->dev); 5144 if (bp->link_info.link_up) 5145 netif_carrier_on(bp->dev); 5146 } 5147 5148 static void bnxt_report_link(struct bnxt *bp) 5149 { 5150 if (bp->link_info.link_up) { 5151 const char *duplex; 5152 const char *flow_ctrl; 5153 u16 speed; 5154 5155 netif_carrier_on(bp->dev); 5156 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 5157 duplex = "full"; 5158 else 5159 duplex = "half"; 5160 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 5161 flow_ctrl = "ON - receive & transmit"; 5162 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 5163 flow_ctrl = "ON - transmit"; 5164 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 5165 flow_ctrl = "ON - receive"; 5166 else 5167 flow_ctrl = "none"; 5168 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 5169 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 5170 speed, duplex, 
flow_ctrl); 5171 if (bp->flags & BNXT_FLAG_EEE_CAP) 5172 netdev_info(bp->dev, "EEE is %s\n", 5173 bp->eee.eee_active ? "active" : 5174 "not active"); 5175 } else { 5176 netif_carrier_off(bp->dev); 5177 netdev_err(bp->dev, "NIC Link is Down\n"); 5178 } 5179 } 5180 5181 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 5182 { 5183 int rc = 0; 5184 struct hwrm_port_phy_qcaps_input req = {0}; 5185 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5186 struct bnxt_link_info *link_info = &bp->link_info; 5187 5188 if (bp->hwrm_spec_code < 0x10201) 5189 return 0; 5190 5191 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 5192 5193 mutex_lock(&bp->hwrm_cmd_lock); 5194 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5195 if (rc) 5196 goto hwrm_phy_qcaps_exit; 5197 5198 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { 5199 struct ethtool_eee *eee = &bp->eee; 5200 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 5201 5202 bp->flags |= BNXT_FLAG_EEE_CAP; 5203 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5204 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 5205 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 5206 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 5207 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 5208 } 5209 link_info->support_auto_speeds = 5210 le16_to_cpu(resp->supported_speeds_auto_mode); 5211 5212 hwrm_phy_qcaps_exit: 5213 mutex_unlock(&bp->hwrm_cmd_lock); 5214 return rc; 5215 } 5216 5217 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 5218 { 5219 int rc = 0; 5220 struct bnxt_link_info *link_info = &bp->link_info; 5221 struct hwrm_port_phy_qcfg_input req = {0}; 5222 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5223 u8 link_up = link_info->link_up; 5224 u16 diff; 5225 5226 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 5227 5228 mutex_lock(&bp->hwrm_cmd_lock); 5229 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5230 if (rc) { 5231 mutex_unlock(&bp->hwrm_cmd_lock); 5232 return rc; 5233 } 5234 5235 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 5236 link_info->phy_link_status = resp->link; 5237 link_info->duplex = resp->duplex; 5238 link_info->pause = resp->pause; 5239 link_info->auto_mode = resp->auto_mode; 5240 link_info->auto_pause_setting = resp->auto_pause; 5241 link_info->lp_pause = resp->link_partner_adv_pause; 5242 link_info->force_pause_setting = resp->force_pause; 5243 link_info->duplex_setting = resp->duplex; 5244 if (link_info->phy_link_status == BNXT_LINK_LINK) 5245 link_info->link_speed = le16_to_cpu(resp->link_speed); 5246 else 5247 link_info->link_speed = 0; 5248 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 5249 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 5250 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 5251 link_info->lp_auto_link_speeds = 5252 le16_to_cpu(resp->link_partner_adv_speeds); 5253 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 5254 link_info->phy_ver[0] = resp->phy_maj; 5255 link_info->phy_ver[1] = resp->phy_min; 5256 link_info->phy_ver[2] = resp->phy_bld; 5257 link_info->media_type = resp->media_type; 5258 link_info->phy_type = resp->phy_type; 5259 link_info->transceiver = resp->xcvr_pkg_type; 5260 link_info->phy_addr = resp->eee_config_phy_addr & 5261 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 5262 link_info->module_status = resp->module_status; 5263 5264 if (bp->flags & BNXT_FLAG_EEE_CAP) { 5265 struct 
ethtool_eee *eee = &bp->eee; 5266 u16 fw_speeds; 5267 5268 eee->eee_active = 0; 5269 if (resp->eee_config_phy_addr & 5270 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 5271 eee->eee_active = 1; 5272 fw_speeds = le16_to_cpu( 5273 resp->link_partner_adv_eee_link_speed_mask); 5274 eee->lp_advertised = 5275 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5276 } 5277 5278 /* Pull initial EEE config */ 5279 if (!chng_link_state) { 5280 if (resp->eee_config_phy_addr & 5281 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 5282 eee->eee_enabled = 1; 5283 5284 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 5285 eee->advertised = 5286 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5287 5288 if (resp->eee_config_phy_addr & 5289 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 5290 __le32 tmr; 5291 5292 eee->tx_lpi_enabled = 1; 5293 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 5294 eee->tx_lpi_timer = le32_to_cpu(tmr) & 5295 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 5296 } 5297 } 5298 } 5299 /* TODO: need to add more logic to report VF link */ 5300 if (chng_link_state) { 5301 if (link_info->phy_link_status == BNXT_LINK_LINK) 5302 link_info->link_up = 1; 5303 else 5304 link_info->link_up = 0; 5305 if (link_up != link_info->link_up) 5306 bnxt_report_link(bp); 5307 } else { 5308 /* always link down if not required to update link state */ 5309 link_info->link_up = 0; 5310 } 5311 mutex_unlock(&bp->hwrm_cmd_lock); 5312 5313 diff = link_info->support_auto_speeds ^ link_info->advertising; 5314 if ((link_info->support_auto_speeds | diff) != 5315 link_info->support_auto_speeds) { 5316 /* An advertised speed is no longer supported, so we need to 5317 * update the advertisement settings. See bnxt_reset() for 5318 * comments about the rtnl_lock() sequence below. 5319 */ 5320 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5321 rtnl_lock(); 5322 link_info->advertising = link_info->support_auto_speeds; 5323 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 5324 (link_info->autoneg & BNXT_AUTONEG_SPEED)) 5325 bnxt_hwrm_set_link_setting(bp, true, false); 5326 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5327 rtnl_unlock(); 5328 } 5329 return 0; 5330 } 5331 5332 static void bnxt_get_port_module_status(struct bnxt *bp) 5333 { 5334 struct bnxt_link_info *link_info = &bp->link_info; 5335 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 5336 u8 module_status; 5337 5338 if (bnxt_update_link(bp, true)) 5339 return; 5340 5341 module_status = link_info->module_status; 5342 switch (module_status) { 5343 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 5344 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 5345 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 5346 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 5347 bp->pf.port_id); 5348 if (bp->hwrm_spec_code >= 0x10201) { 5349 netdev_warn(bp->dev, "Module part number %s\n", 5350 resp->phy_vendor_partnumber); 5351 } 5352 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 5353 netdev_warn(bp->dev, "TX is disabled\n"); 5354 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 5355 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 5356 } 5357 } 5358 5359 static void 5360 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 5361 { 5362 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 5363 if (bp->hwrm_spec_code >= 0x10201) 5364 req->auto_pause = 5365 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 5366 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5367 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5368 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5369 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 5370 req->enables |= 5371 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5372 } else { 5373 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5374 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 5375 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5376 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 5377 req->enables |= 5378 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 5379 if (bp->hwrm_spec_code >= 0x10201) { 5380 req->auto_pause = req->force_pause; 5381 req->enables |= cpu_to_le32( 5382 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5383 } 5384 } 5385 } 5386 5387 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 5388 struct hwrm_port_phy_cfg_input *req) 5389 { 5390 u8 autoneg = bp->link_info.autoneg; 5391 u16 fw_link_speed = bp->link_info.req_link_speed; 5392 u32 advertising = bp->link_info.advertising; 5393 5394 if (autoneg & BNXT_AUTONEG_SPEED) { 5395 req->auto_mode |= 5396 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 5397 5398 req->enables |= cpu_to_le32( 5399 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 5400 req->auto_link_speed_mask = cpu_to_le16(advertising); 5401 5402 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 5403 req->flags |= 5404 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 5405 } else { 5406 req->force_link_speed = cpu_to_le16(fw_link_speed); 5407 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 5408 } 5409 5410 /* tell chimp that the setting takes effect immediately */ 5411 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 5412 } 5413 5414 int bnxt_hwrm_set_pause(struct bnxt *bp) 5415 { 5416 struct hwrm_port_phy_cfg_input req = {0}; 5417 int rc; 5418 5419 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 5420 bnxt_hwrm_set_pause_common(bp, &req); 5421 5422 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 5423 bp->link_info.force_link_chng) 5424 bnxt_hwrm_set_link_common(bp, &req); 5425 5426 mutex_lock(&bp->hwrm_cmd_lock); 5427 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5428 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 5429 /* since changing of pause setting doesn't trigger any link 5430 * change event, the driver needs to update the current pause 5431 * result upon successfully return of the phy_cfg command 5432 */ 5433 bp->link_info.pause = 5434 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 5435 bp->link_info.auto_pause_setting = 0; 5436 if (!bp->link_info.force_link_chng) 5437 bnxt_report_link(bp); 5438 } 5439 bp->link_info.force_link_chng = false; 5440 mutex_unlock(&bp->hwrm_cmd_lock); 5441 return rc; 5442 } 5443 5444 static void bnxt_hwrm_set_eee(struct bnxt *bp, 5445 struct hwrm_port_phy_cfg_input *req) 5446 { 5447 struct ethtool_eee *eee = &bp->eee; 5448 5449 if (eee->eee_enabled) { 5450 u16 eee_speeds; 5451 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 5452 5453 if (eee->tx_lpi_enabled) 5454 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 5455 else 5456 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 5457 5458 req->flags |= cpu_to_le32(flags); 5459 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 5460 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 5461 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 5462 } else { 5463 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 5464 } 5465 } 5466 5467 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 
5468 { 5469 struct hwrm_port_phy_cfg_input req = {0}; 5470 5471 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 5472 if (set_pause) 5473 bnxt_hwrm_set_pause_common(bp, &req); 5474 5475 bnxt_hwrm_set_link_common(bp, &req); 5476 5477 if (set_eee) 5478 bnxt_hwrm_set_eee(bp, &req); 5479 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5480 } 5481 5482 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 5483 { 5484 struct hwrm_port_phy_cfg_input req = {0}; 5485 5486 if (!BNXT_SINGLE_PF(bp)) 5487 return 0; 5488 5489 if (pci_num_vf(bp->pdev)) 5490 return 0; 5491 5492 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 5493 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 5494 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5495 } 5496 5497 static bool bnxt_eee_config_ok(struct bnxt *bp) 5498 { 5499 struct ethtool_eee *eee = &bp->eee; 5500 struct bnxt_link_info *link_info = &bp->link_info; 5501 5502 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 5503 return true; 5504 5505 if (eee->eee_enabled) { 5506 u32 advertising = 5507 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 5508 5509 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 5510 eee->eee_enabled = 0; 5511 return false; 5512 } 5513 if (eee->advertised & ~advertising) { 5514 eee->advertised = advertising & eee->supported; 5515 return false; 5516 } 5517 } 5518 return true; 5519 } 5520 5521 static int bnxt_update_phy_setting(struct bnxt *bp) 5522 { 5523 int rc; 5524 bool update_link = false; 5525 bool update_pause = false; 5526 bool update_eee = false; 5527 struct bnxt_link_info *link_info = &bp->link_info; 5528 5529 rc = bnxt_update_link(bp, true); 5530 if (rc) { 5531 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 5532 rc); 5533 return rc; 5534 } 5535 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 5536 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 5537 link_info->req_flow_ctrl) 5538 update_pause = true; 5539 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 5540 link_info->force_pause_setting != link_info->req_flow_ctrl) 5541 update_pause = true; 5542 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 5543 if (BNXT_AUTO_MODE(link_info->auto_mode)) 5544 update_link = true; 5545 if (link_info->req_link_speed != link_info->force_link_speed) 5546 update_link = true; 5547 if (link_info->req_duplex != link_info->duplex_setting) 5548 update_link = true; 5549 } else { 5550 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 5551 update_link = true; 5552 if (link_info->advertising != link_info->auto_link_speeds) 5553 update_link = true; 5554 } 5555 5556 /* The last close may have shutdown the link, so need to call 5557 * PHY_CFG to bring it back up. 5558 */ 5559 if (!netif_carrier_ok(bp->dev)) 5560 update_link = true; 5561 5562 if (!bnxt_eee_config_ok(bp)) 5563 update_eee = true; 5564 5565 if (update_link) 5566 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 5567 else if (update_pause) 5568 rc = bnxt_hwrm_set_pause(bp); 5569 if (rc) { 5570 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 5571 rc); 5572 return rc; 5573 } 5574 5575 return rc; 5576 } 5577 5578 /* Common routine to pre-map certain register block to different GRC window. 5579 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 5580 * in PF and 3 windows in VF that can be customized to map in different 5581 * register blocks. 
5582 */ 5583 static void bnxt_preset_reg_win(struct bnxt *bp) 5584 { 5585 if (BNXT_PF(bp)) { 5586 /* CAG registers map to GRC window #4 */ 5587 writel(BNXT_CAG_REG_BASE, 5588 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 5589 } 5590 } 5591 5592 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 5593 { 5594 int rc = 0; 5595 5596 bnxt_preset_reg_win(bp); 5597 netif_carrier_off(bp->dev); 5598 if (irq_re_init) { 5599 rc = bnxt_setup_int_mode(bp); 5600 if (rc) { 5601 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 5602 rc); 5603 return rc; 5604 } 5605 } 5606 if ((bp->flags & BNXT_FLAG_RFS) && 5607 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 5608 /* disable RFS if falling back to INTA */ 5609 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 5610 bp->flags &= ~BNXT_FLAG_RFS; 5611 } 5612 5613 rc = bnxt_alloc_mem(bp, irq_re_init); 5614 if (rc) { 5615 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 5616 goto open_err_free_mem; 5617 } 5618 5619 if (irq_re_init) { 5620 bnxt_init_napi(bp); 5621 rc = bnxt_request_irq(bp); 5622 if (rc) { 5623 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 5624 goto open_err; 5625 } 5626 } 5627 5628 bnxt_enable_napi(bp); 5629 5630 rc = bnxt_init_nic(bp, irq_re_init); 5631 if (rc) { 5632 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 5633 goto open_err; 5634 } 5635 5636 if (link_re_init) { 5637 rc = bnxt_update_phy_setting(bp); 5638 if (rc) 5639 netdev_warn(bp->dev, "failed to update phy settings\n"); 5640 } 5641 5642 if (irq_re_init) 5643 udp_tunnel_get_rx_info(bp->dev); 5644 5645 set_bit(BNXT_STATE_OPEN, &bp->state); 5646 bnxt_enable_int(bp); 5647 /* Enable TX queues */ 5648 bnxt_tx_enable(bp); 5649 mod_timer(&bp->timer, jiffies + bp->current_interval); 5650 /* Poll link status and check for SFP+ module status */ 5651 bnxt_get_port_module_status(bp); 5652 5653 return 0; 5654 5655 open_err: 5656 bnxt_disable_napi(bp); 5657 bnxt_del_napi(bp); 5658 5659 open_err_free_mem: 5660 bnxt_free_skbs(bp); 5661 bnxt_free_irq(bp); 5662 bnxt_free_mem(bp, true); 5663 return rc; 5664 } 5665 5666 /* rtnl_lock held */ 5667 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 5668 { 5669 int rc = 0; 5670 5671 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 5672 if (rc) { 5673 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 5674 dev_close(bp->dev); 5675 } 5676 return rc; 5677 } 5678 5679 static int bnxt_open(struct net_device *dev) 5680 { 5681 struct bnxt *bp = netdev_priv(dev); 5682 5683 return __bnxt_open_nic(bp, true, true); 5684 } 5685 5686 static void bnxt_disable_int_sync(struct bnxt *bp) 5687 { 5688 int i; 5689 5690 atomic_inc(&bp->intr_sem); 5691 if (!netif_running(bp->dev)) 5692 return; 5693 5694 bnxt_disable_int(bp); 5695 for (i = 0; i < bp->cp_nr_rings; i++) 5696 synchronize_irq(bp->irq_tbl[i].vector); 5697 } 5698 5699 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 5700 { 5701 int rc = 0; 5702 5703 #ifdef CONFIG_BNXT_SRIOV 5704 if (bp->sriov_cfg) { 5705 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 5706 !bp->sriov_cfg, 5707 BNXT_SRIOV_CFG_WAIT_TMO); 5708 if (rc) 5709 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 5710 } 5711 #endif 5712 /* Change device state to avoid TX queue wake up's */ 5713 bnxt_tx_disable(bp); 5714 5715 clear_bit(BNXT_STATE_OPEN, &bp->state); 5716 smp_mb__after_atomic(); 5717 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) 5718 msleep(20); 5719 5720 /* Flush rings before disabling interrupts */ 5721 
bnxt_shutdown_nic(bp, irq_re_init); 5722 5723 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 5724 5725 bnxt_disable_napi(bp); 5726 bnxt_disable_int_sync(bp); 5727 del_timer_sync(&bp->timer); 5728 bnxt_free_skbs(bp); 5729 5730 if (irq_re_init) { 5731 bnxt_free_irq(bp); 5732 bnxt_del_napi(bp); 5733 } 5734 bnxt_free_mem(bp, irq_re_init); 5735 return rc; 5736 } 5737 5738 static int bnxt_close(struct net_device *dev) 5739 { 5740 struct bnxt *bp = netdev_priv(dev); 5741 5742 bnxt_close_nic(bp, true, true); 5743 bnxt_hwrm_shutdown_link(bp); 5744 return 0; 5745 } 5746 5747 /* rtnl_lock held */ 5748 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5749 { 5750 switch (cmd) { 5751 case SIOCGMIIPHY: 5752 /* fallthru */ 5753 case SIOCGMIIREG: { 5754 if (!netif_running(dev)) 5755 return -EAGAIN; 5756 5757 return 0; 5758 } 5759 5760 case SIOCSMIIREG: 5761 if (!netif_running(dev)) 5762 return -EAGAIN; 5763 5764 return 0; 5765 5766 default: 5767 /* do nothing */ 5768 break; 5769 } 5770 return -EOPNOTSUPP; 5771 } 5772 5773 static struct rtnl_link_stats64 * 5774 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 5775 { 5776 u32 i; 5777 struct bnxt *bp = netdev_priv(dev); 5778 5779 memset(stats, 0, sizeof(struct rtnl_link_stats64)); 5780 5781 if (!bp->bnapi) 5782 return stats; 5783 5784 /* TODO check if we need to synchronize with bnxt_close path */ 5785 for (i = 0; i < bp->cp_nr_rings; i++) { 5786 struct bnxt_napi *bnapi = bp->bnapi[i]; 5787 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5788 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 5789 5790 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 5791 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 5792 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 5793 5794 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 5795 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 5796 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 5797 5798 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 5799 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 5800 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 5801 5802 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 5803 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 5804 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 5805 5806 stats->rx_missed_errors += 5807 le64_to_cpu(hw_stats->rx_discard_pkts); 5808 5809 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 5810 5811 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 5812 } 5813 5814 if (bp->flags & BNXT_FLAG_PORT_STATS) { 5815 struct rx_port_stats *rx = bp->hw_rx_port_stats; 5816 struct tx_port_stats *tx = bp->hw_tx_port_stats; 5817 5818 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 5819 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 5820 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 5821 le64_to_cpu(rx->rx_ovrsz_frames) + 5822 le64_to_cpu(rx->rx_runt_frames); 5823 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 5824 le64_to_cpu(rx->rx_jbr_frames); 5825 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 5826 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 5827 stats->tx_errors = le64_to_cpu(tx->tx_err); 5828 } 5829 5830 return stats; 5831 } 5832 5833 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 5834 { 5835 struct net_device *dev = bp->dev; 5836 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 
5837 struct netdev_hw_addr *ha; 5838 u8 *haddr; 5839 int mc_count = 0; 5840 bool update = false; 5841 int off = 0; 5842 5843 netdev_for_each_mc_addr(ha, dev) { 5844 if (mc_count >= BNXT_MAX_MC_ADDRS) { 5845 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 5846 vnic->mc_list_count = 0; 5847 return false; 5848 } 5849 haddr = ha->addr; 5850 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 5851 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 5852 update = true; 5853 } 5854 off += ETH_ALEN; 5855 mc_count++; 5856 } 5857 if (mc_count) 5858 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 5859 5860 if (mc_count != vnic->mc_list_count) { 5861 vnic->mc_list_count = mc_count; 5862 update = true; 5863 } 5864 return update; 5865 } 5866 5867 static bool bnxt_uc_list_updated(struct bnxt *bp) 5868 { 5869 struct net_device *dev = bp->dev; 5870 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5871 struct netdev_hw_addr *ha; 5872 int off = 0; 5873 5874 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 5875 return true; 5876 5877 netdev_for_each_uc_addr(ha, dev) { 5878 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 5879 return true; 5880 5881 off += ETH_ALEN; 5882 } 5883 return false; 5884 } 5885 5886 static void bnxt_set_rx_mode(struct net_device *dev) 5887 { 5888 struct bnxt *bp = netdev_priv(dev); 5889 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5890 u32 mask = vnic->rx_mask; 5891 bool mc_update = false; 5892 bool uc_update; 5893 5894 if (!netif_running(dev)) 5895 return; 5896 5897 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 5898 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 5899 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 5900 5901 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 5902 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5903 5904 uc_update = bnxt_uc_list_updated(bp); 5905 5906 if (dev->flags & IFF_ALLMULTI) { 5907 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 5908 vnic->mc_list_count = 0; 5909 } else { 5910 mc_update = bnxt_mc_list_updated(bp, &mask); 5911 } 5912 5913 if (mask != vnic->rx_mask || uc_update || mc_update) { 5914 vnic->rx_mask = mask; 5915 5916 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 5917 schedule_work(&bp->sp_task); 5918 } 5919 } 5920 5921 static int bnxt_cfg_rx_mode(struct bnxt *bp) 5922 { 5923 struct net_device *dev = bp->dev; 5924 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5925 struct netdev_hw_addr *ha; 5926 int i, off = 0, rc; 5927 bool uc_update; 5928 5929 netif_addr_lock_bh(dev); 5930 uc_update = bnxt_uc_list_updated(bp); 5931 netif_addr_unlock_bh(dev); 5932 5933 if (!uc_update) 5934 goto skip_uc; 5935 5936 mutex_lock(&bp->hwrm_cmd_lock); 5937 for (i = 1; i < vnic->uc_filter_count; i++) { 5938 struct hwrm_cfa_l2_filter_free_input req = {0}; 5939 5940 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 5941 -1); 5942 5943 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 5944 5945 rc = _hwrm_send_message(bp, &req, sizeof(req), 5946 HWRM_CMD_TIMEOUT); 5947 } 5948 mutex_unlock(&bp->hwrm_cmd_lock); 5949 5950 vnic->uc_filter_count = 1; 5951 5952 netif_addr_lock_bh(dev); 5953 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 5954 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5955 } else { 5956 netdev_for_each_uc_addr(ha, dev) { 5957 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 5958 off += ETH_ALEN; 5959 vnic->uc_filter_count++; 5960 } 5961 } 5962 netif_addr_unlock_bh(dev); 5963 5964 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 5965 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + 
off); 5966 if (rc) { 5967 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 5968 rc); 5969 vnic->uc_filter_count = i; 5970 return rc; 5971 } 5972 } 5973 5974 skip_uc: 5975 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 5976 if (rc) 5977 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 5978 rc); 5979 5980 return rc; 5981 } 5982 5983 static bool bnxt_rfs_capable(struct bnxt *bp) 5984 { 5985 #ifdef CONFIG_RFS_ACCEL 5986 struct bnxt_pf_info *pf = &bp->pf; 5987 int vnics; 5988 5989 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP)) 5990 return false; 5991 5992 vnics = 1 + bp->rx_nr_rings; 5993 if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) { 5994 netdev_warn(bp->dev, 5995 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 5996 min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1)); 5997 return false; 5998 } 5999 6000 return true; 6001 #else 6002 return false; 6003 #endif 6004 } 6005 6006 static netdev_features_t bnxt_fix_features(struct net_device *dev, 6007 netdev_features_t features) 6008 { 6009 struct bnxt *bp = netdev_priv(dev); 6010 6011 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 6012 features &= ~NETIF_F_NTUPLE; 6013 6014 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 6015 * turned on or off together. 6016 */ 6017 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 6018 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 6019 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 6020 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6021 NETIF_F_HW_VLAN_STAG_RX); 6022 else 6023 features |= NETIF_F_HW_VLAN_CTAG_RX | 6024 NETIF_F_HW_VLAN_STAG_RX; 6025 } 6026 #ifdef CONFIG_BNXT_SRIOV 6027 if (BNXT_VF(bp)) { 6028 if (bp->vf.vlan) { 6029 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 6030 NETIF_F_HW_VLAN_STAG_RX); 6031 } 6032 } 6033 #endif 6034 return features; 6035 } 6036 6037 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 6038 { 6039 struct bnxt *bp = netdev_priv(dev); 6040 u32 flags = bp->flags; 6041 u32 changes; 6042 int rc = 0; 6043 bool re_init = false; 6044 bool update_tpa = false; 6045 6046 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 6047 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 6048 flags |= BNXT_FLAG_GRO; 6049 if (features & NETIF_F_LRO) 6050 flags |= BNXT_FLAG_LRO; 6051 6052 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6053 flags |= BNXT_FLAG_STRIP_VLAN; 6054 6055 if (features & NETIF_F_NTUPLE) 6056 flags |= BNXT_FLAG_RFS; 6057 6058 changes = flags ^ bp->flags; 6059 if (changes & BNXT_FLAG_TPA) { 6060 update_tpa = true; 6061 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 6062 (flags & BNXT_FLAG_TPA) == 0) 6063 re_init = true; 6064 } 6065 6066 if (changes & ~BNXT_FLAG_TPA) 6067 re_init = true; 6068 6069 if (flags != bp->flags) { 6070 u32 old_flags = bp->flags; 6071 6072 bp->flags = flags; 6073 6074 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6075 if (update_tpa) 6076 bnxt_set_ring_params(bp); 6077 return rc; 6078 } 6079 6080 if (re_init) { 6081 bnxt_close_nic(bp, false, false); 6082 if (update_tpa) 6083 bnxt_set_ring_params(bp); 6084 6085 return bnxt_open_nic(bp, false, false); 6086 } 6087 if (update_tpa) { 6088 rc = bnxt_set_tpa(bp, 6089 (flags & BNXT_FLAG_TPA) ?
					  true : false);
			if (rc)
				bp->flags = old_flags;
		}
	}
	return rc;
}

static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int i = bnapi->index;

	if (!txr)
		return;

	netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
		    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
		    txr->tx_cons);
}

static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
{
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	int i = bnapi->index;

	if (!rxr)
		return;

	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
		    rxr->rx_sw_agg_prod);
}

static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i = bnapi->index;

	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
}

static void bnxt_dbg_dump_states(struct bnxt *bp)
{
	int i;
	struct bnxt_napi *bnapi;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		bnapi = bp->bnapi[i];
		if (netif_msg_drv(bp)) {
			bnxt_dump_tx_sw_state(bnapi);
			bnxt_dump_rx_sw_state(bnapi);
			bnxt_dump_cp_sw_state(bnapi);
		}
	}
}

static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
	if (!silent)
		bnxt_dbg_dump_states(bp);
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		bnxt_open_nic(bp, false, false);
	}
}

static void bnxt_tx_timeout(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bnxt_poll_controller(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, bp->bnapi[i]);
		enable_irq(irq->vector);
	}
}
#endif

static void bnxt_timer(unsigned long data)
{
	struct bnxt *bp = (struct bnxt *)data;
	struct net_device *dev = bp->dev;

	if (!netif_running(dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnxt_restart_timer;

	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
bnxt_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
	/* bnxt_reset_task() calls bnxt_close_nic() which waits
	 * for BNXT_STATE_IN_SP_TASK to clear.
	 * If there is a parallel dev_close(), bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_reset_task(bp, silent);
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}

static void bnxt_cfg_ntp_filters(struct bnxt *);

static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
	int rc;

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
		bnxt_cfg_rx_mode(bp);

	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
		bnxt_cfg_ntp_filters(bp);
	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
				       &bp->sp_event))
			bnxt_hwrm_phy_qcaps(bp);

		rc = bnxt_update_link(bp, true);
		if (rc)
			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
				   rc);
	}
	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_exec_fwd_req(bp);
	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_alloc(
			bp, bp->vxlan_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_alloc(
			bp, bp->nge_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, false);

	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, true);

	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
		bnxt_get_port_module_status(bp);

	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_port_qstats(bp);

	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}

static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;	/* return an error instead of the earlier 0 */
		goto init_err_disable;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!bp->bar1) {
		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	pci_enable_pcie_error_reporting(pdev);

	INIT_WORK(&bp->sp_task, bnxt_sp_task);

	spin_lock_init(&bp->ntp_fltr_lock);

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	/* tick values in micro seconds */
	bp->rx_coal_ticks = 12;
	bp->rx_coal_bufs = 30;
	bp->rx_coal_ticks_irq = 1;
	bp->rx_coal_bufs_irq = 2;

	bp->tx_coal_ticks = 25;
	bp->tx_coal_bufs = 30;
	bp->tx_coal_ticks_irq = 2;
	bp->tx_coal_bufs_irq = 2;

	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;

	init_timer(&bp->timer);
	bp->timer.data = (unsigned long)bp;
	bp->timer.function = bnxt_timer;
	bp->current_interval = BNXT_TIMER_INTERVAL;

	clear_bit(BNXT_STATE_OPEN, &bp->state);

	return 0;

init_err_release:
	if (bp->bar2) {
		pci_iounmap(pdev, bp->bar2);
		bp->bar2 = NULL;
	}

	if (bp->bar1) {
		pci_iounmap(pdev, bp->bar1);
		bp->bar1 = NULL;
	}

	if (bp->bar0) {
		pci_iounmap(pdev, bp->bar0);
		bp->bar0 = NULL;
	}

	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	rc = bnxt_approve_mac(bp, addr->sa_data);
	if (rc)
		return rc;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}

	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;

	if (tc > bp->max_tc) {
		netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	if (tc) {
		int max_rx_rings, max_tx_rings, rc;

		rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
		if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
			return -ENOMEM;
	}

	/* Needs to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			 struct tc_to_netdev *ntc)
{
	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return bnxt_setup_mq_tc(dev, ntc->tc);
}

#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}

static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			schedule_work(&bp->sp_task);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}
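	/* For both tunnel types the actual HWRM tunnel port add is issued
	 * later from bnxt_sp_task(), driven by the event bit set above.
	 */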

	schedule_work(&bp->sp_task);
}

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= bnxt_busy_poll,
#endif
};

static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_dcb_free(bp);
	pci_iounmap(pdev, bp->bar2);
	pci_iounmap(pdev, bp->bar1);
	pci_iounmap(pdev, bp->bar0);
	kfree(bp->edev);
	bp->edev = NULL;
	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* initialize the ethtool setting copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	int max_ring_grps = 0;

#ifdef CONFIG_BNXT_SRIOV
	if (!BNXT_PF(bp)) {
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
		max_ring_grps = bp->vf.max_hw_ring_grps;
	} else
#endif
	{
		*max_tx = bp->pf.max_tx_rings;
		*max_rx = bp->pf.max_rx_rings;
		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
		max_ring_grps = bp->pf.max_hw_ring_grps;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	if (!rx || !tx || !cp)
		return -ENOMEM;

	*max_rx = rx;
	*max_tx = tx;
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;
	bool sh = true;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

void bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);
	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}

static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pdev->device == 0x16cd && pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;

	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
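	/* CTAG and STAG VLAN offloads are advertised together here;
	 * bnxt_fix_features() keeps the RX halves of the two in sync.
	 */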
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9500 */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = 9500;

	bnxt_dcb_init(bp);

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
		bp->gro_func = bnxt_gro_func_5731x;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	bnxt_hwrm_func_qcfg(bp);

	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	bnxt_set_dflt_rings(bp);

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
	    !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_clr_int;

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_clr_int:
	bnxt_clear_int_mode(bp);

init_err:
	pci_iounmap(pdev, bp->bar0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

init_err_free:
	free_netdev(dev);
	return rc;
}

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

module_pci_driver(bnxt_pci_driver);