/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
#include <net/vxlan.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57304_VF,
	BCM57404_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
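
/* HWRM request types issued by VFs that get forwarded to the PF driver
 * for validation before execution (the forwarding side lives in
 * bnxt_sriov.c).
 */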
static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == BCM57304_VF || idx == BCM57404_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}
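
	/* TX push: if the ring is completely idle and the packet fits under
	 * bp->tx_push_thresh, the BDs plus the packet bytes are written
	 * straight into the doorbell BAR with __iowrite64_copy() below, so
	 * the NIC need not DMA-fetch descriptors or data for this packet.
	 */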
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		netdev_tx_sent_queue(txq, skb->len);

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 push_len - 16);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		tx_buf->is_push = 1;
		goto tx_done;
	}
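
	/* Normal path: pad runt frames up to BNXT_MIN_PKT_SIZE, DMA-map the
	 * linear data and each page fragment, and describe the packet with a
	 * long TX BD plus an extended BD carrying checksum/LSO and VLAN
	 * metadata.
	 */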
normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
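
/* Reclaim nr_pkts completed TX packets: unmap the head and fragment
 * buffers of each skb (push-mode packets have nothing mapped), free the
 * skbs, then wake the queue if it was stopped and enough descriptors are
 * free again.  Runs from NAPI poll context.
 */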
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = txr - &bp->tx_ring[0];
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
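
/* RX buffers are kmalloc()'ed and later turned into skbs with build_skb().
 * The buffer is DMA-mapped at BNXT_RX_DMA_OFFSET (NET_SKB_PAD); the packet
 * data ends up at BNXT_RX_OFFSET, NET_IP_ALIGN bytes further, so the IP
 * header lands on an aligned boundary.
 */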
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
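
/* Recycle a run of aggregation buffers that were already consumed on the
 * completion ring back onto the aggregation ring, so their pages can be
 * posted to the hardware again after a dropped or aborted packet.
 */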
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}
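
/* Append the aggregation pages of a jumbo or TPA packet to the skb as
 * page fragments.  If a replacement page cannot be allocated, the skb is
 * dropped and the remaining buffers are recycled with
 * bnxt_reuse_rx_agg_bufs().
 */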
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}
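
/* TPA_START: the hardware has begun aggregating a TCP flow (hardware
 * LRO/GRO).  Park the current RX buffer in the rx_tpa[agg_id] slot until
 * the matching TPA_END arrives, and recycle the slot's previous buffer
 * into the RX ring in its place.  A TPA_START consumes two RX BDs, so the
 * producer index is advanced twice.
 */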
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int payload_off, tcp_opt_len = 0;
	int len, nw_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	if (TPA_END_GRO_TS(tpa_end))
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tcp_gro_complete(skb);

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
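
/* TPA_END: the aggregation identified by agg_id is complete.  Small
 * aggregations are copied into a fresh skb; larger ones take over the
 * parked buffer after a replacement has been allocated for the rx_tpa
 * slot.  Any aggregation pages are then attached and, for GRO
 * completions, bnxt_gro_skb() fixes up the packet headers.
 */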
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		netdev_features_t features = skb->dev->features;
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;

		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
		     vlan_proto == ETH_P_8021Q) ||
		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
		     vlan_proto == ETH_P_8021AD)) {
			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
					       tpa_info->metadata &
					       RX_CMP_FLAGS2_METADATA_VID_MASK);
		}
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
				   agg_event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			skb_mark_napi_id(skb, &bnapi->napi);
			if (bnxt_busy_polling(bnapi))
				netif_receive_skb(skb);
			else
				napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	prefetch(data);

	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
				RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*agg_event = true;
	}

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = dma_unmap_addr(rx_buf, mapping);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (rxcmp1->rx_cmp_flags2 &
	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
		netdev_features_t features = skb->dev->features;
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
		     vlan_proto == ETH_P_8021Q) ||
		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
		     vlan_proto == ETH_P_8021AD))
			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
					       meta_data &
					       RX_CMP_FLAGS2_METADATA_VID_MASK);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	skb_mark_napi_id(skb, &bnapi->napi);
	if (bnxt_busy_polling(bnapi))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}

static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	default:
		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
			   event_id);
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);

	default:
		break;
	}

	return 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}
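
/* Legacy INTx handler.  The line may be shared, so when the completion
 * ring shows no new work, the CAG status register is consulted to filter
 * out interrupts raised for another device.
 */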
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
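
/* Service one completion ring: count TX completions, process RX packets
 * and dispatch HWRM/async events until the ring is empty or the NAPI
 * budget is exhausted.  Returns the number of RX packets processed.
 */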
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	bool rx_event = false;
	bool agg_event = false;
	struct tx_cmp *txcmp;

	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			else if (rc == -EBUSY)	/* partial completion */
				break;
			rx_event = true;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);

	if (tx_pkts)
		bnxt_tx_int(bp, bnapi, tx_pkts);

	if (rx_event) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		if (agg_event) {
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
		}
	}
	return rx_pkts;
}

static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	if (!bnxt_lock_napi(bnapi))
		return budget;

	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

		if (work_done >= budget)
			break;

		if (!bnxt_has_work(bp, cpr)) {
			napi_complete(napi);
			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
			break;
		}
	}
	mmiowb();
	bnxt_unlock_napi(bnapi);
	return work_done;
}
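
/* Busy-poll entry point for low-latency sockets: polls with a small fixed
 * budget and uses the bnapi lock to avoid racing with regular NAPI polls.
 */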
#ifdef CONFIG_NET_RX_BUSY_POLL
static int bnxt_busy_poll(struct napi_struct *napi)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int rx_work, budget = 4;

	if (atomic_read(&bp->intr_sem) != 0)
		return LL_FLUSH_FAILED;

	if (!bnxt_lock_poll(bnapi))
		return LL_FLUSH_BUSY;

	rx_work = bnxt_poll_work(bp, bnapi, budget);

	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);

	bnxt_unlock_poll(bnapi);
	return rx_work;
}
#endif

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single(
					&pdev->dev,
					dma_unmap_addr(tpa_info, mapping),
					bp->rx_buf_use_size,
					PCI_DMA_FROMDEVICE);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(rx_agg_buf, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
	}
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < ring->nr_pages; i++) {
		if (!ring->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, ring->page_size,
				  ring->pg_arr[i], ring->dma_arr[i]);

		ring->pg_arr[i] = NULL;
	}
	if (ring->pg_tbl) {
		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
				  ring->pg_tbl, ring->pg_tbl_map);
		ring->pg_tbl = NULL;
	}
	if (ring->vmem_size && *ring->vmem) {
		vfree(*ring->vmem);
		*ring->vmem = NULL;
	}
}
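
/* A ring is backed by an array of page-sized descriptor blocks.  Rings
 * spanning more than one block also get a page table (pg_tbl) of DMA
 * addresses for the hardware, and vmem holds the ring's software state
 * array.
 */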
static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (ring->nr_pages > 1) {
		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
						  ring->nr_pages * 8,
						  &ring->pg_tbl_map,
						  GFP_KERNEL);
		if (!ring->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < ring->nr_pages; i++) {
		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     ring->page_size,
						     &ring->dma_arr[i],
						     GFP_KERNEL);
		if (!ring->pg_arr[i])
			return -ENOMEM;

		if (ring->nr_pages > 1)
			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
	}

	if (ring->vmem_size) {
		*ring->vmem = vzalloc(ring->vmem_size);
		if (!(*ring->vmem))
			return -ENOMEM;
	}
	return 0;
}

static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, ring);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc, agg_rings = 0, tpa_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	if (bp->flags & BNXT_FLAG_TPA)
		tpa_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, ring);
			if (rc)
				return rc;

			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;

			if (tpa_rings) {
				rxr->rx_tpa = kcalloc(MAX_TPA,
						sizeof(struct bnxt_tpa_info),
						GFP_KERNEL);
				if (!rxr->rx_tpa)
					return -ENOMEM;
			}
		}
	}
	return 0;
}

static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					   bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);

			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		ring->queue_id = bp->q_info[j].queue_id;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	int i, rc;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;
	}
	return 0;
}
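
/* Point each ring_struct at the descriptor page arrays, DMA address
 * arrays and software ring storage embedded in the per-ring info
 * structures, and record the sizes that bnxt_alloc_ring() will use.
 */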
static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		ring->nr_pages = bp->cp_nr_pages;
		ring->page_size = HW_CMPD_RING_SIZE;
		ring->pg_arr = (void **)cpr->cp_desc_ring;
		ring->dma_arr = cpr->cp_desc_mapping;
		ring->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		ring->nr_pages = bp->rx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_desc_ring;
		ring->dma_arr = rxr->rx_desc_mapping;
		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		ring->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		ring->nr_pages = bp->rx_agg_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
		ring->dma_arr = rxr->rx_agg_desc_mapping;
		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		ring->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		ring->nr_pages = bp->tx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)txr->tx_desc_ring;
		ring->dma_arr = txr->tx_desc_mapping;
		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		ring->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}
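
/* Populate one RX ring with data buffers and, when enabled, its
 * aggregation ring with pages and the MAX_TPA parked TPA buffers.
 */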
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct net_device *dev = bp->dev;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 prod, type;
	int i;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
			u8 *data;
			dma_addr_t mapping;

			for (i = 0; i < MAX_TPA; i++) {
				data = __bnxt_alloc_rx_data(bp, &mapping,
							    GFP_KERNEL);
				if (!data)
					return -ENOMEM;

				rxr->rx_tpa[i].data = data;
				rxr->rx_tpa[i].mapping = mapping;
			}
		} else {
			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   MAX_SKB_FRAGS + 1);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}
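
/* LRO tracks the netdev feature bit directly; hardware GRO is
 * additionally gated on the PCI revision, so first-revision (rev 0)
 * chips never use it.
 */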
static void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
		bp->flags |= BNXT_FLAG_GRO;
}

/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = 4;

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	agg_ring_size = ring_size * agg_factor;

	if (agg_ring_size) {
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							      RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}
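/* Example for bnxt_set_ring_params(): assuming a 1500-byte MTU,
 * NET_IP_ALIGN == 2 and 64-byte cache lines, rx_size is
 * SKB_DATA_ALIGN(1500 + 14 + 2 + 8) = 1536 bytes, and rx_space adds
 * NET_SKB_PAD plus the aligned skb_shared_info.  That still fits in a
 * 4K page, so no aggregation ring is used unless TPA is enabled or a
 * jumbo MTU pushes rx_space past PAGE_SIZE.
 */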
static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, PAGE_SIZE,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}

static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}

		/* Allocate rss table and hash key */
		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

out:
	return rc;
}

static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
			  bp->hwrm_cmd_resp_dma_addr);

	bp->hwrm_cmd_resp_addr = NULL;
	if (bp->hwrm_dbg_resp_addr) {
		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
				  bp->hwrm_dbg_resp_addr,
				  bp->hwrm_dbg_resp_dma_addr);

		bp->hwrm_dbg_resp_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						    &bp->hwrm_cmd_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_cmd_resp_addr)
		return -ENOMEM;
	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
						    HWRM_DBG_REG_BUF_SIZE,
						    &bp->hwrm_dbg_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_dbg_resp_addr)
		netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");

	return 0;
}

static void bnxt_free_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	if (bp->hw_rx_port_stats) {
		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
				  bp->hw_rx_port_stats,
				  bp->hw_rx_port_stats_map);
		bp->hw_rx_port_stats = NULL;
		bp->flags &= ~BNXT_FLAG_PORT_STATS;
	}

	if (!bp->bnapi)
		return;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats) {
			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
					  cpr->hw_stats_map);
			cpr->hw_stats = NULL;
		}
	}
}
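/* Statistics memory layout used by bnxt_alloc_stats() below: every
 * completion ring gets its own ctx_hw_stats block for DMA.  On the PF,
 * one additional coherent buffer holds the RX port stats followed, after
 * 512 bytes of padding, by the TX port stats; the extra 1024 bytes in
 * the buffer size leave room for that padding.
 */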
static int bnxt_alloc_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
						   &cpr->hw_stats_map,
						   GFP_KERNEL);
		if (!cpr->hw_stats)
			return -ENOMEM;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_PF(bp)) {
		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
					 sizeof(struct tx_port_stats) + 1024;

		bp->hw_rx_port_stats =
			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
					   &bp->hw_rx_port_stats_map,
					   GFP_KERNEL);
		if (!bp->hw_rx_port_stats)
			return -ENOMEM;

		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
				       512;
		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
					   sizeof(struct rx_port_stats) + 512;
		bp->flags |= BNXT_FLAG_PORT_STATS;
	}
	return 0;
}

static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
		}
	}
}
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* We are under rtnl_lock and all NAPIs have been disabled, so it
	 * is safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		kfree(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	/* BITS_TO_LONGS() counts longs, not bytes, so the bitmap must be
	 * sized in sizeof(long) units.
	 */
	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
				    sizeof(long), GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}

static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}
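/* bnxt_alloc_mem() below carves the bnxt_napi pointers and structures
 * out of one allocation, roughly:
 *
 *	[bnxt_napi *[cp_nr_rings]][bnxt_napi 0][bnxt_napi 1]...
 *
 * with each piece L1-cache aligned.  The rx_ring and tx_ring arrays are
 * then allocated separately and cross-linked with their bnapi owners;
 * with shared rings, tx ring i shares bnapi i with rx ring i, otherwise
 * the tx rings start at bnapi index rx_nr_rings.
 */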
static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
		}

		bp->rx_ring = kcalloc(bp->rx_nr_rings,
				      sizeof(struct bnxt_rx_ring_info),
				      GFP_KERNEL);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			bp->rx_ring[i].bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kcalloc(bp->tx_nr_rings,
				      sizeof(struct bnxt_tx_ring_info),
				      GFP_KERNEL);
		if (!bp->tx_ring)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			bp->tx_ring[i].bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}

void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}

static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
				 int timeout, bool silent)
{
	int i, intr_process, rc;
	struct input *req = msg;
	u32 *data = msg;
	__le32 *resp_len, *valid;
	u16 cp_ring_id, len = 0;
	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;

	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0, data, msg_len / 4);

	for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
		writel(0, bp->bar0 + i);

	/* currently supports only one outstanding message */
	if (intr_process)
		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + 0x100);

	if (!timeout)
		timeout = DFLT_HWRM_CMD_TIMEOUT;

	i = 0;
	if (intr_process) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
		       i++ < timeout) {
			usleep_range(600, 800);
		}

		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
				   le16_to_cpu(req->req_type));
			return -1;
		}
	} else {
		/* Check if response len is updated */
		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
		for (i = 0; i < timeout; i++) {
			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
			      HWRM_RESP_LEN_SFT;
			if (len)
				break;
			usleep_range(600, 800);
		}

		if (i >= timeout) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id),
				   le32_to_cpu(*resp_len));
			return -1;
		}

		/* Last word of resp contains valid bit */
		valid = bp->hwrm_cmd_resp_addr + len - 4;
		for (i = 0; i < timeout; i++) {
			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
				break;
			usleep_range(600, 800);
		}

		if (i >= timeout) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len,
				   le32_to_cpu(*valid));
			return -1;
		}
	}

	rc = le16_to_cpu(resp->error_code);
	if (rc && !silent)
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(resp->req_type),
			   le16_to_cpu(resp->seq_id), rc);
	return rc;
}

int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}
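/* Typical HWRM usage (a sketch; the request struct must match req_type):
 *
 *	struct hwrm_func_reset_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *
 * Callers that need the response buffer must instead take hwrm_cmd_lock,
 * call _hwrm_send_message(), copy what they need out of
 * bp->hwrm_cmd_resp_addr, and only then drop the lock, since the
 * response page is reused by every command.
 */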
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	int i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	/* TODO: the async event forwarding bits are not defined yet; the
	 * firmware only checks for a non-zero value to enable async event
	 * forwarding.
	 */
	req.async_event_fwd[0] |= cpu_to_le32(1);
	req.os_type = cpu_to_le16(1);
	req.ver_maj = DRV_VER_MAJ;
	req.ver_min = DRV_VER_MIN;
	req.ver_upd = DRV_VER_UPD;

	if (BNXT_PF(bp)) {
		DECLARE_BITMAP(vf_req_snif_bmap, 256);
		u32 *data = (u32 *)vf_req_snif_bmap;

		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);

		for (i = 0; i < 8; i++)
			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}
static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	/* The tunnel types are enumerated values, not bit flags, so they
	 * must be compared for equality, as in the switch in
	 * bnxt_hwrm_tunnel_dst_port_free() above.
	 */
	if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
	else if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];

	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
	req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
	req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fltr->filter_id = resp->ntuple_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif

static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	u32 rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
				CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
							resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
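/* Example of the aggregation-segment math in bnxt_hwrm_vnic_set_tpa()
 * below: assuming a 1500-byte MTU and 4K pages, mss = 1460, so
 * n = 4096 / 1460 = 2 buffers per page and nsegs = (MAX_SKB_FRAGS - 1) * 2.
 * With MAX_SKB_FRAGS == 17 that is 32 segments, and ilog2(32) = 5 is
 * what gets programmed into max_agg_segs.
 */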
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* The number of segments is programmed in log2 units; the
		 * first packet is not counted in these units.
		 */
		if (mss <= PAGE_SIZE) {
			n = PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / PAGE_SIZE;
			if (mss & (PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
				  BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
				  BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
				  BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;

		req.hash_type = cpu_to_le32(vnic->hash_type);

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
			bnxt_hwrm_vnic_ctx_free_one(bp, i);
	}
	bp->rsscos_nr_ctxs = 0;
}
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
	/* Only RSS support for now TBD: COS & LB */
	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
				  VNIC_CFG_REQ_ENABLES_RSS_RULE);
	req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = cpu_to_le16(0xffff);
	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);

	req.lb_rule = cpu_to_le16(0xffff);
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

	if (bp->flags & BNXT_FLAG_STRIP_VLAN)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	u32 rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		bp->vnic_info[vnic_id].fw_grp_ids[j] =
					bp->grp_info[grp_idx].fw_grp_id;
	}

	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index,
				    u32 stats_ctx_id)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (ring->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		req.cmpl_ring_id =
			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_AGG:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		/* ring_type still holds the HWRM_RING_ALLOC_* value passed
		 * in, not a RING_FREE_REQ_RING_TYPE_* firmware constant,
		 * so match on the former here.
		 */
		switch (ring_type) {
		case HWRM_RING_ALLOC_CMPL:
			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case HWRM_RING_ALLOC_TX:
			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		cpr->cp_doorbell = bp->bar1 + i * 0x80;
		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
	}

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 map_idx = txr->bnapi->index;
		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
					      map_idx, fw_stats_ctx);
		if (rc)
			goto err_out;
		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 map_idx = rxr->bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
					      map_idx, INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = rxr->bnapi->index;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_AGG,
						      map_idx,
						      INVALID_STATS_CTX_ID);
			if (rc)
				goto err_out;

			rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}
static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_CMPL:
			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
				   rc);
			return rc;
		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	return 0;
}
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 grp_idx = txr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_CMPL,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
	u32 buf_tmrs, u16 flags,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	req->flags = cpu_to_le16(flags);
	req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
	req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
	req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
	req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
	req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}
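/* bnxt_hwrm_set_coal() below packs two 16-bit values into each u32
 * argument of bnxt_hwrm_set_coal_params(): the low half applies outside
 * interrupt processing and the high half during it.  For example,
 * max_buf_irq << 16 | max_buf sends max_buf as num_cmpl_dma_aggr and
 * max_buf_irq as num_cmpl_dma_aggr_during_int.
 */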
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;
	u16 max_buf, max_buf_irq;
	u16 buf_tmr, buf_tmr_irq;
	u32 flags;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	/* Each rx completion (2 records) should be DMAed immediately.
	 * DMA 1/4 of the completion buffers at a time.
	 */
	max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
	/* max_buf must not be zero */
	max_buf = clamp_t(u16, max_buf, 1, 63);
	max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
	 * if coal_ticks is less than 25 us.
	 */
	if (bp->rx_coal_ticks < 25)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;

	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);

	/* max_buf must not be zero */
	max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
	max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		req = &req_rx;
		if (!bnapi->rx_ring)
			req = &req_tx;
		req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.update_period_ms = cpu_to_le32(1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	/* propagate any allocation failure to the caller */
	return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!pf->max_hw_ring_grps)
			pf->max_hw_ring_grps = pf->max_tx_rings;
		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		pf->max_vnics = le16_to_cpu(resp->max_vnics);
		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
		if (is_valid_ether_addr(vf->mac_addr))
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		else
			random_ether_addr(bp->dev->dev_addr);

		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!vf->max_hw_ring_grps)
			vf->max_hw_ring_grps = vf->max_tx_rings;
		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		vf->max_vnics = le16_to_cpu(resp->max_vnics);
		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
#endif
	}

	bp->tx_push_thresh = 0;
	if (resp->flags &
	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, *qptr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	qptr = &resp->queue_id0;
	for (i = 0; i < bp->max_tc; i++) {
		bp->q_info[i].queue_id = *qptr++;
		bp->q_info[i].queue_profile = *qptr++;
	}

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc;
	struct hwrm_ver_get_input req = {0};
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	if (resp->hwrm_intf_maj < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
			    resp->hwrm_intf_upd);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	int rc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	bp->vxlan_port_cnt = 0;
	if (bp->nge_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	bp->nge_port_cnt = 0;
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}
static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	if (bp->vnic_info) {
		bnxt_hwrm_clear_vnic_filter(bp);
		/* clear all RSS settings before freeing the vnic contexts */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
		/* undo the TPA settings before freeing the vnics */
		if (bp->flags & BNXT_FLAG_TPA)
			bnxt_set_tpa(bp, false);
		bnxt_hwrm_vnic_free(bp);
	}
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	int rc;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
		}
	}

vnic_setup_err:
	return rc;
}

static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}

static int bnxt_cfg_rx_mode(struct bnxt *);
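/* Chip bring-up order in bnxt_init_chip() below: stat contexts (on IRQ
 * re-init), then HW rings, ring groups, the default vnic 0 with its
 * RSS/HDS/TPA state, the unicast L2 filter, the RX mode, and finally
 * interrupt coalescing.  Everything allocated here is unwound through
 * bnxt_hwrm_resource_free() on failure.
 */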
4084 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 4085 rc); 4086 4087 return 0; 4088 4089 err_out: 4090 bnxt_hwrm_resource_free(bp, 0, true); 4091 4092 return rc; 4093 } 4094 4095 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 4096 { 4097 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 4098 return 0; 4099 } 4100 4101 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 4102 { 4103 bnxt_init_rx_rings(bp); 4104 bnxt_init_tx_rings(bp); 4105 bnxt_init_ring_grps(bp, irq_re_init); 4106 bnxt_init_vnics(bp); 4107 4108 return bnxt_init_chip(bp, irq_re_init); 4109 } 4110 4111 static void bnxt_disable_int(struct bnxt *bp) 4112 { 4113 int i; 4114 4115 if (!bp->bnapi) 4116 return; 4117 4118 for (i = 0; i < bp->cp_nr_rings; i++) { 4119 struct bnxt_napi *bnapi = bp->bnapi[i]; 4120 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4121 4122 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 4123 } 4124 } 4125 4126 static void bnxt_enable_int(struct bnxt *bp) 4127 { 4128 int i; 4129 4130 atomic_set(&bp->intr_sem, 0); 4131 for (i = 0; i < bp->cp_nr_rings; i++) { 4132 struct bnxt_napi *bnapi = bp->bnapi[i]; 4133 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4134 4135 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 4136 } 4137 } 4138 4139 static int bnxt_set_real_num_queues(struct bnxt *bp) 4140 { 4141 int rc; 4142 struct net_device *dev = bp->dev; 4143 4144 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); 4145 if (rc) 4146 return rc; 4147 4148 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 4149 if (rc) 4150 return rc; 4151 4152 #ifdef CONFIG_RFS_ACCEL 4153 if (bp->flags & BNXT_FLAG_RFS) 4154 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 4155 #endif 4156 4157 return rc; 4158 } 4159 4160 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 4161 bool shared) 4162 { 4163 int _rx = *rx, _tx = *tx; 4164 4165 if (shared) { 4166 *rx = min_t(int, _rx, max); 4167 *tx = min_t(int, _tx, max); 4168 } else { 4169 if (max < 2) 4170 return -ENOMEM; 4171 4172 while (_rx + _tx > max) { 4173 if (_rx > _tx && _rx > 1) 4174 _rx--; 4175 else if (_tx > 1) 4176 _tx--; 4177 } 4178 *rx = _rx; 4179 *tx = _tx; 4180 } 4181 return 0; 4182 } 4183 4184 static int bnxt_setup_msix(struct bnxt *bp) 4185 { 4186 struct msix_entry *msix_ent; 4187 struct net_device *dev = bp->dev; 4188 int i, total_vecs, rc = 0, min = 1; 4189 const int len = sizeof(bp->irq_tbl[0].name); 4190 4191 bp->flags &= ~BNXT_FLAG_USING_MSIX; 4192 total_vecs = bp->cp_nr_rings; 4193 4194 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 4195 if (!msix_ent) 4196 return -ENOMEM; 4197 4198 for (i = 0; i < total_vecs; i++) { 4199 msix_ent[i].entry = i; 4200 msix_ent[i].vector = 0; 4201 } 4202 4203 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 4204 min = 2; 4205 4206 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 4207 if (total_vecs < 0) { 4208 rc = -ENODEV; 4209 goto msix_setup_exit; 4210 } 4211 4212 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 4213 if (bp->irq_tbl) { 4214 int tcs; 4215 4216 /* Trim rings based upon num of vectors allocated */ 4217 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 4218 total_vecs, min == 1); 4219 if (rc) 4220 goto msix_setup_exit; 4221 4222 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 4223 tcs = netdev_get_num_tc(dev); 4224 if (tcs > 1) { 4225 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; 4226 if (bp->tx_nr_rings_per_tc == 0) { 4227 netdev_reset_tc(dev); 4228 
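/* Too few TX rings to give each TC its own set; the TC config was just dropped above, so fall back to a single set of rings. */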
bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 4229 } else { 4230 int i, off, count; 4231 4232 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; 4233 for (i = 0; i < tcs; i++) { 4234 count = bp->tx_nr_rings_per_tc; 4235 off = i * count; 4236 netdev_set_tc_queue(dev, i, count, off); 4237 } 4238 } 4239 } 4240 bp->cp_nr_rings = total_vecs; 4241 4242 for (i = 0; i < bp->cp_nr_rings; i++) { 4243 char *attr; 4244 4245 bp->irq_tbl[i].vector = msix_ent[i].vector; 4246 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 4247 attr = "TxRx"; 4248 else if (i < bp->rx_nr_rings) 4249 attr = "rx"; 4250 else 4251 attr = "tx"; 4252 4253 snprintf(bp->irq_tbl[i].name, len, 4254 "%s-%s-%d", dev->name, attr, i); 4255 bp->irq_tbl[i].handler = bnxt_msix; 4256 } 4257 rc = bnxt_set_real_num_queues(bp); 4258 if (rc) 4259 goto msix_setup_exit; 4260 } else { 4261 rc = -ENOMEM; 4262 goto msix_setup_exit; 4263 } 4264 bp->flags |= BNXT_FLAG_USING_MSIX; 4265 kfree(msix_ent); 4266 return 0; 4267 4268 msix_setup_exit: 4269 netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc); 4270 pci_disable_msix(bp->pdev); 4271 kfree(msix_ent); 4272 return rc; 4273 } 4274 4275 static int bnxt_setup_inta(struct bnxt *bp) 4276 { 4277 int rc; 4278 const int len = sizeof(bp->irq_tbl[0].name); 4279 4280 if (netdev_get_num_tc(bp->dev)) 4281 netdev_reset_tc(bp->dev); 4282 4283 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 4284 if (!bp->irq_tbl) { 4285 rc = -ENOMEM; 4286 return rc; 4287 } 4288 bp->rx_nr_rings = 1; 4289 bp->tx_nr_rings = 1; 4290 bp->cp_nr_rings = 1; 4291 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 4292 bp->flags |= BNXT_FLAG_SHARED_RINGS; 4293 bp->irq_tbl[0].vector = bp->pdev->irq; 4294 snprintf(bp->irq_tbl[0].name, len, 4295 "%s-%s-%d", bp->dev->name, "TxRx", 0); 4296 bp->irq_tbl[0].handler = bnxt_inta; 4297 rc = bnxt_set_real_num_queues(bp); 4298 return rc; 4299 } 4300 4301 static int bnxt_setup_int_mode(struct bnxt *bp) 4302 { 4303 int rc = 0; 4304 4305 if (bp->flags & BNXT_FLAG_MSIX_CAP) 4306 rc = bnxt_setup_msix(bp); 4307 4308 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { 4309 /* fallback to INTA */ 4310 rc = bnxt_setup_inta(bp); 4311 } 4312 return rc; 4313 } 4314 4315 static void bnxt_free_irq(struct bnxt *bp) 4316 { 4317 struct bnxt_irq *irq; 4318 int i; 4319 4320 #ifdef CONFIG_RFS_ACCEL 4321 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 4322 bp->dev->rx_cpu_rmap = NULL; 4323 #endif 4324 if (!bp->irq_tbl) 4325 return; 4326 4327 for (i = 0; i < bp->cp_nr_rings; i++) { 4328 irq = &bp->irq_tbl[i]; 4329 if (irq->requested) 4330 free_irq(irq->vector, bp->bnapi[i]); 4331 irq->requested = 0; 4332 } 4333 if (bp->flags & BNXT_FLAG_USING_MSIX) 4334 pci_disable_msix(bp->pdev); 4335 kfree(bp->irq_tbl); 4336 bp->irq_tbl = NULL; 4337 } 4338 4339 static int bnxt_request_irq(struct bnxt *bp) 4340 { 4341 int i, j, rc = 0; 4342 unsigned long flags = 0; 4343 #ifdef CONFIG_RFS_ACCEL 4344 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; 4345 #endif 4346 4347 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 4348 flags = IRQF_SHARED; 4349 4350 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 4351 struct bnxt_irq *irq = &bp->irq_tbl[i]; 4352 #ifdef CONFIG_RFS_ACCEL 4353 if (rmap && bp->bnapi[i]->rx_ring) { 4354 rc = irq_cpu_rmap_add(rmap, irq->vector); 4355 if (rc) 4356 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 4357 j); 4358 j++; 4359 } 4360 #endif 4361 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 4362 bp->bnapi[i]); 4363 if (rc) 4364 break; 4365 4366 irq->requested = 1; 4367 } 4368 return rc; 4369 } 4370 4371 static void 
bnxt_del_napi(struct bnxt *bp) 4372 { 4373 int i; 4374 4375 if (!bp->bnapi) 4376 return; 4377 4378 for (i = 0; i < bp->cp_nr_rings; i++) { 4379 struct bnxt_napi *bnapi = bp->bnapi[i]; 4380 4381 napi_hash_del(&bnapi->napi); 4382 netif_napi_del(&bnapi->napi); 4383 } 4384 } 4385 4386 static void bnxt_init_napi(struct bnxt *bp) 4387 { 4388 int i; 4389 struct bnxt_napi *bnapi; 4390 4391 if (bp->flags & BNXT_FLAG_USING_MSIX) { 4392 for (i = 0; i < bp->cp_nr_rings; i++) { 4393 bnapi = bp->bnapi[i]; 4394 netif_napi_add(bp->dev, &bnapi->napi, 4395 bnxt_poll, 64); 4396 } 4397 } else { 4398 bnapi = bp->bnapi[0]; 4399 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 4400 } 4401 } 4402 4403 static void bnxt_disable_napi(struct bnxt *bp) 4404 { 4405 int i; 4406 4407 if (!bp->bnapi) 4408 return; 4409 4410 for (i = 0; i < bp->cp_nr_rings; i++) { 4411 napi_disable(&bp->bnapi[i]->napi); 4412 bnxt_disable_poll(bp->bnapi[i]); 4413 } 4414 } 4415 4416 static void bnxt_enable_napi(struct bnxt *bp) 4417 { 4418 int i; 4419 4420 for (i = 0; i < bp->cp_nr_rings; i++) { 4421 bnxt_enable_poll(bp->bnapi[i]); 4422 napi_enable(&bp->bnapi[i]->napi); 4423 } 4424 } 4425 4426 static void bnxt_tx_disable(struct bnxt *bp) 4427 { 4428 int i; 4429 struct bnxt_tx_ring_info *txr; 4430 struct netdev_queue *txq; 4431 4432 if (bp->tx_ring) { 4433 for (i = 0; i < bp->tx_nr_rings; i++) { 4434 txr = &bp->tx_ring[i]; 4435 txq = netdev_get_tx_queue(bp->dev, i); 4436 __netif_tx_lock(txq, smp_processor_id()); 4437 txr->dev_state = BNXT_DEV_STATE_CLOSING; 4438 __netif_tx_unlock(txq); 4439 } 4440 } 4441 /* Stop all TX queues */ 4442 netif_tx_disable(bp->dev); 4443 netif_carrier_off(bp->dev); 4444 } 4445 4446 static void bnxt_tx_enable(struct bnxt *bp) 4447 { 4448 int i; 4449 struct bnxt_tx_ring_info *txr; 4450 struct netdev_queue *txq; 4451 4452 for (i = 0; i < bp->tx_nr_rings; i++) { 4453 txr = &bp->tx_ring[i]; 4454 txq = netdev_get_tx_queue(bp->dev, i); 4455 txr->dev_state = 0; 4456 } 4457 netif_tx_wake_all_queues(bp->dev); 4458 if (bp->link_info.link_up) 4459 netif_carrier_on(bp->dev); 4460 } 4461 4462 static void bnxt_report_link(struct bnxt *bp) 4463 { 4464 if (bp->link_info.link_up) { 4465 const char *duplex; 4466 const char *flow_ctrl; 4467 u16 speed; 4468 4469 netif_carrier_on(bp->dev); 4470 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 4471 duplex = "full"; 4472 else 4473 duplex = "half"; 4474 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 4475 flow_ctrl = "ON - receive & transmit"; 4476 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 4477 flow_ctrl = "ON - transmit"; 4478 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 4479 flow_ctrl = "ON - receive"; 4480 else 4481 flow_ctrl = "none"; 4482 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 4483 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 4484 speed, duplex, flow_ctrl); 4485 } else { 4486 netif_carrier_off(bp->dev); 4487 netdev_err(bp->dev, "NIC Link is Down\n"); 4488 } 4489 } 4490 4491 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 4492 { 4493 int rc = 0; 4494 struct bnxt_link_info *link_info = &bp->link_info; 4495 struct hwrm_port_phy_qcfg_input req = {0}; 4496 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4497 u8 link_up = link_info->link_up; 4498 4499 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 4500 4501 mutex_lock(&bp->hwrm_cmd_lock); 4502 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4503 if (rc) { 4504 mutex_unlock(&bp->hwrm_cmd_lock); 
4505 return rc; 4506 } 4507 4508 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 4509 link_info->phy_link_status = resp->link; 4510 link_info->duplex = resp->duplex; 4511 link_info->pause = resp->pause; 4512 link_info->auto_mode = resp->auto_mode; 4513 link_info->auto_pause_setting = resp->auto_pause; 4514 link_info->lp_pause = resp->link_partner_adv_pause; 4515 link_info->force_pause_setting = resp->force_pause; 4516 link_info->duplex_setting = resp->duplex; 4517 if (link_info->phy_link_status == BNXT_LINK_LINK) 4518 link_info->link_speed = le16_to_cpu(resp->link_speed); 4519 else 4520 link_info->link_speed = 0; 4521 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 4522 link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed); 4523 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 4524 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 4525 link_info->lp_auto_link_speeds = 4526 le16_to_cpu(resp->link_partner_adv_speeds); 4527 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 4528 link_info->phy_ver[0] = resp->phy_maj; 4529 link_info->phy_ver[1] = resp->phy_min; 4530 link_info->phy_ver[2] = resp->phy_bld; 4531 link_info->media_type = resp->media_type; 4532 link_info->transceiver = resp->transceiver_type; 4533 link_info->phy_addr = resp->phy_addr; 4534 4535 /* TODO: need to add more logic to report VF link */ 4536 if (chng_link_state) { 4537 if (link_info->phy_link_status == BNXT_LINK_LINK) 4538 link_info->link_up = 1; 4539 else 4540 link_info->link_up = 0; 4541 if (link_up != link_info->link_up) 4542 bnxt_report_link(bp); 4543 } else { 4544 /* always link down if not required to update link state */ 4545 link_info->link_up = 0; 4546 } 4547 mutex_unlock(&bp->hwrm_cmd_lock); 4548 return 0; 4549 } 4550 4551 static void 4552 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 4553 { 4554 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 4555 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 4556 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 4557 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 4558 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 4559 req->enables |= 4560 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 4561 } else { 4562 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 4563 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 4564 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 4565 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 4566 req->enables |= 4567 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 4568 } 4569 } 4570 4571 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 4572 struct hwrm_port_phy_cfg_input *req) 4573 { 4574 u8 autoneg = bp->link_info.autoneg; 4575 u16 fw_link_speed = bp->link_info.req_link_speed; 4576 u32 advertising = bp->link_info.advertising; 4577 4578 if (autoneg & BNXT_AUTONEG_SPEED) { 4579 req->auto_mode |= 4580 PORT_PHY_CFG_REQ_AUTO_MODE_MASK; 4581 4582 req->enables |= cpu_to_le32( 4583 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 4584 req->auto_link_speed_mask = cpu_to_le16(advertising); 4585 4586 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 4587 req->flags |= 4588 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 4589 } else { 4590 req->force_link_speed = cpu_to_le16(fw_link_speed); 4591 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 4592 } 4593 4594 /* currently don't support half duplex */ 4595 req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL; 4596 req->enables
|= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX); 4597 /* tell chimp that the setting takes effect immediately */ 4598 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 4599 } 4600 4601 int bnxt_hwrm_set_pause(struct bnxt *bp) 4602 { 4603 struct hwrm_port_phy_cfg_input req = {0}; 4604 int rc; 4605 4606 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 4607 bnxt_hwrm_set_pause_common(bp, &req); 4608 4609 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 4610 bp->link_info.force_link_chng) 4611 bnxt_hwrm_set_link_common(bp, &req); 4612 4613 mutex_lock(&bp->hwrm_cmd_lock); 4614 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4615 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 4616 /* since changing the pause setting doesn't trigger any link 4617 * change event, the driver needs to update the current pause 4618 * result upon successful return of the phy_cfg command 4619 */ 4620 bp->link_info.pause = 4621 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 4622 bp->link_info.auto_pause_setting = 0; 4623 if (!bp->link_info.force_link_chng) 4624 bnxt_report_link(bp); 4625 } 4626 bp->link_info.force_link_chng = false; 4627 mutex_unlock(&bp->hwrm_cmd_lock); 4628 return rc; 4629 } 4630 4631 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) 4632 { 4633 struct hwrm_port_phy_cfg_input req = {0}; 4634 4635 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 4636 if (set_pause) 4637 bnxt_hwrm_set_pause_common(bp, &req); 4638 4639 bnxt_hwrm_set_link_common(bp, &req); 4640 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4641 } 4642 4643 static int bnxt_update_phy_setting(struct bnxt *bp) 4644 { 4645 int rc; 4646 bool update_link = false; 4647 bool update_pause = false; 4648 struct bnxt_link_info *link_info = &bp->link_info; 4649 4650 rc = bnxt_update_link(bp, true); 4651 if (rc) { 4652 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 4653 rc); 4654 return rc; 4655 } 4656 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4657 link_info->auto_pause_setting != link_info->req_flow_ctrl) 4658 update_pause = true; 4659 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4660 link_info->force_pause_setting != link_info->req_flow_ctrl) 4661 update_pause = true; 4662 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4663 if (BNXT_AUTO_MODE(link_info->auto_mode)) 4664 update_link = true; 4665 if (link_info->req_link_speed != link_info->force_link_speed) 4666 update_link = true; 4667 if (link_info->req_duplex != link_info->duplex_setting) 4668 update_link = true; 4669 } else { 4670 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 4671 update_link = true; 4672 if (link_info->advertising != link_info->auto_link_speeds) 4673 update_link = true; 4674 } 4675 4676 if (update_link) 4677 rc = bnxt_hwrm_set_link_setting(bp, update_pause); 4678 else if (update_pause) 4679 rc = bnxt_hwrm_set_pause(bp); 4680 if (rc) { 4681 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 4682 rc); 4683 return rc; 4684 } 4685 4686 return rc; 4687 } 4688 4689 /* Common routine to pre-map certain register blocks to different GRC windows. 4690 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 4691 in the PF and 3 windows in the VF can be customized to map different 4692 register blocks.
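 * For example, bnxt_preset_reg_win() below points GRC window #4 at the CAG register block.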
4693 */ 4694 static void bnxt_preset_reg_win(struct bnxt *bp) 4695 { 4696 if (BNXT_PF(bp)) { 4697 /* CAG registers map to GRC window #4 */ 4698 writel(BNXT_CAG_REG_BASE, 4699 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 4700 } 4701 } 4702 4703 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 4704 { 4705 int rc = 0; 4706 4707 bnxt_preset_reg_win(bp); 4708 netif_carrier_off(bp->dev); 4709 if (irq_re_init) { 4710 rc = bnxt_setup_int_mode(bp); 4711 if (rc) { 4712 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 4713 rc); 4714 return rc; 4715 } 4716 } 4717 if ((bp->flags & BNXT_FLAG_RFS) && 4718 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 4719 /* disable RFS if falling back to INTA */ 4720 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 4721 bp->flags &= ~BNXT_FLAG_RFS; 4722 } 4723 4724 rc = bnxt_alloc_mem(bp, irq_re_init); 4725 if (rc) { 4726 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 4727 goto open_err_free_mem; 4728 } 4729 4730 if (irq_re_init) { 4731 bnxt_init_napi(bp); 4732 rc = bnxt_request_irq(bp); 4733 if (rc) { 4734 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 4735 goto open_err; 4736 } 4737 } 4738 4739 bnxt_enable_napi(bp); 4740 4741 rc = bnxt_init_nic(bp, irq_re_init); 4742 if (rc) { 4743 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 4744 goto open_err; 4745 } 4746 4747 if (link_re_init) { 4748 rc = bnxt_update_phy_setting(bp); 4749 if (rc) 4750 netdev_warn(bp->dev, "failed to update phy settings\n"); 4751 } 4752 4753 if (irq_re_init) { 4754 #if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) 4755 vxlan_get_rx_port(bp->dev); 4756 #endif 4757 if (!bnxt_hwrm_tunnel_dst_port_alloc( 4758 bp, htons(0x17c1), 4759 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) 4760 bp->nge_port_cnt = 1; 4761 } 4762 4763 set_bit(BNXT_STATE_OPEN, &bp->state); 4764 bnxt_enable_int(bp); 4765 /* Enable TX queues */ 4766 bnxt_tx_enable(bp); 4767 mod_timer(&bp->timer, jiffies + bp->current_interval); 4768 bnxt_update_link(bp, true); 4769 4770 return 0; 4771 4772 open_err: 4773 bnxt_disable_napi(bp); 4774 bnxt_del_napi(bp); 4775 4776 open_err_free_mem: 4777 bnxt_free_skbs(bp); 4778 bnxt_free_irq(bp); 4779 bnxt_free_mem(bp, true); 4780 return rc; 4781 } 4782 4783 /* rtnl_lock held */ 4784 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 4785 { 4786 int rc = 0; 4787 4788 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 4789 if (rc) { 4790 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 4791 dev_close(bp->dev); 4792 } 4793 return rc; 4794 } 4795 4796 static int bnxt_open(struct net_device *dev) 4797 { 4798 struct bnxt *bp = netdev_priv(dev); 4799 int rc = 0; 4800 4801 rc = bnxt_hwrm_func_reset(bp); 4802 if (rc) { 4803 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", 4804 rc); 4805 rc = -1; 4806 return rc; 4807 } 4808 return __bnxt_open_nic(bp, true, true); 4809 } 4810 4811 static void bnxt_disable_int_sync(struct bnxt *bp) 4812 { 4813 int i; 4814 4815 atomic_inc(&bp->intr_sem); 4816 if (!netif_running(bp->dev)) 4817 return; 4818 4819 bnxt_disable_int(bp); 4820 for (i = 0; i < bp->cp_nr_rings; i++) 4821 synchronize_irq(bp->irq_tbl[i].vector); 4822 } 4823 4824 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 4825 { 4826 int rc = 0; 4827 4828 #ifdef CONFIG_BNXT_SRIOV 4829 if (bp->sriov_cfg) { 4830 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 4831 !bp->sriov_cfg, 4832 BNXT_SRIOV_CFG_WAIT_TMO); 4833 if (rc) 4834 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to 
complete!\n"); 4835 } 4836 #endif 4837 /* Change device state to avoid TX queue wake up's */ 4838 bnxt_tx_disable(bp); 4839 4840 clear_bit(BNXT_STATE_OPEN, &bp->state); 4841 smp_mb__after_atomic(); 4842 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) 4843 msleep(20); 4844 4845 /* Flush rings before disabling interrupts */ 4846 bnxt_shutdown_nic(bp, irq_re_init); 4847 4848 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 4849 4850 bnxt_disable_napi(bp); 4851 bnxt_disable_int_sync(bp); 4852 del_timer_sync(&bp->timer); 4853 bnxt_free_skbs(bp); 4854 4855 if (irq_re_init) { 4856 bnxt_free_irq(bp); 4857 bnxt_del_napi(bp); 4858 } 4859 bnxt_free_mem(bp, irq_re_init); 4860 return rc; 4861 } 4862 4863 static int bnxt_close(struct net_device *dev) 4864 { 4865 struct bnxt *bp = netdev_priv(dev); 4866 4867 bnxt_close_nic(bp, true, true); 4868 return 0; 4869 } 4870 4871 /* rtnl_lock held */ 4872 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4873 { 4874 switch (cmd) { 4875 case SIOCGMIIPHY: 4876 /* fallthru */ 4877 case SIOCGMIIREG: { 4878 if (!netif_running(dev)) 4879 return -EAGAIN; 4880 4881 return 0; 4882 } 4883 4884 case SIOCSMIIREG: 4885 if (!netif_running(dev)) 4886 return -EAGAIN; 4887 4888 return 0; 4889 4890 default: 4891 /* do nothing */ 4892 break; 4893 } 4894 return -EOPNOTSUPP; 4895 } 4896 4897 static struct rtnl_link_stats64 * 4898 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 4899 { 4900 u32 i; 4901 struct bnxt *bp = netdev_priv(dev); 4902 4903 memset(stats, 0, sizeof(struct rtnl_link_stats64)); 4904 4905 if (!bp->bnapi) 4906 return stats; 4907 4908 /* TODO check if we need to synchronize with bnxt_close path */ 4909 for (i = 0; i < bp->cp_nr_rings; i++) { 4910 struct bnxt_napi *bnapi = bp->bnapi[i]; 4911 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4912 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 4913 4914 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 4915 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 4916 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 4917 4918 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 4919 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 4920 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 4921 4922 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 4923 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 4924 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 4925 4926 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 4927 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 4928 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 4929 4930 stats->rx_missed_errors += 4931 le64_to_cpu(hw_stats->rx_discard_pkts); 4932 4933 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 4934 4935 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 4936 } 4937 4938 if (bp->flags & BNXT_FLAG_PORT_STATS) { 4939 struct rx_port_stats *rx = bp->hw_rx_port_stats; 4940 struct tx_port_stats *tx = bp->hw_tx_port_stats; 4941 4942 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 4943 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 4944 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 4945 le64_to_cpu(rx->rx_ovrsz_frames) + 4946 le64_to_cpu(rx->rx_runt_frames); 4947 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 4948 le64_to_cpu(rx->rx_jbr_frames); 4949 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 4950 stats->tx_fifo_errors = 
le64_to_cpu(tx->tx_fifo_underruns); 4951 stats->tx_errors = le64_to_cpu(tx->tx_err); 4952 } 4953 4954 return stats; 4955 } 4956 4957 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 4958 { 4959 struct net_device *dev = bp->dev; 4960 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 4961 struct netdev_hw_addr *ha; 4962 u8 *haddr; 4963 int mc_count = 0; 4964 bool update = false; 4965 int off = 0; 4966 4967 netdev_for_each_mc_addr(ha, dev) { 4968 if (mc_count >= BNXT_MAX_MC_ADDRS) { 4969 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 4970 vnic->mc_list_count = 0; 4971 return false; 4972 } 4973 haddr = ha->addr; 4974 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 4975 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 4976 update = true; 4977 } 4978 off += ETH_ALEN; 4979 mc_count++; 4980 } 4981 if (mc_count) 4982 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 4983 4984 if (mc_count != vnic->mc_list_count) { 4985 vnic->mc_list_count = mc_count; 4986 update = true; 4987 } 4988 return update; 4989 } 4990 4991 static bool bnxt_uc_list_updated(struct bnxt *bp) 4992 { 4993 struct net_device *dev = bp->dev; 4994 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 4995 struct netdev_hw_addr *ha; 4996 int off = 0; 4997 4998 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 4999 return true; 5000 5001 netdev_for_each_uc_addr(ha, dev) { 5002 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 5003 return true; 5004 5005 off += ETH_ALEN; 5006 } 5007 return false; 5008 } 5009 5010 static void bnxt_set_rx_mode(struct net_device *dev) 5011 { 5012 struct bnxt *bp = netdev_priv(dev); 5013 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5014 u32 mask = vnic->rx_mask; 5015 bool mc_update = false; 5016 bool uc_update; 5017 5018 if (!netif_running(dev)) 5019 return; 5020 5021 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 5022 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 5023 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 5024 5025 /* Only allow PF to be in promiscuous mode */ 5026 if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp)) 5027 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5028 5029 uc_update = bnxt_uc_list_updated(bp); 5030 5031 if (dev->flags & IFF_ALLMULTI) { 5032 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 5033 vnic->mc_list_count = 0; 5034 } else { 5035 mc_update = bnxt_mc_list_updated(bp, &mask); 5036 } 5037 5038 if (mask != vnic->rx_mask || uc_update || mc_update) { 5039 vnic->rx_mask = mask; 5040 5041 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 5042 schedule_work(&bp->sp_task); 5043 } 5044 } 5045 5046 static int bnxt_cfg_rx_mode(struct bnxt *bp) 5047 { 5048 struct net_device *dev = bp->dev; 5049 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5050 struct netdev_hw_addr *ha; 5051 int i, off = 0, rc; 5052 bool uc_update; 5053 5054 netif_addr_lock_bh(dev); 5055 uc_update = bnxt_uc_list_updated(bp); 5056 netif_addr_unlock_bh(dev); 5057 5058 if (!uc_update) 5059 goto skip_uc; 5060 5061 mutex_lock(&bp->hwrm_cmd_lock); 5062 for (i = 1; i < vnic->uc_filter_count; i++) { 5063 struct hwrm_cfa_l2_filter_free_input req = {0}; 5064 5065 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 5066 -1); 5067 5068 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 5069 5070 rc = _hwrm_send_message(bp, &req, sizeof(req), 5071 HWRM_CMD_TIMEOUT); 5072 } 5073 mutex_unlock(&bp->hwrm_cmd_lock); 5074 5075 vnic->uc_filter_count = 1; 5076 5077 netif_addr_lock_bh(dev); 5078 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 5079 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5080 } else { 
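/* Rebuild the shadow UC list from the stack's address list; filter 0 is always the primary MAC programmed in bnxt_init_chip(), so secondary addresses are added starting at index 1. */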
5081 netdev_for_each_uc_addr(ha, dev) { 5082 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 5083 off += ETH_ALEN; 5084 vnic->uc_filter_count++; 5085 } 5086 } 5087 netif_addr_unlock_bh(dev); 5088 5089 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 5090 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 5091 if (rc) { 5092 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 5093 rc); 5094 vnic->uc_filter_count = i; 5095 return rc; 5096 } 5097 } 5098 5099 skip_uc: 5100 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 5101 if (rc) 5102 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 5103 rc); 5104 5105 return rc; 5106 } 5107 5108 static bool bnxt_rfs_capable(struct bnxt *bp) 5109 { 5110 #ifdef CONFIG_RFS_ACCEL 5111 struct bnxt_pf_info *pf = &bp->pf; 5112 int vnics; 5113 5114 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP)) 5115 return false; 5116 5117 vnics = 1 + bp->rx_nr_rings; 5118 if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) 5119 return false; 5120 5121 return true; 5122 #else 5123 return false; 5124 #endif 5125 } 5126 5127 static netdev_features_t bnxt_fix_features(struct net_device *dev, 5128 netdev_features_t features) 5129 { 5130 struct bnxt *bp = netdev_priv(dev); 5131 5132 if (!bnxt_rfs_capable(bp)) 5133 features &= ~NETIF_F_NTUPLE; 5134 return features; 5135 } 5136 5137 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 5138 { 5139 struct bnxt *bp = netdev_priv(dev); 5140 u32 flags = bp->flags; 5141 u32 changes; 5142 int rc = 0; 5143 bool re_init = false; 5144 bool update_tpa = false; 5145 5146 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 5147 if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0)) 5148 flags |= BNXT_FLAG_GRO; 5149 if (features & NETIF_F_LRO) 5150 flags |= BNXT_FLAG_LRO; 5151 5152 if (features & NETIF_F_HW_VLAN_CTAG_RX) 5153 flags |= BNXT_FLAG_STRIP_VLAN; 5154 5155 if (features & NETIF_F_NTUPLE) 5156 flags |= BNXT_FLAG_RFS; 5157 5158 changes = flags ^ bp->flags; 5159 if (changes & BNXT_FLAG_TPA) { 5160 update_tpa = true; 5161 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 5162 (flags & BNXT_FLAG_TPA) == 0) 5163 re_init = true; 5164 } 5165 5166 if (changes & ~BNXT_FLAG_TPA) 5167 re_init = true; 5168 5169 if (flags != bp->flags) { 5170 u32 old_flags = bp->flags; 5171 5172 bp->flags = flags; 5173 5174 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 5175 if (update_tpa) 5176 bnxt_set_ring_params(bp); 5177 return rc; 5178 } 5179 5180 if (re_init) { 5181 bnxt_close_nic(bp, false, false); 5182 if (update_tpa) 5183 bnxt_set_ring_params(bp); 5184 5185 return bnxt_open_nic(bp, false, false); 5186 } 5187 if (update_tpa) { 5188 rc = bnxt_set_tpa(bp, 5189 (flags & BNXT_FLAG_TPA) ? 
5190 true : false); 5191 if (rc) 5192 bp->flags = old_flags; 5193 } 5194 } 5195 return rc; 5196 } 5197 5198 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 5199 { 5200 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 5201 int i = bnapi->index; 5202 5203 if (!txr) 5204 return; 5205 5206 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 5207 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 5208 txr->tx_cons); 5209 } 5210 5211 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 5212 { 5213 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 5214 int i = bnapi->index; 5215 5216 if (!rxr) 5217 return; 5218 5219 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 5220 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 5221 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 5222 rxr->rx_sw_agg_prod); 5223 } 5224 5225 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 5226 { 5227 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5228 int i = bnapi->index; 5229 5230 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 5231 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 5232 } 5233 5234 static void bnxt_dbg_dump_states(struct bnxt *bp) 5235 { 5236 int i; 5237 struct bnxt_napi *bnapi; 5238 5239 for (i = 0; i < bp->cp_nr_rings; i++) { 5240 bnapi = bp->bnapi[i]; 5241 if (netif_msg_drv(bp)) { 5242 bnxt_dump_tx_sw_state(bnapi); 5243 bnxt_dump_rx_sw_state(bnapi); 5244 bnxt_dump_cp_sw_state(bnapi); 5245 } 5246 } 5247 } 5248 5249 static void bnxt_reset_task(struct bnxt *bp) 5250 { 5251 bnxt_dbg_dump_states(bp); 5252 if (netif_running(bp->dev)) { 5253 bnxt_close_nic(bp, false, false); 5254 bnxt_open_nic(bp, false, false); 5255 } 5256 } 5257 5258 static void bnxt_tx_timeout(struct net_device *dev) 5259 { 5260 struct bnxt *bp = netdev_priv(dev); 5261 5262 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 5263 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 5264 schedule_work(&bp->sp_task); 5265 } 5266 5267 #ifdef CONFIG_NET_POLL_CONTROLLER 5268 static void bnxt_poll_controller(struct net_device *dev) 5269 { 5270 struct bnxt *bp = netdev_priv(dev); 5271 int i; 5272 5273 for (i = 0; i < bp->cp_nr_rings; i++) { 5274 struct bnxt_irq *irq = &bp->irq_tbl[i]; 5275 5276 disable_irq(irq->vector); 5277 irq->handler(irq->vector, bp->bnapi[i]); 5278 enable_irq(irq->vector); 5279 } 5280 } 5281 #endif 5282 5283 static void bnxt_timer(unsigned long data) 5284 { 5285 struct bnxt *bp = (struct bnxt *)data; 5286 struct net_device *dev = bp->dev; 5287 5288 if (!netif_running(dev)) 5289 return; 5290 5291 if (atomic_read(&bp->intr_sem) != 0) 5292 goto bnxt_restart_timer; 5293 5294 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) { 5295 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 5296 schedule_work(&bp->sp_task); 5297 } 5298 bnxt_restart_timer: 5299 mod_timer(&bp->timer, jiffies + bp->current_interval); 5300 } 5301 5302 static void bnxt_cfg_ntp_filters(struct bnxt *); 5303 5304 static void bnxt_sp_task(struct work_struct *work) 5305 { 5306 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 5307 int rc; 5308 5309 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5310 smp_mb__after_atomic(); 5311 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 5312 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5313 return; 5314 } 5315 5316 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 5317 bnxt_cfg_rx_mode(bp); 5318 5319 if 
(test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 5320 bnxt_cfg_ntp_filters(bp); 5321 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 5322 rc = bnxt_update_link(bp, true); 5323 if (rc) 5324 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 5325 rc); 5326 } 5327 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 5328 bnxt_hwrm_exec_fwd_req(bp); 5329 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 5330 bnxt_hwrm_tunnel_dst_port_alloc( 5331 bp, bp->vxlan_port, 5332 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 5333 } 5334 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 5335 bnxt_hwrm_tunnel_dst_port_free( 5336 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 5337 } 5338 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { 5339 /* bnxt_reset_task() calls bnxt_close_nic() which waits 5340 * for BNXT_STATE_IN_SP_TASK to clear. 5341 */ 5342 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5343 rtnl_lock(); 5344 bnxt_reset_task(bp); 5345 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5346 rtnl_unlock(); 5347 } 5348 5349 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 5350 bnxt_hwrm_port_qstats(bp); 5351 5352 smp_mb__before_atomic(); 5353 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5354 } 5355 5356 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 5357 { 5358 int rc; 5359 struct bnxt *bp = netdev_priv(dev); 5360 5361 SET_NETDEV_DEV(dev, &pdev->dev); 5362 5363 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 5364 rc = pci_enable_device(pdev); 5365 if (rc) { 5366 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 5367 goto init_err; 5368 } 5369 5370 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 5371 dev_err(&pdev->dev, 5372 "Cannot find PCI device base address, aborting\n"); 5373 rc = -ENODEV; 5374 goto init_err_disable; 5375 } 5376 5377 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 5378 if (rc) { 5379 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 5380 goto init_err_disable; 5381 } 5382 5383 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 5384 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 5385 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); rc = -EIO; 5386 goto init_err_disable; 5387 } 5388 5389 pci_set_master(pdev); 5390 5391 bp->dev = dev; 5392 bp->pdev = pdev; 5393 5394 bp->bar0 = pci_ioremap_bar(pdev, 0); 5395 if (!bp->bar0) { 5396 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 5397 rc = -ENOMEM; 5398 goto init_err_release; 5399 } 5400 5401 bp->bar1 = pci_ioremap_bar(pdev, 2); 5402 if (!bp->bar1) { 5403 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 5404 rc = -ENOMEM; 5405 goto init_err_release; 5406 } 5407 5408 bp->bar2 = pci_ioremap_bar(pdev, 4); 5409 if (!bp->bar2) { 5410 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 5411 rc = -ENOMEM; 5412 goto init_err_release; 5413 } 5414 5415 pci_enable_pcie_error_reporting(pdev); 5416 5417 INIT_WORK(&bp->sp_task, bnxt_sp_task); 5418 5419 spin_lock_init(&bp->ntp_fltr_lock); 5420 5421 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 5422 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 5423 5424 /* tick values in microseconds */ 5425 bp->rx_coal_ticks = 12; 5426 bp->rx_coal_bufs = 30; 5427 bp->rx_coal_ticks_irq = 1; 5428 bp->rx_coal_bufs_irq = 2; 5429 5430 bp->tx_coal_ticks = 25; 5431 bp->tx_coal_bufs = 30; 5432 bp->tx_coal_ticks_irq = 2; 5433
bp->tx_coal_bufs_irq = 2; 5434 5435 init_timer(&bp->timer); 5436 bp->timer.data = (unsigned long)bp; 5437 bp->timer.function = bnxt_timer; 5438 bp->current_interval = BNXT_TIMER_INTERVAL; 5439 5440 clear_bit(BNXT_STATE_OPEN, &bp->state); 5441 5442 return 0; 5443 5444 init_err_release: 5445 if (bp->bar2) { 5446 pci_iounmap(pdev, bp->bar2); 5447 bp->bar2 = NULL; 5448 } 5449 5450 if (bp->bar1) { 5451 pci_iounmap(pdev, bp->bar1); 5452 bp->bar1 = NULL; 5453 } 5454 5455 if (bp->bar0) { 5456 pci_iounmap(pdev, bp->bar0); 5457 bp->bar0 = NULL; 5458 } 5459 5460 pci_release_regions(pdev); 5461 5462 init_err_disable: 5463 pci_disable_device(pdev); 5464 5465 init_err: 5466 return rc; 5467 } 5468 5469 /* rtnl_lock held */ 5470 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 5471 { 5472 struct sockaddr *addr = p; 5473 struct bnxt *bp = netdev_priv(dev); 5474 int rc = 0; 5475 5476 if (!is_valid_ether_addr(addr->sa_data)) 5477 return -EADDRNOTAVAIL; 5478 5479 #ifdef CONFIG_BNXT_SRIOV 5480 if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) 5481 return -EADDRNOTAVAIL; 5482 #endif 5483 5484 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 5485 return 0; 5486 5487 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5488 if (netif_running(dev)) { 5489 bnxt_close_nic(bp, false, false); 5490 rc = bnxt_open_nic(bp, false, false); 5491 } 5492 5493 return rc; 5494 } 5495 5496 /* rtnl_lock held */ 5497 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 5498 { 5499 struct bnxt *bp = netdev_priv(dev); 5500 5501 if (new_mtu < 60 || new_mtu > 9000) 5502 return -EINVAL; 5503 5504 if (netif_running(dev)) 5505 bnxt_close_nic(bp, false, false); 5506 5507 dev->mtu = new_mtu; 5508 bnxt_set_ring_params(bp); 5509 5510 if (netif_running(dev)) 5511 return bnxt_open_nic(bp, false, false); 5512 5513 return 0; 5514 } 5515 5516 static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 5517 struct tc_to_netdev *ntc) 5518 { 5519 struct bnxt *bp = netdev_priv(dev); 5520 u8 tc; 5521 5522 if (ntc->type != TC_SETUP_MQPRIO) 5523 return -EINVAL; 5524 5525 tc = ntc->tc; 5526 5527 if (tc > bp->max_tc) { 5528 netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", 5529 tc, bp->max_tc); 5530 return -EINVAL; 5531 } 5532 5533 if (netdev_get_num_tc(dev) == tc) 5534 return 0; 5535 5536 if (tc) { 5537 int max_rx_rings, max_tx_rings, rc; 5538 bool sh = false; 5539 5540 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5541 sh = true; 5542 5543 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); 5544 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) 5545 return -ENOMEM; 5546 } 5547 5548 /* Needs to close the device and do hw resource re-allocations */ 5549 if (netif_running(bp->dev)) 5550 bnxt_close_nic(bp, true, false); 5551 5552 if (tc) { 5553 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 5554 netdev_set_num_tc(dev, tc); 5555 } else { 5556 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 5557 netdev_reset_tc(dev); 5558 } 5559 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); 5560 bp->num_stat_ctxs = bp->cp_nr_rings; 5561 5562 if (netif_running(bp->dev)) 5563 return bnxt_open_nic(bp, true, false); 5564 5565 return 0; 5566 } 5567 5568 #ifdef CONFIG_RFS_ACCEL 5569 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 5570 struct bnxt_ntuple_filter *f2) 5571 { 5572 struct flow_keys *keys1 = &f1->fkeys; 5573 struct flow_keys *keys2 = &f2->fkeys; 5574 5575 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && 5576 keys1->addrs.v4addrs.dst == 
keys2->addrs.v4addrs.dst && 5577 keys1->ports.ports == keys2->ports.ports && 5578 keys1->basic.ip_proto == keys2->basic.ip_proto && 5579 keys1->basic.n_proto == keys2->basic.n_proto && 5580 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr)) 5581 return true; 5582 5583 return false; 5584 } 5585 5586 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 5587 u16 rxq_index, u32 flow_id) 5588 { 5589 struct bnxt *bp = netdev_priv(dev); 5590 struct bnxt_ntuple_filter *fltr, *new_fltr; 5591 struct flow_keys *fkeys; 5592 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 5593 int rc = 0, idx, bit_id; 5594 struct hlist_head *head; 5595 5596 if (skb->encapsulation) 5597 return -EPROTONOSUPPORT; 5598 5599 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 5600 if (!new_fltr) 5601 return -ENOMEM; 5602 5603 fkeys = &new_fltr->fkeys; 5604 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 5605 rc = -EPROTONOSUPPORT; 5606 goto err_free; 5607 } 5608 5609 if ((fkeys->basic.n_proto != htons(ETH_P_IP)) || 5610 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 5611 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 5612 rc = -EPROTONOSUPPORT; 5613 goto err_free; 5614 } 5615 5616 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 5617 5618 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 5619 head = &bp->ntp_fltr_hash_tbl[idx]; 5620 rcu_read_lock(); 5621 hlist_for_each_entry_rcu(fltr, head, hash) { 5622 if (bnxt_fltr_match(fltr, new_fltr)) { 5623 rcu_read_unlock(); 5624 rc = 0; 5625 goto err_free; 5626 } 5627 } 5628 rcu_read_unlock(); 5629 5630 spin_lock_bh(&bp->ntp_fltr_lock); 5631 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5632 BNXT_NTP_FLTR_MAX_FLTR, 0); 5633 if (bit_id < 0) { 5634 spin_unlock_bh(&bp->ntp_fltr_lock); 5635 rc = -ENOMEM; 5636 goto err_free; 5637 } 5638 5639 new_fltr->sw_id = (u16)bit_id; 5640 new_fltr->flow_id = flow_id; 5641 new_fltr->rxq = rxq_index; 5642 hlist_add_head_rcu(&new_fltr->hash, head); 5643 bp->ntp_fltr_count++; 5644 spin_unlock_bh(&bp->ntp_fltr_lock); 5645 5646 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 5647 schedule_work(&bp->sp_task); 5648 5649 return new_fltr->sw_id; 5650 5651 err_free: 5652 kfree(new_fltr); 5653 return rc; 5654 } 5655 5656 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 5657 { 5658 int i; 5659 5660 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 5661 struct hlist_head *head; 5662 struct hlist_node *tmp; 5663 struct bnxt_ntuple_filter *fltr; 5664 int rc; 5665 5666 head = &bp->ntp_fltr_hash_tbl[i]; 5667 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 5668 bool del = false; 5669 5670 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 5671 if (rps_may_expire_flow(bp->dev, fltr->rxq, 5672 fltr->flow_id, 5673 fltr->sw_id)) { 5674 bnxt_hwrm_cfa_ntuple_filter_free(bp, 5675 fltr); 5676 del = true; 5677 } 5678 } else { 5679 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 5680 fltr); 5681 if (rc) 5682 del = true; 5683 else 5684 set_bit(BNXT_FLTR_VALID, &fltr->state); 5685 } 5686 5687 if (del) { 5688 spin_lock_bh(&bp->ntp_fltr_lock); 5689 hlist_del_rcu(&fltr->hash); 5690 bp->ntp_fltr_count--; 5691 spin_unlock_bh(&bp->ntp_fltr_lock); 5692 synchronize_rcu(); 5693 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 5694 kfree(fltr); 5695 } 5696 } 5697 } 5698 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 5699 netdev_info(bp->dev, "Received PF driver unload event!\n"); 5700 } 5701 5702 #else 5703 5704 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 5705 { 5706 } 5707 5708 #endif /* CONFIG_RFS_ACCEL */ 5709 5710
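/* The VXLAN port notifications below may arrive in contexts where issuing HWRM commands directly is not safe, so the tunnel port alloc/free is deferred to bnxt_sp_task() via the BNXT_VXLAN_ADD/DEL_PORT_SP_EVENT bits. Only one offloaded VXLAN port is supported at a time; bp->vxlan_port_cnt reference counts requests for the same port. */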
static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, 5711 __be16 port) 5712 { 5713 struct bnxt *bp = netdev_priv(dev); 5714 5715 if (!netif_running(dev)) 5716 return; 5717 5718 if (sa_family != AF_INET6 && sa_family != AF_INET) 5719 return; 5720 5721 if (bp->vxlan_port_cnt && bp->vxlan_port != port) 5722 return; 5723 5724 bp->vxlan_port_cnt++; 5725 if (bp->vxlan_port_cnt == 1) { 5726 bp->vxlan_port = port; 5727 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 5728 schedule_work(&bp->sp_task); 5729 } 5730 } 5731 5732 static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, 5733 __be16 port) 5734 { 5735 struct bnxt *bp = netdev_priv(dev); 5736 5737 if (!netif_running(dev)) 5738 return; 5739 5740 if (sa_family != AF_INET6 && sa_family != AF_INET) 5741 return; 5742 5743 if (bp->vxlan_port_cnt && bp->vxlan_port == port) { 5744 bp->vxlan_port_cnt--; 5745 5746 if (bp->vxlan_port_cnt == 0) { 5747 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 5748 schedule_work(&bp->sp_task); 5749 } 5750 } 5751 } 5752 5753 static const struct net_device_ops bnxt_netdev_ops = { 5754 .ndo_open = bnxt_open, 5755 .ndo_start_xmit = bnxt_start_xmit, 5756 .ndo_stop = bnxt_close, 5757 .ndo_get_stats64 = bnxt_get_stats64, 5758 .ndo_set_rx_mode = bnxt_set_rx_mode, 5759 .ndo_do_ioctl = bnxt_ioctl, 5760 .ndo_validate_addr = eth_validate_addr, 5761 .ndo_set_mac_address = bnxt_change_mac_addr, 5762 .ndo_change_mtu = bnxt_change_mtu, 5763 .ndo_fix_features = bnxt_fix_features, 5764 .ndo_set_features = bnxt_set_features, 5765 .ndo_tx_timeout = bnxt_tx_timeout, 5766 #ifdef CONFIG_BNXT_SRIOV 5767 .ndo_get_vf_config = bnxt_get_vf_config, 5768 .ndo_set_vf_mac = bnxt_set_vf_mac, 5769 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 5770 .ndo_set_vf_rate = bnxt_set_vf_bw, 5771 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 5772 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 5773 #endif 5774 #ifdef CONFIG_NET_POLL_CONTROLLER 5775 .ndo_poll_controller = bnxt_poll_controller, 5776 #endif 5777 .ndo_setup_tc = bnxt_setup_tc, 5778 #ifdef CONFIG_RFS_ACCEL 5779 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 5780 #endif 5781 .ndo_add_vxlan_port = bnxt_add_vxlan_port, 5782 .ndo_del_vxlan_port = bnxt_del_vxlan_port, 5783 #ifdef CONFIG_NET_RX_BUSY_POLL 5784 .ndo_busy_poll = bnxt_busy_poll, 5785 #endif 5786 }; 5787 5788 static void bnxt_remove_one(struct pci_dev *pdev) 5789 { 5790 struct net_device *dev = pci_get_drvdata(pdev); 5791 struct bnxt *bp = netdev_priv(dev); 5792 5793 if (BNXT_PF(bp)) 5794 bnxt_sriov_disable(bp); 5795 5796 pci_disable_pcie_error_reporting(pdev); 5797 unregister_netdev(dev); 5798 cancel_work_sync(&bp->sp_task); 5799 bp->sp_event = 0; 5800 5801 bnxt_hwrm_func_drv_unrgtr(bp); 5802 bnxt_free_hwrm_resources(bp); 5803 pci_iounmap(pdev, bp->bar2); 5804 pci_iounmap(pdev, bp->bar1); 5805 pci_iounmap(pdev, bp->bar0); 5806 free_netdev(dev); 5807 5808 pci_release_regions(pdev); 5809 pci_disable_device(pdev); 5810 } 5811 5812 static int bnxt_probe_phy(struct bnxt *bp) 5813 { 5814 int rc = 0; 5815 struct bnxt_link_info *link_info = &bp->link_info; 5816 5817 rc = bnxt_update_link(bp, false); 5818 if (rc) { 5819 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 5820 rc); 5821 return rc; 5822 } 5823 5824 /* Initialize the ethtool settings copy with NVM settings */ 5825 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 5826 link_info->autoneg = BNXT_AUTONEG_SPEED | 5827 BNXT_AUTONEG_FLOW_CTRL; 5828 link_info->advertising = link_info->auto_link_speeds; 5829 link_info->req_flow_ctrl =
link_info->auto_pause_setting; 5830 } else { 5831 link_info->req_link_speed = link_info->force_link_speed; 5832 link_info->req_duplex = link_info->duplex_setting; 5833 link_info->req_flow_ctrl = link_info->force_pause_setting; 5834 } 5835 return rc; 5836 } 5837 5838 static int bnxt_get_max_irq(struct pci_dev *pdev) 5839 { 5840 u16 ctrl; 5841 5842 if (!pdev->msix_cap) 5843 return 1; 5844 5845 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 5846 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 5847 } 5848 5849 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 5850 int *max_cp) 5851 { 5852 int max_ring_grps = 0; 5853 5854 #ifdef CONFIG_BNXT_SRIOV 5855 if (!BNXT_PF(bp)) { 5856 *max_tx = bp->vf.max_tx_rings; 5857 *max_rx = bp->vf.max_rx_rings; 5858 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); 5859 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); 5860 max_ring_grps = bp->vf.max_hw_ring_grps; 5861 } else 5862 #endif 5863 { 5864 *max_tx = bp->pf.max_tx_rings; 5865 *max_rx = bp->pf.max_rx_rings; 5866 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); 5867 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); 5868 max_ring_grps = bp->pf.max_hw_ring_grps; 5869 } 5870 5871 if (bp->flags & BNXT_FLAG_AGG_RINGS) 5872 *max_rx >>= 1; 5873 *max_rx = min_t(int, *max_rx, max_ring_grps); 5874 } 5875 5876 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 5877 { 5878 int rx, tx, cp; 5879 5880 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 5881 if (!rx || !tx || !cp) 5882 return -ENOMEM; 5883 5884 *max_rx = rx; 5885 *max_tx = tx; 5886 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 5887 } 5888 5889 static int bnxt_set_dflt_rings(struct bnxt *bp) 5890 { 5891 int dflt_rings, max_rx_rings, max_tx_rings, rc; 5892 bool sh = true; 5893 5894 if (sh) 5895 bp->flags |= BNXT_FLAG_SHARED_RINGS; 5896 dflt_rings = netif_get_num_default_rss_queues(); 5897 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); 5898 if (rc) 5899 return rc; 5900 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 5901 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 5902 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 5903 bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 5904 bp->tx_nr_rings + bp->rx_nr_rings; 5905 bp->num_stat_ctxs = bp->cp_nr_rings; 5906 return rc; 5907 } 5908 5909 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 5910 { 5911 static int version_printed; 5912 struct net_device *dev; 5913 struct bnxt *bp; 5914 int rc, max_irqs; 5915 5916 if (version_printed++ == 0) 5917 pr_info("%s", version); 5918 5919 max_irqs = bnxt_get_max_irq(pdev); 5920 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); 5921 if (!dev) 5922 return -ENOMEM; 5923 5924 bp = netdev_priv(dev); 5925 5926 if (bnxt_vf_pciid(ent->driver_data)) 5927 bp->flags |= BNXT_FLAG_VF; 5928 5929 if (pdev->msix_cap) 5930 bp->flags |= BNXT_FLAG_MSIX_CAP; 5931 5932 rc = bnxt_init_board(pdev, dev); 5933 if (rc < 0) 5934 goto init_err_free; 5935 5936 dev->netdev_ops = &bnxt_netdev_ops; 5937 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 5938 dev->ethtool_ops = &bnxt_ethtool_ops; 5939 5940 pci_set_drvdata(pdev, dev); 5941 5942 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 5943 NETIF_F_TSO | NETIF_F_TSO6 | 5944 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 5945 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | 5946 NETIF_F_RXHASH | 5947 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; 5948 5949 dev->hw_enc_features = 5950 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 5951 NETIF_F_TSO | NETIF_F_TSO6 | 5952 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 5953 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; 5954 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 5955 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | 5956 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; 5957 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 5958 dev->priv_flags |= IFF_UNICAST_FLT; 5959 5960 #ifdef CONFIG_BNXT_SRIOV 5961 init_waitqueue_head(&bp->sriov_cfg_wait); 5962 #endif 5963 rc = bnxt_alloc_hwrm_resources(bp); 5964 if (rc) 5965 goto init_err; 5966 5967 mutex_init(&bp->hwrm_cmd_lock); 5968 bnxt_hwrm_ver_get(bp); 5969 5970 rc = bnxt_hwrm_func_drv_rgtr(bp); 5971 if (rc) 5972 goto init_err; 5973 5974 /* Get the MAX capabilities for this function */ 5975 rc = bnxt_hwrm_func_qcaps(bp); 5976 if (rc) { 5977 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 5978 rc); 5979 rc = -1; 5980 goto init_err; 5981 } 5982 5983 rc = bnxt_hwrm_queue_qportcfg(bp); 5984 if (rc) { 5985 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", 5986 rc); 5987 rc = -1; 5988 goto init_err; 5989 } 5990 5991 bnxt_set_tpa_flags(bp); 5992 bnxt_set_ring_params(bp); 5993 if (BNXT_PF(bp)) 5994 bp->pf.max_irqs = max_irqs; 5995 #if defined(CONFIG_BNXT_SRIOV) 5996 else 5997 bp->vf.max_irqs = max_irqs; 5998 #endif 5999 bnxt_set_dflt_rings(bp); 6000 6001 if (BNXT_PF(bp)) { 6002 dev->hw_features |= NETIF_F_NTUPLE; 6003 if (bnxt_rfs_capable(bp)) { 6004 bp->flags |= BNXT_FLAG_RFS; 6005 dev->features |= NETIF_F_NTUPLE; 6006 } 6007 } 6008 6009 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) 6010 bp->flags |= BNXT_FLAG_STRIP_VLAN; 6011 6012 rc = bnxt_probe_phy(bp); 6013 if (rc) 6014 goto init_err; 6015 6016 rc = register_netdev(dev); 6017 if (rc) 6018 goto init_err; 6019 6020 netdev_info(dev, "%s found at mem %lx, node addr %pM\n", 6021 board_info[ent->driver_data].name, 6022 (long)pci_resource_start(pdev, 0), dev->dev_addr); 6023 6024 return 0; 6025 6026 init_err: 6027 pci_iounmap(pdev, bp->bar0); 6028 pci_release_regions(pdev); 6029 pci_disable_device(pdev); 6030 6031 init_err_free: 6032 free_netdev(dev); 6033 return rc; 6034 } 6035 6036 /** 6037 * 
bnxt_io_error_detected - called when PCI error is detected 6038 * @pdev: Pointer to PCI device 6039 * @state: The current pci connection state 6040 * 6041 * This function is called after a PCI bus error affecting 6042 * this device has been detected. 6043 */ 6044 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, 6045 pci_channel_state_t state) 6046 { 6047 struct net_device *netdev = pci_get_drvdata(pdev); 6048 6049 netdev_info(netdev, "PCI I/O error detected\n"); 6050 6051 rtnl_lock(); 6052 netif_device_detach(netdev); 6053 6054 if (state == pci_channel_io_perm_failure) { 6055 rtnl_unlock(); 6056 return PCI_ERS_RESULT_DISCONNECT; 6057 } 6058 6059 if (netif_running(netdev)) 6060 bnxt_close(netdev); 6061 6062 pci_disable_device(pdev); 6063 rtnl_unlock(); 6064 6065 /* Request a slot reset. */ 6066 return PCI_ERS_RESULT_NEED_RESET; 6067 } 6068 6069 /** 6070 * bnxt_io_slot_reset - called after the pci bus has been reset. 6071 * @pdev: Pointer to PCI device 6072 * 6073 * Restart the card from scratch, as if from a cold-boot. 6074 * At this point, the card has experienced a hard reset, 6075 * followed by fixups by BIOS, and has its config space 6076 * set up identically to what it was at cold boot. 6077 */ 6078 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) 6079 { 6080 struct net_device *netdev = pci_get_drvdata(pdev); 6081 struct bnxt *bp = netdev_priv(netdev); 6082 int err = 0; 6083 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; 6084 6085 netdev_info(bp->dev, "PCI Slot Reset\n"); 6086 6087 rtnl_lock(); 6088 6089 if (pci_enable_device(pdev)) { 6090 dev_err(&pdev->dev, 6091 "Cannot re-enable PCI device after reset.\n"); 6092 } else { 6093 pci_set_master(pdev); 6094 6095 if (netif_running(netdev)) 6096 err = bnxt_open(netdev); 6097 6098 if (!err) 6099 result = PCI_ERS_RESULT_RECOVERED; 6100 } 6101 6102 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) 6103 dev_close(netdev); 6104 6105 rtnl_unlock(); 6106 6107 err = pci_cleanup_aer_uncorrect_error_status(pdev); 6108 if (err) { 6109 dev_err(&pdev->dev, 6110 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", 6111 err); /* non-fatal, continue */ 6112 } 6113 6114 return result; 6115 } 6116 6117 /** 6118 * bnxt_io_resume - called when traffic can start flowing again. 6119 * @pdev: Pointer to PCI device 6120 * 6121 * This callback is called when the error recovery driver tells 6122 * us that it's OK to resume normal operation. 6123 */ 6124 static void bnxt_io_resume(struct pci_dev *pdev) 6125 { 6126 struct net_device *netdev = pci_get_drvdata(pdev); 6127 6128 rtnl_lock(); 6129 6130 netif_device_attach(netdev); 6131 6132 rtnl_unlock(); 6133 } 6134 6135 static const struct pci_error_handlers bnxt_err_handler = { 6136 .error_detected = bnxt_io_error_detected, 6137 .slot_reset = bnxt_io_slot_reset, 6138 .resume = bnxt_io_resume 6139 }; 6140 6141 static struct pci_driver bnxt_pci_driver = { 6142 .name = DRV_MODULE_NAME, 6143 .id_table = bnxt_pci_tbl, 6144 .probe = bnxt_init_one, 6145 .remove = bnxt_remove_one, 6146 .err_handler = &bnxt_err_handler, 6147 #if defined(CONFIG_BNXT_SRIOV) 6148 .sriov_configure = bnxt_sriov_configure, 6149 #endif 6150 }; 6151 6152 module_pci_driver(bnxt_pci_driver); 6153