/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)	\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)	\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)	\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if
(!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 272 return 0; 273 274 return md_dst->u.port_info.port_id; 275 } 276 277 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 278 { 279 struct bnxt *bp = netdev_priv(dev); 280 struct tx_bd *txbd; 281 struct tx_bd_ext *txbd1; 282 struct netdev_queue *txq; 283 int i; 284 dma_addr_t mapping; 285 unsigned int length, pad = 0; 286 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 287 u16 prod, last_frag; 288 struct pci_dev *pdev = bp->pdev; 289 struct bnxt_tx_ring_info *txr; 290 struct bnxt_sw_tx_bd *tx_buf; 291 292 i = skb_get_queue_mapping(skb); 293 if (unlikely(i >= bp->tx_nr_rings)) { 294 dev_kfree_skb_any(skb); 295 return NETDEV_TX_OK; 296 } 297 298 txq = netdev_get_tx_queue(dev, i); 299 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 300 prod = txr->tx_prod; 301 302 free_size = bnxt_tx_avail(bp, txr); 303 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 304 netif_tx_stop_queue(txq); 305 return NETDEV_TX_BUSY; 306 } 307 308 length = skb->len; 309 len = skb_headlen(skb); 310 last_frag = skb_shinfo(skb)->nr_frags; 311 312 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 313 314 txbd->tx_bd_opaque = prod; 315 316 tx_buf = &txr->tx_buf_ring[prod]; 317 tx_buf->skb = skb; 318 tx_buf->nr_frags = last_frag; 319 320 vlan_tag_flags = 0; 321 cfa_action = bnxt_xmit_get_cfa_action(skb); 322 if (skb_vlan_tag_present(skb)) { 323 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 324 skb_vlan_tag_get(skb); 325 /* Currently supports 8021Q, 8021AD vlan offloads 326 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 327 */ 328 if (skb->vlan_proto == htons(ETH_P_8021Q)) 329 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 330 } 331 332 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 333 struct tx_push_buffer *tx_push_buf = txr->tx_push; 334 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 335 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 336 void *pdata = tx_push_buf->data; 337 u64 *end; 338 int j, push_len; 339 340 /* Set COAL_NOW to be ready quickly for the next push */ 341 tx_push->tx_bd_len_flags_type = 342 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 343 TX_BD_TYPE_LONG_TX_BD | 344 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 345 TX_BD_FLAGS_COAL_NOW | 346 TX_BD_FLAGS_PACKET_END | 347 (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); 348 349 if (skb->ip_summed == CHECKSUM_PARTIAL) 350 tx_push1->tx_bd_hsize_lflags = 351 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 352 else 353 tx_push1->tx_bd_hsize_lflags = 0; 354 355 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 356 tx_push1->tx_bd_cfa_action = 357 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 358 359 end = pdata + length; 360 end = PTR_ALIGN(end, 8) - 1; 361 *end = 0; 362 363 skb_copy_from_linear_data(skb, pdata, len); 364 pdata += len; 365 for (j = 0; j < last_frag; j++) { 366 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 367 void *fptr; 368 369 fptr = skb_frag_address_safe(frag); 370 if (!fptr) 371 goto normal_tx; 372 373 memcpy(pdata, fptr, skb_frag_size(frag)); 374 pdata += skb_frag_size(frag); 375 } 376 377 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 378 txbd->tx_bd_haddr = txr->data_mapping; 379 prod = NEXT_TX(prod); 380 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 381 memcpy(txbd, tx_push1, sizeof(*txbd)); 382 prod = NEXT_TX(prod); 383 tx_push->doorbell = 384 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 385 txr->tx_prod = prod; 386 387 tx_buf->is_push = 1; 388 netdev_tx_sent_queue(txq, skb->len); 389 wmb(); /* Sync is_push 
and byte queue before pushing data */ 390 391 push_len = (length + sizeof(*tx_push) + 7) / 8; 392 if (push_len > 16) { 393 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); 394 __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1, 395 (push_len - 16) << 1); 396 } else { 397 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 398 push_len); 399 } 400 401 goto tx_done; 402 } 403 404 normal_tx: 405 if (length < BNXT_MIN_PKT_SIZE) { 406 pad = BNXT_MIN_PKT_SIZE - length; 407 if (skb_pad(skb, pad)) { 408 /* SKB already freed. */ 409 tx_buf->skb = NULL; 410 return NETDEV_TX_OK; 411 } 412 length = BNXT_MIN_PKT_SIZE; 413 } 414 415 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 416 417 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { 418 dev_kfree_skb_any(skb); 419 tx_buf->skb = NULL; 420 return NETDEV_TX_OK; 421 } 422 423 dma_unmap_addr_set(tx_buf, mapping, mapping); 424 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 425 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); 426 427 txbd->tx_bd_haddr = cpu_to_le64(mapping); 428 429 prod = NEXT_TX(prod); 430 txbd1 = (struct tx_bd_ext *) 431 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 432 433 txbd1->tx_bd_hsize_lflags = 0; 434 if (skb_is_gso(skb)) { 435 u32 hdr_len; 436 437 if (skb->encapsulation) 438 hdr_len = skb_inner_network_offset(skb) + 439 skb_inner_network_header_len(skb) + 440 inner_tcp_hdrlen(skb); 441 else 442 hdr_len = skb_transport_offset(skb) + 443 tcp_hdrlen(skb); 444 445 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO | 446 TX_BD_FLAGS_T_IPID | 447 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 448 length = skb_shinfo(skb)->gso_size; 449 txbd1->tx_bd_mss = cpu_to_le32(length); 450 length += hdr_len; 451 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 452 txbd1->tx_bd_hsize_lflags = 453 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 454 txbd1->tx_bd_mss = 0; 455 } 456 457 length >>= 9; 458 flags |= bnxt_lhint_arr[length]; 459 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 460 461 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 462 txbd1->tx_bd_cfa_action = 463 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 464 for (i = 0; i < last_frag; i++) { 465 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 466 467 prod = NEXT_TX(prod); 468 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 469 470 len = skb_frag_size(frag); 471 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 472 DMA_TO_DEVICE); 473 474 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 475 goto tx_dma_error; 476 477 tx_buf = &txr->tx_buf_ring[prod]; 478 dma_unmap_addr_set(tx_buf, mapping, mapping); 479 480 txbd->tx_bd_haddr = cpu_to_le64(mapping); 481 482 flags = len << TX_BD_LEN_SHIFT; 483 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 484 } 485 486 flags &= ~TX_BD_LEN; 487 txbd->tx_bd_len_flags_type = 488 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 489 TX_BD_FLAGS_PACKET_END); 490 491 netdev_tx_sent_queue(txq, skb->len); 492 493 /* Sync BD data before updating doorbell */ 494 wmb(); 495 496 prod = NEXT_TX(prod); 497 txr->tx_prod = prod; 498 499 if (!skb->xmit_more || netif_xmit_stopped(txq)) 500 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); 501 502 tx_done: 503 504 mmiowb(); 505 506 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 507 if (skb->xmit_more && !tx_buf->is_push) 508 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod); 509 510 netif_tx_stop_queue(txq); 511 512 /* netif_tx_stop_queue() must be done before checking 513 * tx index in bnxt_tx_avail() below, because in 514 * 
bnxt_tx_int(), we update tx index before checking for 515 * netif_tx_queue_stopped(). 516 */ 517 smp_mb(); 518 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) 519 netif_tx_wake_queue(txq); 520 } 521 return NETDEV_TX_OK; 522 523 tx_dma_error: 524 last_frag = i; 525 526 /* start back at beginning and unmap skb */ 527 prod = txr->tx_prod; 528 tx_buf = &txr->tx_buf_ring[prod]; 529 tx_buf->skb = NULL; 530 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 531 skb_headlen(skb), PCI_DMA_TODEVICE); 532 prod = NEXT_TX(prod); 533 534 /* unmap remaining mapped pages */ 535 for (i = 0; i < last_frag; i++) { 536 prod = NEXT_TX(prod); 537 tx_buf = &txr->tx_buf_ring[prod]; 538 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 539 skb_frag_size(&skb_shinfo(skb)->frags[i]), 540 PCI_DMA_TODEVICE); 541 } 542 543 dev_kfree_skb_any(skb); 544 return NETDEV_TX_OK; 545 } 546 547 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) 548 { 549 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 550 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 551 u16 cons = txr->tx_cons; 552 struct pci_dev *pdev = bp->pdev; 553 int i; 554 unsigned int tx_bytes = 0; 555 556 for (i = 0; i < nr_pkts; i++) { 557 struct bnxt_sw_tx_bd *tx_buf; 558 struct sk_buff *skb; 559 int j, last; 560 561 tx_buf = &txr->tx_buf_ring[cons]; 562 cons = NEXT_TX(cons); 563 skb = tx_buf->skb; 564 tx_buf->skb = NULL; 565 566 if (tx_buf->is_push) { 567 tx_buf->is_push = 0; 568 goto next_tx_int; 569 } 570 571 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 572 skb_headlen(skb), PCI_DMA_TODEVICE); 573 last = tx_buf->nr_frags; 574 575 for (j = 0; j < last; j++) { 576 cons = NEXT_TX(cons); 577 tx_buf = &txr->tx_buf_ring[cons]; 578 dma_unmap_page( 579 &pdev->dev, 580 dma_unmap_addr(tx_buf, mapping), 581 skb_frag_size(&skb_shinfo(skb)->frags[j]), 582 PCI_DMA_TODEVICE); 583 } 584 585 next_tx_int: 586 cons = NEXT_TX(cons); 587 588 tx_bytes += skb->len; 589 dev_kfree_skb_any(skb); 590 } 591 592 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); 593 txr->tx_cons = cons; 594 595 /* Need to make the tx_cons update visible to bnxt_start_xmit() 596 * before checking for netif_tx_queue_stopped(). Without the 597 * memory barrier, there is a small possibility that bnxt_start_xmit() 598 * will miss it and cause the queue to be stopped forever. 
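	 * The smp_mb() below pairs with the smp_mb() in bnxt_start_xmit()
	 * that follows netif_tx_stop_queue(), forming the usual stop/wake
	 * protocol:
	 *
	 *   bnxt_start_xmit()                bnxt_tx_int()
	 *     netif_tx_stop_queue()            txr->tx_cons = cons
	 *     smp_mb()                         smp_mb()
	 *     recheck bnxt_tx_avail()          recheck queue_stopped()
	 *
	 * so at least one side always observes the other side's update.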
599 */ 600 smp_mb(); 601 602 if (unlikely(netif_tx_queue_stopped(txq)) && 603 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { 604 __netif_tx_lock(txq, smp_processor_id()); 605 if (netif_tx_queue_stopped(txq) && 606 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 607 txr->dev_state != BNXT_DEV_STATE_CLOSING) 608 netif_tx_wake_queue(txq); 609 __netif_tx_unlock(txq); 610 } 611 } 612 613 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 614 gfp_t gfp) 615 { 616 struct device *dev = &bp->pdev->dev; 617 struct page *page; 618 619 page = alloc_page(gfp); 620 if (!page) 621 return NULL; 622 623 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, 624 DMA_ATTR_WEAK_ORDERING); 625 if (dma_mapping_error(dev, *mapping)) { 626 __free_page(page); 627 return NULL; 628 } 629 *mapping += bp->rx_dma_offset; 630 return page; 631 } 632 633 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, 634 gfp_t gfp) 635 { 636 u8 *data; 637 struct pci_dev *pdev = bp->pdev; 638 639 data = kmalloc(bp->rx_buf_size, gfp); 640 if (!data) 641 return NULL; 642 643 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, 644 bp->rx_buf_use_size, bp->rx_dir, 645 DMA_ATTR_WEAK_ORDERING); 646 647 if (dma_mapping_error(&pdev->dev, *mapping)) { 648 kfree(data); 649 data = NULL; 650 } 651 return data; 652 } 653 654 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 655 u16 prod, gfp_t gfp) 656 { 657 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 658 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; 659 dma_addr_t mapping; 660 661 if (BNXT_RX_PAGE_MODE(bp)) { 662 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp); 663 664 if (!page) 665 return -ENOMEM; 666 667 rx_buf->data = page; 668 rx_buf->data_ptr = page_address(page) + bp->rx_offset; 669 } else { 670 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); 671 672 if (!data) 673 return -ENOMEM; 674 675 rx_buf->data = data; 676 rx_buf->data_ptr = data + bp->rx_offset; 677 } 678 rx_buf->mapping = mapping; 679 680 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 681 return 0; 682 } 683 684 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 685 { 686 u16 prod = rxr->rx_prod; 687 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 688 struct rx_bd *cons_bd, *prod_bd; 689 690 prod_rx_buf = &rxr->rx_buf_ring[prod]; 691 cons_rx_buf = &rxr->rx_buf_ring[cons]; 692 693 prod_rx_buf->data = data; 694 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 695 696 prod_rx_buf->mapping = cons_rx_buf->mapping; 697 698 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 699 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 700 701 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 702 } 703 704 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 705 { 706 u16 next, max = rxr->rx_agg_bmap_size; 707 708 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 709 if (next >= max) 710 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 711 return next; 712 } 713 714 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 715 struct bnxt_rx_ring_info *rxr, 716 u16 prod, gfp_t gfp) 717 { 718 struct rx_bd *rxbd = 719 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 720 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 721 struct pci_dev *pdev = bp->pdev; 722 struct page *page; 723 dma_addr_t mapping; 724 u16 sw_prod = rxr->rx_sw_agg_prod; 725 unsigned int offset = 0; 726 727 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 728 page = rxr->rx_page; 729 
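		/* When the system page size is larger than BNXT_RX_PAGE_SIZE
		 * (64KB pages, for example), one page is carved into
		 * BNXT_RX_PAGE_SIZE chunks: rx_page/rx_page_offset track the
		 * partially used page, and get_page() below takes an extra
		 * reference for each chunk handed to the aggregation ring, so
		 * the page is only freed once every chunk has been released.
		 */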
if (!page) { 730 page = alloc_page(gfp); 731 if (!page) 732 return -ENOMEM; 733 rxr->rx_page = page; 734 rxr->rx_page_offset = 0; 735 } 736 offset = rxr->rx_page_offset; 737 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; 738 if (rxr->rx_page_offset == PAGE_SIZE) 739 rxr->rx_page = NULL; 740 else 741 get_page(page); 742 } else { 743 page = alloc_page(gfp); 744 if (!page) 745 return -ENOMEM; 746 } 747 748 mapping = dma_map_page_attrs(&pdev->dev, page, offset, 749 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, 750 DMA_ATTR_WEAK_ORDERING); 751 if (dma_mapping_error(&pdev->dev, mapping)) { 752 __free_page(page); 753 return -EIO; 754 } 755 756 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 757 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 758 759 __set_bit(sw_prod, rxr->rx_agg_bmap); 760 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 761 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 762 763 rx_agg_buf->page = page; 764 rx_agg_buf->offset = offset; 765 rx_agg_buf->mapping = mapping; 766 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 767 rxbd->rx_bd_opaque = sw_prod; 768 return 0; 769 } 770 771 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, 772 u32 agg_bufs) 773 { 774 struct bnxt *bp = bnapi->bp; 775 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 776 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 777 u16 prod = rxr->rx_agg_prod; 778 u16 sw_prod = rxr->rx_sw_agg_prod; 779 u32 i; 780 781 for (i = 0; i < agg_bufs; i++) { 782 u16 cons; 783 struct rx_agg_cmp *agg; 784 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 785 struct rx_bd *prod_bd; 786 struct page *page; 787 788 agg = (struct rx_agg_cmp *) 789 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 790 cons = agg->rx_agg_cmp_opaque; 791 __clear_bit(cons, rxr->rx_agg_bmap); 792 793 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 794 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 795 796 __set_bit(sw_prod, rxr->rx_agg_bmap); 797 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 798 cons_rx_buf = &rxr->rx_agg_ring[cons]; 799 800 /* It is possible for sw_prod to be equal to cons, so 801 * set cons_rx_buf->page to NULL first. 
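		 * If the two indices are equal, clearing the entry after the
		 * producer assignment would wipe out the page pointer that
		 * was just installed.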
802 */ 803 page = cons_rx_buf->page; 804 cons_rx_buf->page = NULL; 805 prod_rx_buf->page = page; 806 prod_rx_buf->offset = cons_rx_buf->offset; 807 808 prod_rx_buf->mapping = cons_rx_buf->mapping; 809 810 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 811 812 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 813 prod_bd->rx_bd_opaque = sw_prod; 814 815 prod = NEXT_RX_AGG(prod); 816 sw_prod = NEXT_RX_AGG(sw_prod); 817 cp_cons = NEXT_CMP(cp_cons); 818 } 819 rxr->rx_agg_prod = prod; 820 rxr->rx_sw_agg_prod = sw_prod; 821 } 822 823 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 824 struct bnxt_rx_ring_info *rxr, 825 u16 cons, void *data, u8 *data_ptr, 826 dma_addr_t dma_addr, 827 unsigned int offset_and_len) 828 { 829 unsigned int payload = offset_and_len >> 16; 830 unsigned int len = offset_and_len & 0xffff; 831 struct skb_frag_struct *frag; 832 struct page *page = data; 833 u16 prod = rxr->rx_prod; 834 struct sk_buff *skb; 835 int off, err; 836 837 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 838 if (unlikely(err)) { 839 bnxt_reuse_rx_data(rxr, cons, data); 840 return NULL; 841 } 842 dma_addr -= bp->rx_dma_offset; 843 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, 844 DMA_ATTR_WEAK_ORDERING); 845 846 if (unlikely(!payload)) 847 payload = eth_get_headlen(data_ptr, len); 848 849 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 850 if (!skb) { 851 __free_page(page); 852 return NULL; 853 } 854 855 off = (void *)data_ptr - page_address(page); 856 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); 857 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 858 payload + NET_IP_ALIGN); 859 860 frag = &skb_shinfo(skb)->frags[0]; 861 skb_frag_size_sub(frag, payload); 862 frag->page_offset += payload; 863 skb->data_len -= payload; 864 skb->tail += payload; 865 866 return skb; 867 } 868 869 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 870 struct bnxt_rx_ring_info *rxr, u16 cons, 871 void *data, u8 *data_ptr, 872 dma_addr_t dma_addr, 873 unsigned int offset_and_len) 874 { 875 u16 prod = rxr->rx_prod; 876 struct sk_buff *skb; 877 int err; 878 879 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 880 if (unlikely(err)) { 881 bnxt_reuse_rx_data(rxr, cons, data); 882 return NULL; 883 } 884 885 skb = build_skb(data, 0); 886 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 887 bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 888 if (!skb) { 889 kfree(data); 890 return NULL; 891 } 892 893 skb_reserve(skb, bp->rx_offset); 894 skb_put(skb, offset_and_len & 0xffff); 895 return skb; 896 } 897 898 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, 899 struct sk_buff *skb, u16 cp_cons, 900 u32 agg_bufs) 901 { 902 struct pci_dev *pdev = bp->pdev; 903 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 904 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 905 u16 prod = rxr->rx_agg_prod; 906 u32 i; 907 908 for (i = 0; i < agg_bufs; i++) { 909 u16 cons, frag_len; 910 struct rx_agg_cmp *agg; 911 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 912 struct page *page; 913 dma_addr_t mapping; 914 915 agg = (struct rx_agg_cmp *) 916 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 917 cons = agg->rx_agg_cmp_opaque; 918 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 919 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 920 921 cons_rx_buf = &rxr->rx_agg_ring[cons]; 922 skb_fill_page_desc(skb, i, cons_rx_buf->page, 923 cons_rx_buf->offset, frag_len); 924 __clear_bit(cons, rxr->rx_agg_bmap); 925 926 /* It is possible 
for bnxt_alloc_rx_page() to allocate 927 * a sw_prod index that equals the cons index, so we 928 * need to clear the cons entry now. 929 */ 930 mapping = cons_rx_buf->mapping; 931 page = cons_rx_buf->page; 932 cons_rx_buf->page = NULL; 933 934 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 935 struct skb_shared_info *shinfo; 936 unsigned int nr_frags; 937 938 shinfo = skb_shinfo(skb); 939 nr_frags = --shinfo->nr_frags; 940 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); 941 942 dev_kfree_skb(skb); 943 944 cons_rx_buf->page = page; 945 946 /* Update prod since possibly some pages have been 947 * allocated already. 948 */ 949 rxr->rx_agg_prod = prod; 950 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i); 951 return NULL; 952 } 953 954 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 955 PCI_DMA_FROMDEVICE, 956 DMA_ATTR_WEAK_ORDERING); 957 958 skb->data_len += frag_len; 959 skb->len += frag_len; 960 skb->truesize += PAGE_SIZE; 961 962 prod = NEXT_RX_AGG(prod); 963 cp_cons = NEXT_CMP(cp_cons); 964 } 965 rxr->rx_agg_prod = prod; 966 return skb; 967 } 968 969 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 970 u8 agg_bufs, u32 *raw_cons) 971 { 972 u16 last; 973 struct rx_agg_cmp *agg; 974 975 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 976 last = RING_CMP(*raw_cons); 977 agg = (struct rx_agg_cmp *) 978 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 979 return RX_AGG_CMP_VALID(agg, *raw_cons); 980 } 981 982 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 983 unsigned int len, 984 dma_addr_t mapping) 985 { 986 struct bnxt *bp = bnapi->bp; 987 struct pci_dev *pdev = bp->pdev; 988 struct sk_buff *skb; 989 990 skb = napi_alloc_skb(&bnapi->napi, len); 991 if (!skb) 992 return NULL; 993 994 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, 995 bp->rx_dir); 996 997 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 998 len + NET_IP_ALIGN); 999 1000 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, 1001 bp->rx_dir); 1002 1003 skb_put(skb, len); 1004 return skb; 1005 } 1006 1007 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, 1008 u32 *raw_cons, void *cmp) 1009 { 1010 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1011 struct rx_cmp *rxcmp = cmp; 1012 u32 tmp_raw_cons = *raw_cons; 1013 u8 cmp_type, agg_bufs = 0; 1014 1015 cmp_type = RX_CMP_TYPE(rxcmp); 1016 1017 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1018 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1019 RX_CMP_AGG_BUFS) >> 1020 RX_CMP_AGG_BUFS_SHIFT; 1021 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1022 struct rx_tpa_end_cmp *tpa_end = cmp; 1023 1024 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 1025 RX_TPA_END_CMP_AGG_BUFS) >> 1026 RX_TPA_END_CMP_AGG_BUFS_SHIFT; 1027 } 1028 1029 if (agg_bufs) { 1030 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1031 return -EBUSY; 1032 } 1033 *raw_cons = tmp_raw_cons; 1034 return 0; 1035 } 1036 1037 static void bnxt_queue_sp_work(struct bnxt *bp) 1038 { 1039 if (BNXT_PF(bp)) 1040 queue_work(bnxt_pf_wq, &bp->sp_task); 1041 else 1042 schedule_work(&bp->sp_task); 1043 } 1044 1045 static void bnxt_cancel_sp_work(struct bnxt *bp) 1046 { 1047 if (BNXT_PF(bp)) 1048 flush_workqueue(bnxt_pf_wq); 1049 else 1050 cancel_work_sync(&bp->sp_task); 1051 } 1052 1053 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1054 { 1055 if (!rxr->bnapi->in_reset) { 1056 rxr->bnapi->in_reset = true; 1057 
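		/* While in_reset is set, bnxt_tpa_end() drops further TPA
		 * completions for this NAPI context; the sp task scheduled
		 * below performs the actual reset.
		 */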
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1058 bnxt_queue_sp_work(bp); 1059 } 1060 rxr->rx_next_cons = 0xffff; 1061 } 1062 1063 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1064 struct rx_tpa_start_cmp *tpa_start, 1065 struct rx_tpa_start_cmp_ext *tpa_start1) 1066 { 1067 u8 agg_id = TPA_START_AGG_ID(tpa_start); 1068 u16 cons, prod; 1069 struct bnxt_tpa_info *tpa_info; 1070 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1071 struct rx_bd *prod_bd; 1072 dma_addr_t mapping; 1073 1074 cons = tpa_start->rx_tpa_start_cmp_opaque; 1075 prod = rxr->rx_prod; 1076 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1077 prod_rx_buf = &rxr->rx_buf_ring[prod]; 1078 tpa_info = &rxr->rx_tpa[agg_id]; 1079 1080 if (unlikely(cons != rxr->rx_next_cons)) { 1081 bnxt_sched_reset(bp, rxr); 1082 return; 1083 } 1084 /* Store cfa_code in tpa_info to use in tpa_end 1085 * completion processing. 1086 */ 1087 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1088 prod_rx_buf->data = tpa_info->data; 1089 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1090 1091 mapping = tpa_info->mapping; 1092 prod_rx_buf->mapping = mapping; 1093 1094 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 1095 1096 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1097 1098 tpa_info->data = cons_rx_buf->data; 1099 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1100 cons_rx_buf->data = NULL; 1101 tpa_info->mapping = cons_rx_buf->mapping; 1102 1103 tpa_info->len = 1104 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1105 RX_TPA_START_CMP_LEN_SHIFT; 1106 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1107 u32 hash_type = TPA_START_HASH_TYPE(tpa_start); 1108 1109 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1110 tpa_info->gso_type = SKB_GSO_TCPV4; 1111 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1112 if (hash_type == 3) 1113 tpa_info->gso_type = SKB_GSO_TCPV6; 1114 tpa_info->rss_hash = 1115 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1116 } else { 1117 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1118 tpa_info->gso_type = 0; 1119 if (netif_msg_rx_err(bp)) 1120 netdev_warn(bp->dev, "TPA packet without valid hash\n"); 1121 } 1122 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1123 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1124 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1125 1126 rxr->rx_prod = NEXT_RX(prod); 1127 cons = NEXT_RX(cons); 1128 rxr->rx_next_cons = NEXT_RX(cons); 1129 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1130 1131 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1132 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1133 cons_rx_buf->data = NULL; 1134 } 1135 1136 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, 1137 u16 cp_cons, u32 agg_bufs) 1138 { 1139 if (agg_bufs) 1140 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 1141 } 1142 1143 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1144 int payload_off, int tcp_ts, 1145 struct sk_buff *skb) 1146 { 1147 #ifdef CONFIG_INET 1148 struct tcphdr *th; 1149 int len, nw_off; 1150 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1151 u32 hdr_info = tpa_info->hdr_info; 1152 bool loopback = false; 1153 1154 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1155 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1156 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1157 1158 /* If the packet is an internal loopback packet, the offsets will 1159 * have an extra 4 bytes. 
1160 */ 1161 if (inner_mac_off == 4) { 1162 loopback = true; 1163 } else if (inner_mac_off > 4) { 1164 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1165 ETH_HLEN - 2)); 1166 1167 /* We only support inner iPv4/ipv6. If we don't see the 1168 * correct protocol ID, it must be a loopback packet where 1169 * the offsets are off by 4. 1170 */ 1171 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1172 loopback = true; 1173 } 1174 if (loopback) { 1175 /* internal loopback packet, subtract all offsets by 4 */ 1176 inner_ip_off -= 4; 1177 inner_mac_off -= 4; 1178 outer_ip_off -= 4; 1179 } 1180 1181 nw_off = inner_ip_off - ETH_HLEN; 1182 skb_set_network_header(skb, nw_off); 1183 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1184 struct ipv6hdr *iph = ipv6_hdr(skb); 1185 1186 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1187 len = skb->len - skb_transport_offset(skb); 1188 th = tcp_hdr(skb); 1189 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1190 } else { 1191 struct iphdr *iph = ip_hdr(skb); 1192 1193 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1194 len = skb->len - skb_transport_offset(skb); 1195 th = tcp_hdr(skb); 1196 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1197 } 1198 1199 if (inner_mac_off) { /* tunnel */ 1200 struct udphdr *uh = NULL; 1201 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1202 ETH_HLEN - 2)); 1203 1204 if (proto == htons(ETH_P_IP)) { 1205 struct iphdr *iph = (struct iphdr *)skb->data; 1206 1207 if (iph->protocol == IPPROTO_UDP) 1208 uh = (struct udphdr *)(iph + 1); 1209 } else { 1210 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1211 1212 if (iph->nexthdr == IPPROTO_UDP) 1213 uh = (struct udphdr *)(iph + 1); 1214 } 1215 if (uh) { 1216 if (uh->check) 1217 skb_shinfo(skb)->gso_type |= 1218 SKB_GSO_UDP_TUNNEL_CSUM; 1219 else 1220 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1221 } 1222 } 1223 #endif 1224 return skb; 1225 } 1226 1227 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1228 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1229 1230 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1231 int payload_off, int tcp_ts, 1232 struct sk_buff *skb) 1233 { 1234 #ifdef CONFIG_INET 1235 struct tcphdr *th; 1236 int len, nw_off, tcp_opt_len = 0; 1237 1238 if (tcp_ts) 1239 tcp_opt_len = 12; 1240 1241 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1242 struct iphdr *iph; 1243 1244 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1245 ETH_HLEN; 1246 skb_set_network_header(skb, nw_off); 1247 iph = ip_hdr(skb); 1248 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1249 len = skb->len - skb_transport_offset(skb); 1250 th = tcp_hdr(skb); 1251 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1252 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1253 struct ipv6hdr *iph; 1254 1255 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1256 ETH_HLEN; 1257 skb_set_network_header(skb, nw_off); 1258 iph = ipv6_hdr(skb); 1259 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1260 len = skb->len - skb_transport_offset(skb); 1261 th = tcp_hdr(skb); 1262 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1263 } else { 1264 dev_kfree_skb_any(skb); 1265 return NULL; 1266 } 1267 1268 if (nw_off) { /* tunnel */ 1269 struct udphdr *uh = NULL; 1270 1271 if (skb->protocol == htons(ETH_P_IP)) { 1272 struct iphdr *iph = (struct iphdr *)skb->data; 1273 1274 if 
(iph->protocol == IPPROTO_UDP) 1275 uh = (struct udphdr *)(iph + 1); 1276 } else { 1277 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1278 1279 if (iph->nexthdr == IPPROTO_UDP) 1280 uh = (struct udphdr *)(iph + 1); 1281 } 1282 if (uh) { 1283 if (uh->check) 1284 skb_shinfo(skb)->gso_type |= 1285 SKB_GSO_UDP_TUNNEL_CSUM; 1286 else 1287 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1288 } 1289 } 1290 #endif 1291 return skb; 1292 } 1293 1294 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1295 struct bnxt_tpa_info *tpa_info, 1296 struct rx_tpa_end_cmp *tpa_end, 1297 struct rx_tpa_end_cmp_ext *tpa_end1, 1298 struct sk_buff *skb) 1299 { 1300 #ifdef CONFIG_INET 1301 int payload_off; 1302 u16 segs; 1303 1304 segs = TPA_END_TPA_SEGS(tpa_end); 1305 if (segs == 1) 1306 return skb; 1307 1308 NAPI_GRO_CB(skb)->count = segs; 1309 skb_shinfo(skb)->gso_size = 1310 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1311 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1312 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 1313 RX_TPA_END_CMP_PAYLOAD_OFFSET) >> 1314 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; 1315 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1316 if (likely(skb)) 1317 tcp_gro_complete(skb); 1318 #endif 1319 return skb; 1320 } 1321 1322 /* Given the cfa_code of a received packet determine which 1323 * netdev (vf-rep or PF) the packet is destined to. 1324 */ 1325 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1326 { 1327 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1328 1329 /* if vf-rep dev is NULL, the must belongs to the PF */ 1330 return dev ? dev : bp->dev; 1331 } 1332 1333 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1334 struct bnxt_napi *bnapi, 1335 u32 *raw_cons, 1336 struct rx_tpa_end_cmp *tpa_end, 1337 struct rx_tpa_end_cmp_ext *tpa_end1, 1338 u8 *event) 1339 { 1340 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1341 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1342 u8 agg_id = TPA_END_AGG_ID(tpa_end); 1343 u8 *data_ptr, agg_bufs; 1344 u16 cp_cons = RING_CMP(*raw_cons); 1345 unsigned int len; 1346 struct bnxt_tpa_info *tpa_info; 1347 dma_addr_t mapping; 1348 struct sk_buff *skb; 1349 void *data; 1350 1351 if (unlikely(bnapi->in_reset)) { 1352 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); 1353 1354 if (rc < 0) 1355 return ERR_PTR(-EBUSY); 1356 return NULL; 1357 } 1358 1359 tpa_info = &rxr->rx_tpa[agg_id]; 1360 data = tpa_info->data; 1361 data_ptr = tpa_info->data_ptr; 1362 prefetch(data_ptr); 1363 len = tpa_info->len; 1364 mapping = tpa_info->mapping; 1365 1366 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 1367 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; 1368 1369 if (agg_bufs) { 1370 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1371 return ERR_PTR(-EBUSY); 1372 1373 *event |= BNXT_AGG_EVENT; 1374 cp_cons = NEXT_CMP(cp_cons); 1375 } 1376 1377 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1378 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1379 if (agg_bufs > MAX_SKB_FRAGS) 1380 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1381 agg_bufs, (int)MAX_SKB_FRAGS); 1382 return NULL; 1383 } 1384 1385 if (len <= bp->rx_copy_thresh) { 1386 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1387 if (!skb) { 1388 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1389 return NULL; 1390 } 1391 } else { 1392 u8 *new_data; 1393 dma_addr_t new_mapping; 1394 1395 new_data = __bnxt_alloc_rx_data(bp, 
&new_mapping, GFP_ATOMIC); 1396 if (!new_data) { 1397 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1398 return NULL; 1399 } 1400 1401 tpa_info->data = new_data; 1402 tpa_info->data_ptr = new_data + bp->rx_offset; 1403 tpa_info->mapping = new_mapping; 1404 1405 skb = build_skb(data, 0); 1406 dma_unmap_single_attrs(&bp->pdev->dev, mapping, 1407 bp->rx_buf_use_size, bp->rx_dir, 1408 DMA_ATTR_WEAK_ORDERING); 1409 1410 if (!skb) { 1411 kfree(data); 1412 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1413 return NULL; 1414 } 1415 skb_reserve(skb, bp->rx_offset); 1416 skb_put(skb, len); 1417 } 1418 1419 if (agg_bufs) { 1420 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); 1421 if (!skb) { 1422 /* Page reuse already handled by bnxt_rx_pages(). */ 1423 return NULL; 1424 } 1425 } 1426 1427 skb->protocol = 1428 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); 1429 1430 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1431 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1432 1433 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1434 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1435 u16 vlan_proto = tpa_info->metadata >> 1436 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1437 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; 1438 1439 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1440 } 1441 1442 skb_checksum_none_assert(skb); 1443 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1444 skb->ip_summed = CHECKSUM_UNNECESSARY; 1445 skb->csum_level = 1446 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1447 } 1448 1449 if (TPA_END_GRO(tpa_end)) 1450 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1451 1452 return skb; 1453 } 1454 1455 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1456 struct sk_buff *skb) 1457 { 1458 if (skb->dev != bp->dev) { 1459 /* this packet belongs to a vf-rep */ 1460 bnxt_vf_rep_rx(bp, skb); 1461 return; 1462 } 1463 skb_record_rx_queue(skb, bnapi->index); 1464 napi_gro_receive(&bnapi->napi, skb); 1465 } 1466 1467 /* returns the following: 1468 * 1 - 1 packet successfully received 1469 * 0 - successful TPA_START, packet not completed yet 1470 * -EBUSY - completion ring does not have all the agg buffers yet 1471 * -ENOMEM - packet aborted due to out of memory 1472 * -EIO - packet aborted due to hw error indicated in BD 1473 */ 1474 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, 1475 u8 *event) 1476 { 1477 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1478 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1479 struct net_device *dev = bp->dev; 1480 struct rx_cmp *rxcmp; 1481 struct rx_cmp_ext *rxcmp1; 1482 u32 tmp_raw_cons = *raw_cons; 1483 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 1484 struct bnxt_sw_rx_bd *rx_buf; 1485 unsigned int len; 1486 u8 *data_ptr, agg_bufs, cmp_type; 1487 dma_addr_t dma_addr; 1488 struct sk_buff *skb; 1489 void *data; 1490 int rc = 0; 1491 u32 misc; 1492 1493 rxcmp = (struct rx_cmp *) 1494 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1495 1496 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1497 cp_cons = RING_CMP(tmp_raw_cons); 1498 rxcmp1 = (struct rx_cmp_ext *) 1499 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1500 1501 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1502 return -EBUSY; 1503 1504 cmp_type = RX_CMP_TYPE(rxcmp); 1505 1506 prod = rxr->rx_prod; 1507 1508 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { 1509 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, 1510 
(struct rx_tpa_start_cmp_ext *)rxcmp1); 1511 1512 *event |= BNXT_RX_EVENT; 1513 goto next_rx_no_prod; 1514 1515 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1516 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, 1517 (struct rx_tpa_end_cmp *)rxcmp, 1518 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 1519 1520 if (IS_ERR(skb)) 1521 return -EBUSY; 1522 1523 rc = -ENOMEM; 1524 if (likely(skb)) { 1525 bnxt_deliver_skb(bp, bnapi, skb); 1526 rc = 1; 1527 } 1528 *event |= BNXT_RX_EVENT; 1529 goto next_rx_no_prod; 1530 } 1531 1532 cons = rxcmp->rx_cmp_opaque; 1533 rx_buf = &rxr->rx_buf_ring[cons]; 1534 data = rx_buf->data; 1535 data_ptr = rx_buf->data_ptr; 1536 if (unlikely(cons != rxr->rx_next_cons)) { 1537 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); 1538 1539 bnxt_sched_reset(bp, rxr); 1540 return rc1; 1541 } 1542 prefetch(data_ptr); 1543 1544 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 1545 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 1546 1547 if (agg_bufs) { 1548 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1549 return -EBUSY; 1550 1551 cp_cons = NEXT_CMP(cp_cons); 1552 *event |= BNXT_AGG_EVENT; 1553 } 1554 *event |= BNXT_RX_EVENT; 1555 1556 rx_buf->data = NULL; 1557 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 1558 bnxt_reuse_rx_data(rxr, cons, data); 1559 if (agg_bufs) 1560 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 1561 1562 rc = -EIO; 1563 goto next_rx; 1564 } 1565 1566 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1567 dma_addr = rx_buf->mapping; 1568 1569 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { 1570 rc = 1; 1571 goto next_rx; 1572 } 1573 1574 if (len <= bp->rx_copy_thresh) { 1575 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 1576 bnxt_reuse_rx_data(rxr, cons, data); 1577 if (!skb) { 1578 rc = -ENOMEM; 1579 goto next_rx; 1580 } 1581 } else { 1582 u32 payload; 1583 1584 if (rx_buf->data_ptr == data_ptr) 1585 payload = misc & RX_CMP_PAYLOAD_OFFSET; 1586 else 1587 payload = 0; 1588 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 1589 payload | len); 1590 if (!skb) { 1591 rc = -ENOMEM; 1592 goto next_rx; 1593 } 1594 } 1595 1596 if (agg_bufs) { 1597 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); 1598 if (!skb) { 1599 rc = -ENOMEM; 1600 goto next_rx; 1601 } 1602 } 1603 1604 if (RX_CMP_HASH_VALID(rxcmp)) { 1605 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); 1606 enum pkt_hash_types type = PKT_HASH_TYPE_L4; 1607 1608 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1609 if (hash_type != 1 && hash_type != 3) 1610 type = PKT_HASH_TYPE_L3; 1611 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 1612 } 1613 1614 cfa_code = RX_CMP_CFA_CODE(rxcmp1); 1615 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); 1616 1617 if ((rxcmp1->rx_cmp_flags2 & 1618 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1619 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1620 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1621 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; 1622 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1623 1624 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1625 } 1626 1627 skb_checksum_none_assert(skb); 1628 if (RX_CMP_L4_CS_OK(rxcmp1)) { 1629 if (dev->features & NETIF_F_RXCSUM) { 1630 skb->ip_summed = CHECKSUM_UNNECESSARY; 1631 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 1632 } 1633 } else { 1634 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 1635 if (dev->features & 
NETIF_F_RXCSUM) 1636 cpr->rx_l4_csum_errors++; 1637 } 1638 } 1639 1640 bnxt_deliver_skb(bp, bnapi, skb); 1641 rc = 1; 1642 1643 next_rx: 1644 rxr->rx_prod = NEXT_RX(prod); 1645 rxr->rx_next_cons = NEXT_RX(cons); 1646 1647 next_rx_no_prod: 1648 *raw_cons = tmp_raw_cons; 1649 1650 return rc; 1651 } 1652 1653 /* In netpoll mode, if we are using a combined completion ring, we need to 1654 * discard the rx packets and recycle the buffers. 1655 */ 1656 static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, 1657 u32 *raw_cons, u8 *event) 1658 { 1659 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1660 u32 tmp_raw_cons = *raw_cons; 1661 struct rx_cmp_ext *rxcmp1; 1662 struct rx_cmp *rxcmp; 1663 u16 cp_cons; 1664 u8 cmp_type; 1665 1666 cp_cons = RING_CMP(tmp_raw_cons); 1667 rxcmp = (struct rx_cmp *) 1668 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1669 1670 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1671 cp_cons = RING_CMP(tmp_raw_cons); 1672 rxcmp1 = (struct rx_cmp_ext *) 1673 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1674 1675 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1676 return -EBUSY; 1677 1678 cmp_type = RX_CMP_TYPE(rxcmp); 1679 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1680 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1681 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1682 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1683 struct rx_tpa_end_cmp_ext *tpa_end1; 1684 1685 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1686 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1687 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1688 } 1689 return bnxt_rx_pkt(bp, bnapi, raw_cons, event); 1690 } 1691 1692 #define BNXT_GET_EVENT_PORT(data) \ 1693 ((data) & \ 1694 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 1695 1696 static int bnxt_async_event_process(struct bnxt *bp, 1697 struct hwrm_async_event_cmpl *cmpl) 1698 { 1699 u16 event_id = le16_to_cpu(cmpl->event_id); 1700 1701 /* TODO CHIMP_FW: Define event id's for link change, error etc */ 1702 switch (event_id) { 1703 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 1704 u32 data1 = le32_to_cpu(cmpl->event_data1); 1705 struct bnxt_link_info *link_info = &bp->link_info; 1706 1707 if (BNXT_VF(bp)) 1708 goto async_event_process_exit; 1709 1710 /* print unsupported speed warning in forced speed mode only */ 1711 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 1712 (data1 & 0x20000)) { 1713 u16 fw_speed = link_info->force_link_speed; 1714 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 1715 1716 if (speed != SPEED_UNKNOWN) 1717 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 1718 speed); 1719 } 1720 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 1721 /* fall thru */ 1722 } 1723 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 1724 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 1725 break; 1726 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 1727 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 1728 break; 1729 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 1730 u32 data1 = le32_to_cpu(cmpl->event_data1); 1731 u16 port_id = BNXT_GET_EVENT_PORT(data1); 1732 1733 if (BNXT_VF(bp)) 1734 break; 1735 1736 if (bp->pf.port_id != port_id) 1737 break; 1738 1739 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1740 break; 1741 } 1742 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 1743 if (BNXT_PF(bp)) 1744 goto async_event_process_exit; 1745 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 1746 break; 1747 default: 1748 goto async_event_process_exit; 1749 } 1750 
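	/* Kick the sp task to service any sp_event bits set above.  Events
	 * that need no sp work jump straight to async_event_process_exit.
	 */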
bnxt_queue_sp_work(bp); 1751 async_event_process_exit: 1752 bnxt_ulp_async_events(bp, cmpl); 1753 return 0; 1754 } 1755 1756 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 1757 { 1758 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 1759 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 1760 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 1761 (struct hwrm_fwd_req_cmpl *)txcmp; 1762 1763 switch (cmpl_type) { 1764 case CMPL_BASE_TYPE_HWRM_DONE: 1765 seq_id = le16_to_cpu(h_cmpl->sequence_id); 1766 if (seq_id == bp->hwrm_intr_seq_id) 1767 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; 1768 else 1769 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); 1770 break; 1771 1772 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 1773 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 1774 1775 if ((vf_id < bp->pf.first_vf_id) || 1776 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 1777 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 1778 vf_id); 1779 return -EINVAL; 1780 } 1781 1782 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 1783 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 1784 bnxt_queue_sp_work(bp); 1785 break; 1786 1787 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 1788 bnxt_async_event_process(bp, 1789 (struct hwrm_async_event_cmpl *)txcmp); 1790 1791 default: 1792 break; 1793 } 1794 1795 return 0; 1796 } 1797 1798 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 1799 { 1800 struct bnxt_napi *bnapi = dev_instance; 1801 struct bnxt *bp = bnapi->bp; 1802 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1803 u32 cons = RING_CMP(cpr->cp_raw_cons); 1804 1805 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 1806 napi_schedule(&bnapi->napi); 1807 return IRQ_HANDLED; 1808 } 1809 1810 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 1811 { 1812 u32 raw_cons = cpr->cp_raw_cons; 1813 u16 cons = RING_CMP(raw_cons); 1814 struct tx_cmp *txcmp; 1815 1816 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 1817 1818 return TX_CMP_VALID(txcmp, raw_cons); 1819 } 1820 1821 static irqreturn_t bnxt_inta(int irq, void *dev_instance) 1822 { 1823 struct bnxt_napi *bnapi = dev_instance; 1824 struct bnxt *bp = bnapi->bp; 1825 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1826 u32 cons = RING_CMP(cpr->cp_raw_cons); 1827 u32 int_status; 1828 1829 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 1830 1831 if (!bnxt_has_work(bp, cpr)) { 1832 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); 1833 /* return if erroneous interrupt */ 1834 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id))) 1835 return IRQ_NONE; 1836 } 1837 1838 /* disable ring IRQ */ 1839 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell); 1840 1841 /* Return here if interrupt is shared and is disabled. */ 1842 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 1843 return IRQ_HANDLED; 1844 1845 napi_schedule(&bnapi->napi); 1846 return IRQ_HANDLED; 1847 } 1848 1849 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 1850 { 1851 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1852 u32 raw_cons = cpr->cp_raw_cons; 1853 u32 cons; 1854 int tx_pkts = 0; 1855 int rx_pkts = 0; 1856 u8 event = 0; 1857 struct tx_cmp *txcmp; 1858 1859 while (1) { 1860 int rc; 1861 1862 cons = RING_CMP(raw_cons); 1863 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 1864 1865 if (!TX_CMP_VALID(txcmp, raw_cons)) 1866 break; 1867 1868 /* The valid test of the entry must be done first before 1869 * reading any further. 
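		 * The dma_rmb() that follows orders the valid-bit check
		 * against the subsequent reads of the completion entry, so
		 * descriptor fields are never consumed before the hardware
		 * has finished writing them.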
1870 */ 1871 dma_rmb(); 1872 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1873 tx_pkts++; 1874 /* return full budget so NAPI will complete. */ 1875 if (unlikely(tx_pkts > bp->tx_wake_thresh)) 1876 rx_pkts = budget; 1877 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1878 if (likely(budget)) 1879 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1880 else 1881 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, 1882 &event); 1883 if (likely(rc >= 0)) 1884 rx_pkts += rc; 1885 /* Increment rx_pkts when rc is -ENOMEM to count towards 1886 * the NAPI budget. Otherwise, we may potentially loop 1887 * here forever if we consistently cannot allocate 1888 * buffers. 1889 */ 1890 else if (rc == -ENOMEM && budget) 1891 rx_pkts++; 1892 else if (rc == -EBUSY) /* partial completion */ 1893 break; 1894 } else if (unlikely((TX_CMP_TYPE(txcmp) == 1895 CMPL_BASE_TYPE_HWRM_DONE) || 1896 (TX_CMP_TYPE(txcmp) == 1897 CMPL_BASE_TYPE_HWRM_FWD_REQ) || 1898 (TX_CMP_TYPE(txcmp) == 1899 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) { 1900 bnxt_hwrm_handler(bp, txcmp); 1901 } 1902 raw_cons = NEXT_RAW_CMP(raw_cons); 1903 1904 if (rx_pkts == budget) 1905 break; 1906 } 1907 1908 if (event & BNXT_TX_EVENT) { 1909 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 1910 void __iomem *db = txr->tx_doorbell; 1911 u16 prod = txr->tx_prod; 1912 1913 /* Sync BD data before updating doorbell */ 1914 wmb(); 1915 1916 bnxt_db_write(bp, db, DB_KEY_TX | prod); 1917 } 1918 1919 cpr->cp_raw_cons = raw_cons; 1920 /* ACK completion ring before freeing tx ring and producing new 1921 * buffers in rx/agg rings to prevent overflowing the completion 1922 * ring. 1923 */ 1924 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 1925 1926 if (tx_pkts) 1927 bnapi->tx_int(bp, bnapi, tx_pkts); 1928 1929 if (event & BNXT_RX_EVENT) { 1930 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1931 1932 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); 1933 if (event & BNXT_AGG_EVENT) 1934 bnxt_db_write(bp, rxr->rx_agg_doorbell, 1935 DB_KEY_RX | rxr->rx_agg_prod); 1936 } 1937 return rx_pkts; 1938 } 1939 1940 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 1941 { 1942 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 1943 struct bnxt *bp = bnapi->bp; 1944 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1945 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1946 struct tx_cmp *txcmp; 1947 struct rx_cmp_ext *rxcmp1; 1948 u32 cp_cons, tmp_raw_cons; 1949 u32 raw_cons = cpr->cp_raw_cons; 1950 u32 rx_pkts = 0; 1951 u8 event = 0; 1952 1953 while (1) { 1954 int rc; 1955 1956 cp_cons = RING_CMP(raw_cons); 1957 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1958 1959 if (!TX_CMP_VALID(txcmp, raw_cons)) 1960 break; 1961 1962 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1963 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 1964 cp_cons = RING_CMP(tmp_raw_cons); 1965 rxcmp1 = (struct rx_cmp_ext *) 1966 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1967 1968 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1969 break; 1970 1971 /* force an error to recycle the buffer */ 1972 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1973 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1974 1975 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1976 if (likely(rc == -EIO) && budget) 1977 rx_pkts++; 1978 else if (rc == -EBUSY) /* partial completion */ 1979 break; 1980 } else if (unlikely(TX_CMP_TYPE(txcmp) == 1981 CMPL_BASE_TYPE_HWRM_DONE)) { 1982 bnxt_hwrm_handler(bp, txcmp); 1983 } else { 1984 netdev_err(bp->dev, 1985 "Invalid completion received on special ring\n"); 
1986 } 1987 raw_cons = NEXT_RAW_CMP(raw_cons); 1988 1989 if (rx_pkts == budget) 1990 break; 1991 } 1992 1993 cpr->cp_raw_cons = raw_cons; 1994 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 1995 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod); 1996 1997 if (event & BNXT_AGG_EVENT) 1998 bnxt_db_write(bp, rxr->rx_agg_doorbell, 1999 DB_KEY_RX | rxr->rx_agg_prod); 2000 2001 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 2002 napi_complete_done(napi, rx_pkts); 2003 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 2004 } 2005 return rx_pkts; 2006 } 2007 2008 static int bnxt_poll(struct napi_struct *napi, int budget) 2009 { 2010 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 2011 struct bnxt *bp = bnapi->bp; 2012 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2013 int work_done = 0; 2014 2015 while (1) { 2016 work_done += bnxt_poll_work(bp, bnapi, budget - work_done); 2017 2018 if (work_done >= budget) 2019 break; 2020 2021 if (!bnxt_has_work(bp, cpr)) { 2022 if (napi_complete_done(napi, work_done)) 2023 BNXT_CP_DB_REARM(cpr->cp_doorbell, 2024 cpr->cp_raw_cons); 2025 break; 2026 } 2027 } 2028 mmiowb(); 2029 return work_done; 2030 } 2031 2032 static void bnxt_free_tx_skbs(struct bnxt *bp) 2033 { 2034 int i, max_idx; 2035 struct pci_dev *pdev = bp->pdev; 2036 2037 if (!bp->tx_ring) 2038 return; 2039 2040 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 2041 for (i = 0; i < bp->tx_nr_rings; i++) { 2042 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2043 int j; 2044 2045 for (j = 0; j < max_idx;) { 2046 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 2047 struct sk_buff *skb = tx_buf->skb; 2048 int k, last; 2049 2050 if (!skb) { 2051 j++; 2052 continue; 2053 } 2054 2055 tx_buf->skb = NULL; 2056 2057 if (tx_buf->is_push) { 2058 dev_kfree_skb(skb); 2059 j += 2; 2060 continue; 2061 } 2062 2063 dma_unmap_single(&pdev->dev, 2064 dma_unmap_addr(tx_buf, mapping), 2065 skb_headlen(skb), 2066 PCI_DMA_TODEVICE); 2067 2068 last = tx_buf->nr_frags; 2069 j += 2; 2070 for (k = 0; k < last; k++, j++) { 2071 int ring_idx = j & bp->tx_ring_mask; 2072 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2073 2074 tx_buf = &txr->tx_buf_ring[ring_idx]; 2075 dma_unmap_page( 2076 &pdev->dev, 2077 dma_unmap_addr(tx_buf, mapping), 2078 skb_frag_size(frag), PCI_DMA_TODEVICE); 2079 } 2080 dev_kfree_skb(skb); 2081 } 2082 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); 2083 } 2084 } 2085 2086 static void bnxt_free_rx_skbs(struct bnxt *bp) 2087 { 2088 int i, max_idx, max_agg_idx; 2089 struct pci_dev *pdev = bp->pdev; 2090 2091 if (!bp->rx_ring) 2092 return; 2093 2094 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 2095 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 2096 for (i = 0; i < bp->rx_nr_rings; i++) { 2097 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2098 int j; 2099 2100 if (rxr->rx_tpa) { 2101 for (j = 0; j < MAX_TPA; j++) { 2102 struct bnxt_tpa_info *tpa_info = 2103 &rxr->rx_tpa[j]; 2104 u8 *data = tpa_info->data; 2105 2106 if (!data) 2107 continue; 2108 2109 dma_unmap_single_attrs(&pdev->dev, 2110 tpa_info->mapping, 2111 bp->rx_buf_use_size, 2112 bp->rx_dir, 2113 DMA_ATTR_WEAK_ORDERING); 2114 2115 tpa_info->data = NULL; 2116 2117 kfree(data); 2118 } 2119 } 2120 2121 for (j = 0; j < max_idx; j++) { 2122 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 2123 dma_addr_t mapping = rx_buf->mapping; 2124 void *data = rx_buf->data; 2125 2126 if (!data) 2127 continue; 2128 2129 rx_buf->data = NULL; 2130 2131 if (BNXT_RX_PAGE_MODE(bp)) { 2132 mapping -= bp->rx_dma_offset; 
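/* The stored DMA address includes bp->rx_dma_offset, so the offset is
 * removed above to recover the page-aligned address before unmapping
 * the whole page.
 */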
2133 dma_unmap_page_attrs(&pdev->dev, mapping, 2134 PAGE_SIZE, bp->rx_dir, 2135 DMA_ATTR_WEAK_ORDERING); 2136 __free_page(data); 2137 } else { 2138 dma_unmap_single_attrs(&pdev->dev, mapping, 2139 bp->rx_buf_use_size, 2140 bp->rx_dir, 2141 DMA_ATTR_WEAK_ORDERING); 2142 kfree(data); 2143 } 2144 } 2145 2146 for (j = 0; j < max_agg_idx; j++) { 2147 struct bnxt_sw_rx_agg_bd *rx_agg_buf = 2148 &rxr->rx_agg_ring[j]; 2149 struct page *page = rx_agg_buf->page; 2150 2151 if (!page) 2152 continue; 2153 2154 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, 2155 BNXT_RX_PAGE_SIZE, 2156 PCI_DMA_FROMDEVICE, 2157 DMA_ATTR_WEAK_ORDERING); 2158 2159 rx_agg_buf->page = NULL; 2160 __clear_bit(j, rxr->rx_agg_bmap); 2161 2162 __free_page(page); 2163 } 2164 if (rxr->rx_page) { 2165 __free_page(rxr->rx_page); 2166 rxr->rx_page = NULL; 2167 } 2168 } 2169 } 2170 2171 static void bnxt_free_skbs(struct bnxt *bp) 2172 { 2173 bnxt_free_tx_skbs(bp); 2174 bnxt_free_rx_skbs(bp); 2175 } 2176 2177 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 2178 { 2179 struct pci_dev *pdev = bp->pdev; 2180 int i; 2181 2182 for (i = 0; i < ring->nr_pages; i++) { 2183 if (!ring->pg_arr[i]) 2184 continue; 2185 2186 dma_free_coherent(&pdev->dev, ring->page_size, 2187 ring->pg_arr[i], ring->dma_arr[i]); 2188 2189 ring->pg_arr[i] = NULL; 2190 } 2191 if (ring->pg_tbl) { 2192 dma_free_coherent(&pdev->dev, ring->nr_pages * 8, 2193 ring->pg_tbl, ring->pg_tbl_map); 2194 ring->pg_tbl = NULL; 2195 } 2196 if (ring->vmem_size && *ring->vmem) { 2197 vfree(*ring->vmem); 2198 *ring->vmem = NULL; 2199 } 2200 } 2201 2202 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) 2203 { 2204 int i; 2205 struct pci_dev *pdev = bp->pdev; 2206 2207 if (ring->nr_pages > 1) { 2208 ring->pg_tbl = dma_alloc_coherent(&pdev->dev, 2209 ring->nr_pages * 8, 2210 &ring->pg_tbl_map, 2211 GFP_KERNEL); 2212 if (!ring->pg_tbl) 2213 return -ENOMEM; 2214 } 2215 2216 for (i = 0; i < ring->nr_pages; i++) { 2217 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 2218 ring->page_size, 2219 &ring->dma_arr[i], 2220 GFP_KERNEL); 2221 if (!ring->pg_arr[i]) 2222 return -ENOMEM; 2223 2224 if (ring->nr_pages > 1) 2225 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]); 2226 } 2227 2228 if (ring->vmem_size) { 2229 *ring->vmem = vzalloc(ring->vmem_size); 2230 if (!(*ring->vmem)) 2231 return -ENOMEM; 2232 } 2233 return 0; 2234 } 2235 2236 static void bnxt_free_rx_rings(struct bnxt *bp) 2237 { 2238 int i; 2239 2240 if (!bp->rx_ring) 2241 return; 2242 2243 for (i = 0; i < bp->rx_nr_rings; i++) { 2244 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2245 struct bnxt_ring_struct *ring; 2246 2247 if (rxr->xdp_prog) 2248 bpf_prog_put(rxr->xdp_prog); 2249 2250 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 2251 xdp_rxq_info_unreg(&rxr->xdp_rxq); 2252 2253 kfree(rxr->rx_tpa); 2254 rxr->rx_tpa = NULL; 2255 2256 kfree(rxr->rx_agg_bmap); 2257 rxr->rx_agg_bmap = NULL; 2258 2259 ring = &rxr->rx_ring_struct; 2260 bnxt_free_ring(bp, ring); 2261 2262 ring = &rxr->rx_agg_ring_struct; 2263 bnxt_free_ring(bp, ring); 2264 } 2265 } 2266 2267 static int bnxt_alloc_rx_rings(struct bnxt *bp) 2268 { 2269 int i, rc, agg_rings = 0, tpa_rings = 0; 2270 2271 if (!bp->rx_ring) 2272 return -ENOMEM; 2273 2274 if (bp->flags & BNXT_FLAG_AGG_RINGS) 2275 agg_rings = 1; 2276 2277 if (bp->flags & BNXT_FLAG_TPA) 2278 tpa_rings = 1; 2279 2280 for (i = 0; i < bp->rx_nr_rings; i++) { 2281 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 2282 struct bnxt_ring_struct *ring; 2283 2284 ring = 
&rxr->rx_ring_struct; 2285 2286 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); 2287 if (rc < 0) 2288 return rc; 2289 2290 rc = bnxt_alloc_ring(bp, ring); 2291 if (rc) 2292 return rc; 2293 2294 if (agg_rings) { 2295 u16 mem_size; 2296 2297 ring = &rxr->rx_agg_ring_struct; 2298 rc = bnxt_alloc_ring(bp, ring); 2299 if (rc) 2300 return rc; 2301 2302 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 2303 mem_size = rxr->rx_agg_bmap_size / 8; 2304 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 2305 if (!rxr->rx_agg_bmap) 2306 return -ENOMEM; 2307 2308 if (tpa_rings) { 2309 rxr->rx_tpa = kcalloc(MAX_TPA, 2310 sizeof(struct bnxt_tpa_info), 2311 GFP_KERNEL); 2312 if (!rxr->rx_tpa) 2313 return -ENOMEM; 2314 } 2315 } 2316 } 2317 return 0; 2318 } 2319 2320 static void bnxt_free_tx_rings(struct bnxt *bp) 2321 { 2322 int i; 2323 struct pci_dev *pdev = bp->pdev; 2324 2325 if (!bp->tx_ring) 2326 return; 2327 2328 for (i = 0; i < bp->tx_nr_rings; i++) { 2329 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2330 struct bnxt_ring_struct *ring; 2331 2332 if (txr->tx_push) { 2333 dma_free_coherent(&pdev->dev, bp->tx_push_size, 2334 txr->tx_push, txr->tx_push_mapping); 2335 txr->tx_push = NULL; 2336 } 2337 2338 ring = &txr->tx_ring_struct; 2339 2340 bnxt_free_ring(bp, ring); 2341 } 2342 } 2343 2344 static int bnxt_alloc_tx_rings(struct bnxt *bp) 2345 { 2346 int i, j, rc; 2347 struct pci_dev *pdev = bp->pdev; 2348 2349 bp->tx_push_size = 0; 2350 if (bp->tx_push_thresh) { 2351 int push_size; 2352 2353 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 2354 bp->tx_push_thresh); 2355 2356 if (push_size > 256) { 2357 push_size = 0; 2358 bp->tx_push_thresh = 0; 2359 } 2360 2361 bp->tx_push_size = push_size; 2362 } 2363 2364 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2365 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2366 struct bnxt_ring_struct *ring; 2367 2368 ring = &txr->tx_ring_struct; 2369 2370 rc = bnxt_alloc_ring(bp, ring); 2371 if (rc) 2372 return rc; 2373 2374 if (bp->tx_push_size) { 2375 dma_addr_t mapping; 2376 2377 /* One pre-allocated DMA buffer to backup 2378 * TX push operation 2379 */ 2380 txr->tx_push = dma_alloc_coherent(&pdev->dev, 2381 bp->tx_push_size, 2382 &txr->tx_push_mapping, 2383 GFP_KERNEL); 2384 2385 if (!txr->tx_push) 2386 return -ENOMEM; 2387 2388 mapping = txr->tx_push_mapping + 2389 sizeof(struct tx_push_bd); 2390 txr->data_mapping = cpu_to_le64(mapping); 2391 2392 memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); 2393 } 2394 ring->queue_id = bp->q_info[j].queue_id; 2395 if (i < bp->tx_nr_rings_xdp) 2396 continue; 2397 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2398 j++; 2399 } 2400 return 0; 2401 } 2402 2403 static void bnxt_free_cp_rings(struct bnxt *bp) 2404 { 2405 int i; 2406 2407 if (!bp->bnapi) 2408 return; 2409 2410 for (i = 0; i < bp->cp_nr_rings; i++) { 2411 struct bnxt_napi *bnapi = bp->bnapi[i]; 2412 struct bnxt_cp_ring_info *cpr; 2413 struct bnxt_ring_struct *ring; 2414 2415 if (!bnapi) 2416 continue; 2417 2418 cpr = &bnapi->cp_ring; 2419 ring = &cpr->cp_ring_struct; 2420 2421 bnxt_free_ring(bp, ring); 2422 } 2423 } 2424 2425 static int bnxt_alloc_cp_rings(struct bnxt *bp) 2426 { 2427 int i, rc; 2428 2429 for (i = 0; i < bp->cp_nr_rings; i++) { 2430 struct bnxt_napi *bnapi = bp->bnapi[i]; 2431 struct bnxt_cp_ring_info *cpr; 2432 struct bnxt_ring_struct *ring; 2433 2434 if (!bnapi) 2435 continue; 2436 2437 cpr = &bnapi->cp_ring; 2438 ring = &cpr->cp_ring_struct; 2439 2440 rc = bnxt_alloc_ring(bp, ring); 2441 if (rc) 2442 return rc; 
2443 } 2444 return 0; 2445 } 2446 2447 static void bnxt_init_ring_struct(struct bnxt *bp) 2448 { 2449 int i; 2450 2451 for (i = 0; i < bp->cp_nr_rings; i++) { 2452 struct bnxt_napi *bnapi = bp->bnapi[i]; 2453 struct bnxt_cp_ring_info *cpr; 2454 struct bnxt_rx_ring_info *rxr; 2455 struct bnxt_tx_ring_info *txr; 2456 struct bnxt_ring_struct *ring; 2457 2458 if (!bnapi) 2459 continue; 2460 2461 cpr = &bnapi->cp_ring; 2462 ring = &cpr->cp_ring_struct; 2463 ring->nr_pages = bp->cp_nr_pages; 2464 ring->page_size = HW_CMPD_RING_SIZE; 2465 ring->pg_arr = (void **)cpr->cp_desc_ring; 2466 ring->dma_arr = cpr->cp_desc_mapping; 2467 ring->vmem_size = 0; 2468 2469 rxr = bnapi->rx_ring; 2470 if (!rxr) 2471 goto skip_rx; 2472 2473 ring = &rxr->rx_ring_struct; 2474 ring->nr_pages = bp->rx_nr_pages; 2475 ring->page_size = HW_RXBD_RING_SIZE; 2476 ring->pg_arr = (void **)rxr->rx_desc_ring; 2477 ring->dma_arr = rxr->rx_desc_mapping; 2478 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 2479 ring->vmem = (void **)&rxr->rx_buf_ring; 2480 2481 ring = &rxr->rx_agg_ring_struct; 2482 ring->nr_pages = bp->rx_agg_nr_pages; 2483 ring->page_size = HW_RXBD_RING_SIZE; 2484 ring->pg_arr = (void **)rxr->rx_agg_desc_ring; 2485 ring->dma_arr = rxr->rx_agg_desc_mapping; 2486 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 2487 ring->vmem = (void **)&rxr->rx_agg_ring; 2488 2489 skip_rx: 2490 txr = bnapi->tx_ring; 2491 if (!txr) 2492 continue; 2493 2494 ring = &txr->tx_ring_struct; 2495 ring->nr_pages = bp->tx_nr_pages; 2496 ring->page_size = HW_RXBD_RING_SIZE; 2497 ring->pg_arr = (void **)txr->tx_desc_ring; 2498 ring->dma_arr = txr->tx_desc_mapping; 2499 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 2500 ring->vmem = (void **)&txr->tx_buf_ring; 2501 } 2502 } 2503 2504 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 2505 { 2506 int i; 2507 u32 prod; 2508 struct rx_bd **rx_buf_ring; 2509 2510 rx_buf_ring = (struct rx_bd **)ring->pg_arr; 2511 for (i = 0, prod = 0; i < ring->nr_pages; i++) { 2512 int j; 2513 struct rx_bd *rxbd; 2514 2515 rxbd = rx_buf_ring[i]; 2516 if (!rxbd) 2517 continue; 2518 2519 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 2520 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 2521 rxbd->rx_bd_opaque = prod; 2522 } 2523 } 2524 } 2525 2526 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 2527 { 2528 struct net_device *dev = bp->dev; 2529 struct bnxt_rx_ring_info *rxr; 2530 struct bnxt_ring_struct *ring; 2531 u32 prod, type; 2532 int i; 2533 2534 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 2535 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 2536 2537 if (NET_IP_ALIGN == 2) 2538 type |= RX_BD_FLAGS_SOP; 2539 2540 rxr = &bp->rx_ring[ring_nr]; 2541 ring = &rxr->rx_ring_struct; 2542 bnxt_init_rxbd_pages(ring, type); 2543 2544 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 2545 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); 2546 if (IS_ERR(rxr->xdp_prog)) { 2547 int rc = PTR_ERR(rxr->xdp_prog); 2548 2549 rxr->xdp_prog = NULL; 2550 return rc; 2551 } 2552 } 2553 prod = rxr->rx_prod; 2554 for (i = 0; i < bp->rx_ring_size; i++) { 2555 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { 2556 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", 2557 ring_nr, i, bp->rx_ring_size); 2558 break; 2559 } 2560 prod = NEXT_RX(prod); 2561 } 2562 rxr->rx_prod = prod; 2563 ring->fw_ring_id = INVALID_HW_RING_ID; 2564 2565 ring = &rxr->rx_agg_ring_struct; 2566 ring->fw_ring_id = INVALID_HW_RING_ID; 2567 2568 if (!(bp->flags & 
BNXT_FLAG_AGG_RINGS)) 2569 return 0; 2570 2571 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 2572 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 2573 2574 bnxt_init_rxbd_pages(ring, type); 2575 2576 prod = rxr->rx_agg_prod; 2577 for (i = 0; i < bp->rx_agg_ring_size; i++) { 2578 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { 2579 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", 2580 ring_nr, i, bp->rx_ring_size); 2581 break; 2582 } 2583 prod = NEXT_RX_AGG(prod); 2584 } 2585 rxr->rx_agg_prod = prod; 2586 2587 if (bp->flags & BNXT_FLAG_TPA) { 2588 if (rxr->rx_tpa) { 2589 u8 *data; 2590 dma_addr_t mapping; 2591 2592 for (i = 0; i < MAX_TPA; i++) { 2593 data = __bnxt_alloc_rx_data(bp, &mapping, 2594 GFP_KERNEL); 2595 if (!data) 2596 return -ENOMEM; 2597 2598 rxr->rx_tpa[i].data = data; 2599 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 2600 rxr->rx_tpa[i].mapping = mapping; 2601 } 2602 } else { 2603 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); 2604 return -ENOMEM; 2605 } 2606 } 2607 2608 return 0; 2609 } 2610 2611 static void bnxt_init_cp_rings(struct bnxt *bp) 2612 { 2613 int i; 2614 2615 for (i = 0; i < bp->cp_nr_rings; i++) { 2616 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 2617 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 2618 2619 ring->fw_ring_id = INVALID_HW_RING_ID; 2620 } 2621 } 2622 2623 static int bnxt_init_rx_rings(struct bnxt *bp) 2624 { 2625 int i, rc = 0; 2626 2627 if (BNXT_RX_PAGE_MODE(bp)) { 2628 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 2629 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 2630 } else { 2631 bp->rx_offset = BNXT_RX_OFFSET; 2632 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 2633 } 2634 2635 for (i = 0; i < bp->rx_nr_rings; i++) { 2636 rc = bnxt_init_one_rx_ring(bp, i); 2637 if (rc) 2638 break; 2639 } 2640 2641 return rc; 2642 } 2643 2644 static int bnxt_init_tx_rings(struct bnxt *bp) 2645 { 2646 u16 i; 2647 2648 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 2649 MAX_SKB_FRAGS + 1); 2650 2651 for (i = 0; i < bp->tx_nr_rings; i++) { 2652 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2653 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 2654 2655 ring->fw_ring_id = INVALID_HW_RING_ID; 2656 } 2657 2658 return 0; 2659 } 2660 2661 static void bnxt_free_ring_grps(struct bnxt *bp) 2662 { 2663 kfree(bp->grp_info); 2664 bp->grp_info = NULL; 2665 } 2666 2667 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 2668 { 2669 int i; 2670 2671 if (irq_re_init) { 2672 bp->grp_info = kcalloc(bp->cp_nr_rings, 2673 sizeof(struct bnxt_ring_grp_info), 2674 GFP_KERNEL); 2675 if (!bp->grp_info) 2676 return -ENOMEM; 2677 } 2678 for (i = 0; i < bp->cp_nr_rings; i++) { 2679 if (irq_re_init) 2680 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 2681 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 2682 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 2683 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 2684 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 2685 } 2686 return 0; 2687 } 2688 2689 static void bnxt_free_vnics(struct bnxt *bp) 2690 { 2691 kfree(bp->vnic_info); 2692 bp->vnic_info = NULL; 2693 bp->nr_vnics = 0; 2694 } 2695 2696 static int bnxt_alloc_vnics(struct bnxt *bp) 2697 { 2698 int num_vnics = 1; 2699 2700 #ifdef CONFIG_RFS_ACCEL 2701 if (bp->flags & BNXT_FLAG_RFS) 2702 num_vnics += bp->rx_nr_rings; 2703 #endif 2704 2705 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 2706 num_vnics++; 2707 2708 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 2709 GFP_KERNEL); 2710 if 
(!bp->vnic_info) 2711 return -ENOMEM; 2712 2713 bp->nr_vnics = num_vnics; 2714 return 0; 2715 } 2716 2717 static void bnxt_init_vnics(struct bnxt *bp) 2718 { 2719 int i; 2720 2721 for (i = 0; i < bp->nr_vnics; i++) { 2722 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2723 2724 vnic->fw_vnic_id = INVALID_HW_RING_ID; 2725 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 2726 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 2727 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 2728 2729 if (bp->vnic_info[i].rss_hash_key) { 2730 if (i == 0) 2731 prandom_bytes(vnic->rss_hash_key, 2732 HW_HASH_KEY_SIZE); 2733 else 2734 memcpy(vnic->rss_hash_key, 2735 bp->vnic_info[0].rss_hash_key, 2736 HW_HASH_KEY_SIZE); 2737 } 2738 } 2739 } 2740 2741 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 2742 { 2743 int pages; 2744 2745 pages = ring_size / desc_per_pg; 2746 2747 if (!pages) 2748 return 1; 2749 2750 pages++; 2751 2752 while (pages & (pages - 1)) 2753 pages++; 2754 2755 return pages; 2756 } 2757 2758 void bnxt_set_tpa_flags(struct bnxt *bp) 2759 { 2760 bp->flags &= ~BNXT_FLAG_TPA; 2761 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 2762 return; 2763 if (bp->dev->features & NETIF_F_LRO) 2764 bp->flags |= BNXT_FLAG_LRO; 2765 else if (bp->dev->features & NETIF_F_GRO_HW) 2766 bp->flags |= BNXT_FLAG_GRO; 2767 } 2768 2769 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 2770 * be set on entry. 2771 */ 2772 void bnxt_set_ring_params(struct bnxt *bp) 2773 { 2774 u32 ring_size, rx_size, rx_space; 2775 u32 agg_factor = 0, agg_ring_size = 0; 2776 2777 /* 8 for CRC and VLAN */ 2778 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 2779 2780 rx_space = rx_size + NET_SKB_PAD + 2781 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2782 2783 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; 2784 ring_size = bp->rx_ring_size; 2785 bp->rx_agg_ring_size = 0; 2786 bp->rx_agg_nr_pages = 0; 2787 2788 if (bp->flags & BNXT_FLAG_TPA) 2789 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 2790 2791 bp->flags &= ~BNXT_FLAG_JUMBO; 2792 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 2793 u32 jumbo_factor; 2794 2795 bp->flags |= BNXT_FLAG_JUMBO; 2796 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 2797 if (jumbo_factor > agg_factor) 2798 agg_factor = jumbo_factor; 2799 } 2800 agg_ring_size = ring_size * agg_factor; 2801 2802 if (agg_ring_size) { 2803 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 2804 RX_DESC_CNT); 2805 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 2806 u32 tmp = agg_ring_size; 2807 2808 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 2809 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 2810 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 2811 tmp, agg_ring_size); 2812 } 2813 bp->rx_agg_ring_size = agg_ring_size; 2814 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 2815 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); 2816 rx_space = rx_size + NET_SKB_PAD + 2817 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2818 } 2819 2820 bp->rx_buf_use_size = rx_size; 2821 bp->rx_buf_size = rx_space; 2822 2823 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 2824 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 2825 2826 ring_size = bp->tx_ring_size; 2827 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 2828 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 2829 2830 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; 2831 
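/* Size the completion ring for the worst case: each RX packet can
 * consume a two-entry completion plus one entry per aggregation
 * buffer, and each TX BD completes individually, hence
 * rx_ring_size * (2 + agg_factor) + tx_ring_size above.
 */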
bp->cp_ring_size = ring_size; 2832 2833 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 2834 if (bp->cp_nr_pages > MAX_CP_PAGES) { 2835 bp->cp_nr_pages = MAX_CP_PAGES; 2836 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 2837 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 2838 ring_size, bp->cp_ring_size); 2839 } 2840 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 2841 bp->cp_ring_mask = bp->cp_bit - 1; 2842 } 2843 2844 /* Changing allocation mode of RX rings. 2845 * TODO: Update when extending xdp_rxq_info to support allocation modes. 2846 */ 2847 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 2848 { 2849 if (page_mode) { 2850 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) 2851 return -EOPNOTSUPP; 2852 bp->dev->max_mtu = 2853 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 2854 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 2855 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; 2856 bp->rx_dir = DMA_BIDIRECTIONAL; 2857 bp->rx_skb_func = bnxt_rx_page_skb; 2858 /* Disable LRO or GRO_HW */ 2859 netdev_update_features(bp->dev); 2860 } else { 2861 bp->dev->max_mtu = bp->max_mtu; 2862 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 2863 bp->rx_dir = DMA_FROM_DEVICE; 2864 bp->rx_skb_func = bnxt_rx_skb; 2865 } 2866 return 0; 2867 } 2868 2869 static void bnxt_free_vnic_attributes(struct bnxt *bp) 2870 { 2871 int i; 2872 struct bnxt_vnic_info *vnic; 2873 struct pci_dev *pdev = bp->pdev; 2874 2875 if (!bp->vnic_info) 2876 return; 2877 2878 for (i = 0; i < bp->nr_vnics; i++) { 2879 vnic = &bp->vnic_info[i]; 2880 2881 kfree(vnic->fw_grp_ids); 2882 vnic->fw_grp_ids = NULL; 2883 2884 kfree(vnic->uc_list); 2885 vnic->uc_list = NULL; 2886 2887 if (vnic->mc_list) { 2888 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 2889 vnic->mc_list, vnic->mc_list_mapping); 2890 vnic->mc_list = NULL; 2891 } 2892 2893 if (vnic->rss_table) { 2894 dma_free_coherent(&pdev->dev, PAGE_SIZE, 2895 vnic->rss_table, 2896 vnic->rss_table_dma_addr); 2897 vnic->rss_table = NULL; 2898 } 2899 2900 vnic->rss_hash_key = NULL; 2901 vnic->flags = 0; 2902 } 2903 } 2904 2905 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 2906 { 2907 int i, rc = 0, size; 2908 struct bnxt_vnic_info *vnic; 2909 struct pci_dev *pdev = bp->pdev; 2910 int max_rings; 2911 2912 for (i = 0; i < bp->nr_vnics; i++) { 2913 vnic = &bp->vnic_info[i]; 2914 2915 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 2916 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 2917 2918 if (mem_size > 0) { 2919 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 2920 if (!vnic->uc_list) { 2921 rc = -ENOMEM; 2922 goto out; 2923 } 2924 } 2925 } 2926 2927 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 2928 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 2929 vnic->mc_list = 2930 dma_alloc_coherent(&pdev->dev, 2931 vnic->mc_list_size, 2932 &vnic->mc_list_mapping, 2933 GFP_KERNEL); 2934 if (!vnic->mc_list) { 2935 rc = -ENOMEM; 2936 goto out; 2937 } 2938 } 2939 2940 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 2941 max_rings = bp->rx_nr_rings; 2942 else 2943 max_rings = 1; 2944 2945 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 2946 if (!vnic->fw_grp_ids) { 2947 rc = -ENOMEM; 2948 goto out; 2949 } 2950 2951 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) && 2952 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 2953 continue; 2954 2955 /* Allocate rss table and hash key */ 2956 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2957 &vnic->rss_table_dma_addr, 2958 GFP_KERNEL); 2959 if (!vnic->rss_table) { 2960 rc = -ENOMEM; 2961 goto out; 2962 } 2963 2964 size 
= L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 2965 2966 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 2967 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 2968 } 2969 return 0; 2970 2971 out: 2972 return rc; 2973 } 2974 2975 static void bnxt_free_hwrm_resources(struct bnxt *bp) 2976 { 2977 struct pci_dev *pdev = bp->pdev; 2978 2979 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 2980 bp->hwrm_cmd_resp_dma_addr); 2981 2982 bp->hwrm_cmd_resp_addr = NULL; 2983 if (bp->hwrm_dbg_resp_addr) { 2984 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, 2985 bp->hwrm_dbg_resp_addr, 2986 bp->hwrm_dbg_resp_dma_addr); 2987 2988 bp->hwrm_dbg_resp_addr = NULL; 2989 } 2990 } 2991 2992 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 2993 { 2994 struct pci_dev *pdev = bp->pdev; 2995 2996 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, 2997 &bp->hwrm_cmd_resp_dma_addr, 2998 GFP_KERNEL); 2999 if (!bp->hwrm_cmd_resp_addr) 3000 return -ENOMEM; 3001 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, 3002 HWRM_DBG_REG_BUF_SIZE, 3003 &bp->hwrm_dbg_resp_dma_addr, 3004 GFP_KERNEL); 3005 if (!bp->hwrm_dbg_resp_addr) 3006 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); 3007 3008 return 0; 3009 } 3010 3011 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) 3012 { 3013 if (bp->hwrm_short_cmd_req_addr) { 3014 struct pci_dev *pdev = bp->pdev; 3015 3016 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, 3017 bp->hwrm_short_cmd_req_addr, 3018 bp->hwrm_short_cmd_req_dma_addr); 3019 bp->hwrm_short_cmd_req_addr = NULL; 3020 } 3021 } 3022 3023 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) 3024 { 3025 struct pci_dev *pdev = bp->pdev; 3026 3027 bp->hwrm_short_cmd_req_addr = 3028 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN, 3029 &bp->hwrm_short_cmd_req_dma_addr, 3030 GFP_KERNEL); 3031 if (!bp->hwrm_short_cmd_req_addr) 3032 return -ENOMEM; 3033 3034 return 0; 3035 } 3036 3037 static void bnxt_free_stats(struct bnxt *bp) 3038 { 3039 u32 size, i; 3040 struct pci_dev *pdev = bp->pdev; 3041 3042 if (bp->hw_rx_port_stats) { 3043 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, 3044 bp->hw_rx_port_stats, 3045 bp->hw_rx_port_stats_map); 3046 bp->hw_rx_port_stats = NULL; 3047 bp->flags &= ~BNXT_FLAG_PORT_STATS; 3048 } 3049 3050 if (!bp->bnapi) 3051 return; 3052 3053 size = sizeof(struct ctx_hw_stats); 3054 3055 for (i = 0; i < bp->cp_nr_rings; i++) { 3056 struct bnxt_napi *bnapi = bp->bnapi[i]; 3057 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3058 3059 if (cpr->hw_stats) { 3060 dma_free_coherent(&pdev->dev, size, cpr->hw_stats, 3061 cpr->hw_stats_map); 3062 cpr->hw_stats = NULL; 3063 } 3064 } 3065 } 3066 3067 static int bnxt_alloc_stats(struct bnxt *bp) 3068 { 3069 u32 size, i; 3070 struct pci_dev *pdev = bp->pdev; 3071 3072 size = sizeof(struct ctx_hw_stats); 3073 3074 for (i = 0; i < bp->cp_nr_rings; i++) { 3075 struct bnxt_napi *bnapi = bp->bnapi[i]; 3076 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3077 3078 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, 3079 &cpr->hw_stats_map, 3080 GFP_KERNEL); 3081 if (!cpr->hw_stats) 3082 return -ENOMEM; 3083 3084 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 3085 } 3086 3087 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { 3088 bp->hw_port_stats_size = sizeof(struct rx_port_stats) + 3089 sizeof(struct tx_port_stats) + 1024; 3090 3091 bp->hw_rx_port_stats = 3092 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, 3093 &bp->hw_rx_port_stats_map, 3094 
GFP_KERNEL); 3095 if (!bp->hw_rx_port_stats) 3096 return -ENOMEM; 3097 3098 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 3099 512; 3100 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + 3101 sizeof(struct rx_port_stats) + 512; 3102 bp->flags |= BNXT_FLAG_PORT_STATS; 3103 } 3104 return 0; 3105 } 3106 3107 static void bnxt_clear_ring_indices(struct bnxt *bp) 3108 { 3109 int i; 3110 3111 if (!bp->bnapi) 3112 return; 3113 3114 for (i = 0; i < bp->cp_nr_rings; i++) { 3115 struct bnxt_napi *bnapi = bp->bnapi[i]; 3116 struct bnxt_cp_ring_info *cpr; 3117 struct bnxt_rx_ring_info *rxr; 3118 struct bnxt_tx_ring_info *txr; 3119 3120 if (!bnapi) 3121 continue; 3122 3123 cpr = &bnapi->cp_ring; 3124 cpr->cp_raw_cons = 0; 3125 3126 txr = bnapi->tx_ring; 3127 if (txr) { 3128 txr->tx_prod = 0; 3129 txr->tx_cons = 0; 3130 } 3131 3132 rxr = bnapi->rx_ring; 3133 if (rxr) { 3134 rxr->rx_prod = 0; 3135 rxr->rx_agg_prod = 0; 3136 rxr->rx_sw_agg_prod = 0; 3137 rxr->rx_next_cons = 0; 3138 } 3139 } 3140 } 3141 3142 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) 3143 { 3144 #ifdef CONFIG_RFS_ACCEL 3145 int i; 3146 3147 /* Under rtnl_lock and all our NAPIs have been disabled. It's 3148 * safe to delete the hash table. 3149 */ 3150 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 3151 struct hlist_head *head; 3152 struct hlist_node *tmp; 3153 struct bnxt_ntuple_filter *fltr; 3154 3155 head = &bp->ntp_fltr_hash_tbl[i]; 3156 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 3157 hlist_del(&fltr->hash); 3158 kfree(fltr); 3159 } 3160 } 3161 if (irq_reinit) { 3162 kfree(bp->ntp_fltr_bmap); 3163 bp->ntp_fltr_bmap = NULL; 3164 } 3165 bp->ntp_fltr_count = 0; 3166 #endif 3167 } 3168 3169 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 3170 { 3171 #ifdef CONFIG_RFS_ACCEL 3172 int i, rc = 0; 3173 3174 if (!(bp->flags & BNXT_FLAG_RFS)) 3175 return 0; 3176 3177 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 3178 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 3179 3180 bp->ntp_fltr_count = 0; 3181 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), 3182 sizeof(long), 3183 GFP_KERNEL); 3184 3185 if (!bp->ntp_fltr_bmap) 3186 rc = -ENOMEM; 3187 3188 return rc; 3189 #else 3190 return 0; 3191 #endif 3192 } 3193 3194 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 3195 { 3196 bnxt_free_vnic_attributes(bp); 3197 bnxt_free_tx_rings(bp); 3198 bnxt_free_rx_rings(bp); 3199 bnxt_free_cp_rings(bp); 3200 bnxt_free_ntp_fltrs(bp, irq_re_init); 3201 if (irq_re_init) { 3202 bnxt_free_stats(bp); 3203 bnxt_free_ring_grps(bp); 3204 bnxt_free_vnics(bp); 3205 kfree(bp->tx_ring_map); 3206 bp->tx_ring_map = NULL; 3207 kfree(bp->tx_ring); 3208 bp->tx_ring = NULL; 3209 kfree(bp->rx_ring); 3210 bp->rx_ring = NULL; 3211 kfree(bp->bnapi); 3212 bp->bnapi = NULL; 3213 } else { 3214 bnxt_clear_ring_indices(bp); 3215 } 3216 } 3217 3218 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 3219 { 3220 int i, j, rc, size, arr_size; 3221 void *bnapi; 3222 3223 if (irq_re_init) { 3224 /* Allocate bnapi mem pointer array and mem block for 3225 * all queues 3226 */ 3227 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 3228 bp->cp_nr_rings); 3229 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 3230 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 3231 if (!bnapi) 3232 return -ENOMEM; 3233 3234 bp->bnapi = bnapi; 3235 bnapi += arr_size; 3236 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 3237 bp->bnapi[i] = bnapi; 3238 bp->bnapi[i]->index = i; 3239 bp->bnapi[i]->bp = bp; 
3240 } 3241 3242 bp->rx_ring = kcalloc(bp->rx_nr_rings, 3243 sizeof(struct bnxt_rx_ring_info), 3244 GFP_KERNEL); 3245 if (!bp->rx_ring) 3246 return -ENOMEM; 3247 3248 for (i = 0; i < bp->rx_nr_rings; i++) { 3249 bp->rx_ring[i].bnapi = bp->bnapi[i]; 3250 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 3251 } 3252 3253 bp->tx_ring = kcalloc(bp->tx_nr_rings, 3254 sizeof(struct bnxt_tx_ring_info), 3255 GFP_KERNEL); 3256 if (!bp->tx_ring) 3257 return -ENOMEM; 3258 3259 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 3260 GFP_KERNEL); 3261 3262 if (!bp->tx_ring_map) 3263 return -ENOMEM; 3264 3265 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 3266 j = 0; 3267 else 3268 j = bp->rx_nr_rings; 3269 3270 for (i = 0; i < bp->tx_nr_rings; i++, j++) { 3271 bp->tx_ring[i].bnapi = bp->bnapi[j]; 3272 bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; 3273 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 3274 if (i >= bp->tx_nr_rings_xdp) { 3275 bp->tx_ring[i].txq_index = i - 3276 bp->tx_nr_rings_xdp; 3277 bp->bnapi[j]->tx_int = bnxt_tx_int; 3278 } else { 3279 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP; 3280 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp; 3281 } 3282 } 3283 3284 rc = bnxt_alloc_stats(bp); 3285 if (rc) 3286 goto alloc_mem_err; 3287 3288 rc = bnxt_alloc_ntp_fltrs(bp); 3289 if (rc) 3290 goto alloc_mem_err; 3291 3292 rc = bnxt_alloc_vnics(bp); 3293 if (rc) 3294 goto alloc_mem_err; 3295 } 3296 3297 bnxt_init_ring_struct(bp); 3298 3299 rc = bnxt_alloc_rx_rings(bp); 3300 if (rc) 3301 goto alloc_mem_err; 3302 3303 rc = bnxt_alloc_tx_rings(bp); 3304 if (rc) 3305 goto alloc_mem_err; 3306 3307 rc = bnxt_alloc_cp_rings(bp); 3308 if (rc) 3309 goto alloc_mem_err; 3310 3311 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 3312 BNXT_VNIC_UCAST_FLAG; 3313 rc = bnxt_alloc_vnic_attributes(bp); 3314 if (rc) 3315 goto alloc_mem_err; 3316 return 0; 3317 3318 alloc_mem_err: 3319 bnxt_free_mem(bp, true); 3320 return rc; 3321 } 3322 3323 static void bnxt_disable_int(struct bnxt *bp) 3324 { 3325 int i; 3326 3327 if (!bp->bnapi) 3328 return; 3329 3330 for (i = 0; i < bp->cp_nr_rings; i++) { 3331 struct bnxt_napi *bnapi = bp->bnapi[i]; 3332 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3333 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3334 3335 if (ring->fw_ring_id != INVALID_HW_RING_ID) 3336 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 3337 } 3338 } 3339 3340 static void bnxt_disable_int_sync(struct bnxt *bp) 3341 { 3342 int i; 3343 3344 atomic_inc(&bp->intr_sem); 3345 3346 bnxt_disable_int(bp); 3347 for (i = 0; i < bp->cp_nr_rings; i++) 3348 synchronize_irq(bp->irq_tbl[i].vector); 3349 } 3350 3351 static void bnxt_enable_int(struct bnxt *bp) 3352 { 3353 int i; 3354 3355 atomic_set(&bp->intr_sem, 0); 3356 for (i = 0; i < bp->cp_nr_rings; i++) { 3357 struct bnxt_napi *bnapi = bp->bnapi[i]; 3358 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3359 3360 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); 3361 } 3362 } 3363 3364 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, 3365 u16 cmpl_ring, u16 target_id) 3366 { 3367 struct input *req = request; 3368 3369 req->req_type = cpu_to_le16(req_type); 3370 req->cmpl_ring = cpu_to_le16(cmpl_ring); 3371 req->target_id = cpu_to_le16(target_id); 3372 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); 3373 } 3374 3375 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, 3376 int timeout, bool silent) 3377 { 3378 int i, intr_process, rc, tmo_count; 3379 struct input *req = msg; 3380 u32 *data = msg; 3381 __le32 
*resp_len, *valid; 3382 u16 cp_ring_id, len = 0; 3383 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 3384 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 3385 struct hwrm_short_input short_input = {0}; 3386 3387 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); 3388 memset(resp, 0, PAGE_SIZE); 3389 cp_ring_id = le16_to_cpu(req->cmpl_ring); 3390 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; 3391 3392 if (bp->flags & BNXT_FLAG_SHORT_CMD) { 3393 void *short_cmd_req = bp->hwrm_short_cmd_req_addr; 3394 3395 memcpy(short_cmd_req, req, msg_len); 3396 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - 3397 msg_len); 3398 3399 short_input.req_type = req->req_type; 3400 short_input.signature = 3401 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); 3402 short_input.size = cpu_to_le16(msg_len); 3403 short_input.req_addr = 3404 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); 3405 3406 data = (u32 *)&short_input; 3407 msg_len = sizeof(short_input); 3408 3409 /* Sync memory write before updating doorbell */ 3410 wmb(); 3411 3412 max_req_len = BNXT_HWRM_SHORT_REQ_LEN; 3413 } 3414 3415 /* Write request msg to hwrm channel */ 3416 __iowrite32_copy(bp->bar0, data, msg_len / 4); 3417 3418 for (i = msg_len; i < max_req_len; i += 4) 3419 writel(0, bp->bar0 + i); 3420 3421 /* currently supports only one outstanding message */ 3422 if (intr_process) 3423 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); 3424 3425 /* Ring channel doorbell */ 3426 writel(1, bp->bar0 + 0x100); 3427 3428 if (!timeout) 3429 timeout = DFLT_HWRM_CMD_TIMEOUT; 3430 3431 i = 0; 3432 tmo_count = timeout * 40; 3433 if (intr_process) { 3434 /* Wait until hwrm response cmpl interrupt is processed */ 3435 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && 3436 i++ < tmo_count) { 3437 usleep_range(25, 40); 3438 } 3439 3440 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { 3441 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", 3442 le16_to_cpu(req->req_type)); 3443 return -1; 3444 } 3445 } else { 3446 /* Check if response len is updated */ 3447 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; 3448 for (i = 0; i < tmo_count; i++) { 3449 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 3450 HWRM_RESP_LEN_SFT; 3451 if (len) 3452 break; 3453 usleep_range(25, 40); 3454 } 3455 3456 if (i >= tmo_count) { 3457 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", 3458 timeout, le16_to_cpu(req->req_type), 3459 le16_to_cpu(req->seq_id), len); 3460 return -1; 3461 } 3462 3463 /* Last word of resp contains valid bit */ 3464 valid = bp->hwrm_cmd_resp_addr + len - 4; 3465 for (i = 0; i < 5; i++) { 3466 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) 3467 break; 3468 udelay(1); 3469 } 3470 3471 if (i >= 5) { 3472 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", 3473 timeout, le16_to_cpu(req->req_type), 3474 le16_to_cpu(req->seq_id), len, *valid); 3475 return -1; 3476 } 3477 } 3478 3479 rc = le16_to_cpu(resp->error_code); 3480 if (rc && !silent) 3481 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", 3482 le16_to_cpu(resp->req_type), 3483 le16_to_cpu(resp->seq_id), rc); 3484 return rc; 3485 } 3486 3487 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3488 { 3489 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3490 } 3491 3492 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3493 int timeout) 3494 { 3495 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3496 } 3497 3498 int hwrm_send_message(struct 
bnxt *bp, void *msg, u32 msg_len, int timeout) 3499 { 3500 int rc; 3501 3502 mutex_lock(&bp->hwrm_cmd_lock); 3503 rc = _hwrm_send_message(bp, msg, msg_len, timeout); 3504 mutex_unlock(&bp->hwrm_cmd_lock); 3505 return rc; 3506 } 3507 3508 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, 3509 int timeout) 3510 { 3511 int rc; 3512 3513 mutex_lock(&bp->hwrm_cmd_lock); 3514 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); 3515 mutex_unlock(&bp->hwrm_cmd_lock); 3516 return rc; 3517 } 3518 3519 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 3520 int bmap_size) 3521 { 3522 struct hwrm_func_drv_rgtr_input req = {0}; 3523 DECLARE_BITMAP(async_events_bmap, 256); 3524 u32 *events = (u32 *)async_events_bmap; 3525 int i; 3526 3527 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3528 3529 req.enables = 3530 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 3531 3532 memset(async_events_bmap, 0, sizeof(async_events_bmap)); 3533 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) 3534 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 3535 3536 if (bmap && bmap_size) { 3537 for (i = 0; i < bmap_size; i++) { 3538 if (test_bit(i, bmap)) 3539 __set_bit(i, async_events_bmap); 3540 } 3541 } 3542 3543 for (i = 0; i < 8; i++) 3544 req.async_event_fwd[i] |= cpu_to_le32(events[i]); 3545 3546 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3547 } 3548 3549 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) 3550 { 3551 struct hwrm_func_drv_rgtr_input req = {0}; 3552 3553 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); 3554 3555 req.enables = 3556 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 3557 FUNC_DRV_RGTR_REQ_ENABLES_VER); 3558 3559 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 3560 req.ver_maj = DRV_VER_MAJ; 3561 req.ver_min = DRV_VER_MIN; 3562 req.ver_upd = DRV_VER_UPD; 3563 3564 if (BNXT_PF(bp)) { 3565 u32 data[8]; 3566 int i; 3567 3568 memset(data, 0, sizeof(data)); 3569 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 3570 u16 cmd = bnxt_vf_req_snif[i]; 3571 unsigned int bit, idx; 3572 3573 idx = cmd / 32; 3574 bit = cmd % 32; 3575 data[idx] |= 1 << bit; 3576 } 3577 3578 for (i = 0; i < 8; i++) 3579 req.vf_req_fwd[i] = cpu_to_le32(data[i]); 3580 3581 req.enables |= 3582 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 3583 } 3584 3585 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3586 } 3587 3588 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 3589 { 3590 struct hwrm_func_drv_unrgtr_input req = {0}; 3591 3592 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); 3593 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3594 } 3595 3596 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 3597 { 3598 u32 rc = 0; 3599 struct hwrm_tunnel_dst_port_free_input req = {0}; 3600 3601 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); 3602 req.tunnel_type = tunnel_type; 3603 3604 switch (tunnel_type) { 3605 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 3606 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; 3607 break; 3608 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 3609 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; 3610 break; 3611 default: 3612 break; 3613 } 3614 3615 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3616 if (rc) 3617 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 3618 rc); 3619 return rc; 3620 } 3621 3622 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 3623 u8 tunnel_type) 3624 { 3625 u32 rc = 0; 3626 struct hwrm_tunnel_dst_port_alloc_input req = {0}; 3627 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3628 3629 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); 3630 3631 req.tunnel_type = tunnel_type; 3632 req.tunnel_dst_port_val = port; 3633 3634 mutex_lock(&bp->hwrm_cmd_lock); 3635 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3636 if (rc) { 3637 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 3638 rc); 3639 goto err_out; 3640 } 3641 3642 switch (tunnel_type) { 3643 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 3644 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 3645 break; 3646 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 3647 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 3648 break; 3649 default: 3650 break; 3651 } 3652 3653 err_out: 3654 mutex_unlock(&bp->hwrm_cmd_lock); 3655 return rc; 3656 } 3657 3658 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 3659 { 3660 struct hwrm_cfa_l2_set_rx_mask_input req = {0}; 3661 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3662 3663 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); 3664 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3665 3666 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); 3667 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 3668 req.mask = cpu_to_le32(vnic->rx_mask); 3669 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3670 } 3671 3672 #ifdef CONFIG_RFS_ACCEL 3673 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 3674 struct bnxt_ntuple_filter *fltr) 3675 { 3676 struct hwrm_cfa_ntuple_filter_free_input req = {0}; 3677 3678 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); 3679 req.ntuple_filter_id = fltr->filter_id; 3680 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3681 } 3682 3683 #define BNXT_NTP_FLTR_FLAGS \ 3684 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 3685 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 3686 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \ 3687 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 3688 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 3689 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 3690 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 3691 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 3692 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 3693 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 3694 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 3695 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 3696 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 3697 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 3698 3699 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 3700 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 3701 3702 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 3703 struct bnxt_ntuple_filter *fltr) 3704 { 3705 int rc = 0; 3706 struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; 3707 struct hwrm_cfa_ntuple_filter_alloc_output *resp = 3708 bp->hwrm_cmd_resp_addr; 3709 struct flow_keys *keys = &fltr->fkeys; 3710 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; 3711 3712 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); 3713 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; 
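/* Enable every match field covered by BNXT_NTP_FLTR_FLAGS; tunnel
 * matching is enabled separately below for encapsulated flows.
 */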
3714 3715 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 3716 3717 req.ethertype = htons(ETH_P_IP); 3718 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); 3719 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 3720 req.ip_protocol = keys->basic.ip_proto; 3721 3722 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 3723 int i; 3724 3725 req.ethertype = htons(ETH_P_IPV6); 3726 req.ip_addr_type = 3727 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 3728 *(struct in6_addr *)&req.src_ipaddr[0] = 3729 keys->addrs.v6addrs.src; 3730 *(struct in6_addr *)&req.dst_ipaddr[0] = 3731 keys->addrs.v6addrs.dst; 3732 for (i = 0; i < 4; i++) { 3733 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 3734 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); 3735 } 3736 } else { 3737 req.src_ipaddr[0] = keys->addrs.v4addrs.src; 3738 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3739 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; 3740 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); 3741 } 3742 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 3743 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 3744 req.tunnel_type = 3745 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 3746 } 3747 3748 req.src_port = keys->ports.src; 3749 req.src_port_mask = cpu_to_be16(0xffff); 3750 req.dst_port = keys->ports.dst; 3751 req.dst_port_mask = cpu_to_be16(0xffff); 3752 3753 req.dst_id = cpu_to_le16(vnic->fw_vnic_id); 3754 mutex_lock(&bp->hwrm_cmd_lock); 3755 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3756 if (!rc) 3757 fltr->filter_id = resp->ntuple_filter_id; 3758 mutex_unlock(&bp->hwrm_cmd_lock); 3759 return rc; 3760 } 3761 #endif 3762 3763 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 3764 u8 *mac_addr) 3765 { 3766 u32 rc = 0; 3767 struct hwrm_cfa_l2_filter_alloc_input req = {0}; 3768 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 3769 3770 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); 3771 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 3772 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 3773 req.flags |= 3774 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 3775 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); 3776 req.enables = 3777 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 3778 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 3779 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 3780 memcpy(req.l2_addr, mac_addr, ETH_ALEN); 3781 req.l2_addr_mask[0] = 0xff; 3782 req.l2_addr_mask[1] = 0xff; 3783 req.l2_addr_mask[2] = 0xff; 3784 req.l2_addr_mask[3] = 0xff; 3785 req.l2_addr_mask[4] = 0xff; 3786 req.l2_addr_mask[5] = 0xff; 3787 3788 mutex_lock(&bp->hwrm_cmd_lock); 3789 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3790 if (!rc) 3791 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = 3792 resp->l2_filter_id; 3793 mutex_unlock(&bp->hwrm_cmd_lock); 3794 return rc; 3795 } 3796 3797 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 3798 { 3799 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 3800 int rc = 0; 3801 3802 /* Any associated ntuple filters will also be cleared by firmware. 
*/ 3803 mutex_lock(&bp->hwrm_cmd_lock); 3804 for (i = 0; i < num_of_vnics; i++) { 3805 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3806 3807 for (j = 0; j < vnic->uc_filter_count; j++) { 3808 struct hwrm_cfa_l2_filter_free_input req = {0}; 3809 3810 bnxt_hwrm_cmd_hdr_init(bp, &req, 3811 HWRM_CFA_L2_FILTER_FREE, -1, -1); 3812 3813 req.l2_filter_id = vnic->fw_l2_filter_id[j]; 3814 3815 rc = _hwrm_send_message(bp, &req, sizeof(req), 3816 HWRM_CMD_TIMEOUT); 3817 } 3818 vnic->uc_filter_count = 0; 3819 } 3820 mutex_unlock(&bp->hwrm_cmd_lock); 3821 3822 return rc; 3823 } 3824 3825 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) 3826 { 3827 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3828 struct hwrm_vnic_tpa_cfg_input req = {0}; 3829 3830 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 3831 3832 if (tpa_flags) { 3833 u16 mss = bp->dev->mtu - 40; 3834 u32 nsegs, n, segs = 0, flags; 3835 3836 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 3837 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 3838 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 3839 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 3840 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 3841 if (tpa_flags & BNXT_FLAG_GRO) 3842 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 3843 3844 req.flags = cpu_to_le32(flags); 3845 3846 req.enables = 3847 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 3848 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 3849 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 3850 3851 /* Number of segs are log2 units, and first packet is not 3852 * included as part of this units. 3853 */ 3854 if (mss <= BNXT_RX_PAGE_SIZE) { 3855 n = BNXT_RX_PAGE_SIZE / mss; 3856 nsegs = (MAX_SKB_FRAGS - 1) * n; 3857 } else { 3858 n = mss / BNXT_RX_PAGE_SIZE; 3859 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 3860 n++; 3861 nsegs = (MAX_SKB_FRAGS - n) / n; 3862 } 3863 3864 segs = ilog2(nsegs); 3865 req.max_agg_segs = cpu_to_le16(segs); 3866 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); 3867 3868 req.min_agg_len = cpu_to_le32(512); 3869 } 3870 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 3871 3872 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3873 } 3874 3875 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) 3876 { 3877 u32 i, j, max_rings; 3878 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3879 struct hwrm_vnic_rss_cfg_input req = {0}; 3880 3881 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 3882 return 0; 3883 3884 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); 3885 if (set_rss) { 3886 req.hash_type = cpu_to_le32(bp->rss_hash_cfg); 3887 if (vnic->flags & BNXT_VNIC_RSS_FLAG) { 3888 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 3889 max_rings = bp->rx_nr_rings - 1; 3890 else 3891 max_rings = bp->rx_nr_rings; 3892 } else { 3893 max_rings = 1; 3894 } 3895 3896 /* Fill the RSS indirection table with ring group ids */ 3897 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { 3898 if (j == max_rings) 3899 j = 0; 3900 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 3901 } 3902 3903 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 3904 req.hash_key_tbl_addr = 3905 cpu_to_le64(vnic->rss_hash_key_dma_addr); 3906 } 3907 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3908 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3909 } 3910 3911 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) 3912 { 3913 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3914 struct hwrm_vnic_plcmodes_cfg_input req = {0}; 3915 3916 
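/* Enable jumbo placement and header-data split (HDS) for IPv4 and
 * IPv6 on this VNIC; both thresholds are set to the RX copy
 * threshold below.
 */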
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); 3917 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | 3918 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 3919 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 3920 req.enables = 3921 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | 3922 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 3923 /* thresholds not implemented in firmware yet */ 3924 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); 3925 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); 3926 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); 3927 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3928 } 3929 3930 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, 3931 u16 ctx_idx) 3932 { 3933 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; 3934 3935 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); 3936 req.rss_cos_lb_ctx_id = 3937 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); 3938 3939 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3940 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 3941 } 3942 3943 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 3944 { 3945 int i, j; 3946 3947 for (i = 0; i < bp->nr_vnics; i++) { 3948 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3949 3950 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 3951 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 3952 bnxt_hwrm_vnic_ctx_free_one(bp, i, j); 3953 } 3954 } 3955 bp->rsscos_nr_ctxs = 0; 3956 } 3957 3958 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) 3959 { 3960 int rc; 3961 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; 3962 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = 3963 bp->hwrm_cmd_resp_addr; 3964 3965 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, 3966 -1); 3967 3968 mutex_lock(&bp->hwrm_cmd_lock); 3969 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3970 if (!rc) 3971 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = 3972 le16_to_cpu(resp->rss_cos_lb_ctx_id); 3973 mutex_unlock(&bp->hwrm_cmd_lock); 3974 3975 return rc; 3976 } 3977 3978 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 3979 { 3980 unsigned int ring = 0, grp_idx; 3981 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3982 struct hwrm_vnic_cfg_input req = {0}; 3983 u16 def_vlan = 0; 3984 3985 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 3986 3987 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 3988 /* Only RSS support for now TBD: COS & LB */ 3989 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 3990 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 3991 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 3992 VNIC_CFG_REQ_ENABLES_MRU); 3993 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 3994 req.rss_rule = 3995 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 3996 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 3997 VNIC_CFG_REQ_ENABLES_MRU); 3998 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 3999 } else { 4000 req.rss_rule = cpu_to_le16(0xffff); 4001 } 4002 4003 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 4004 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 4005 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 4006 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 4007 } else { 4008 req.cos_rule = cpu_to_le16(0xffff); 4009 } 4010 4011 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4012 
ring = 0; 4013 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 4014 ring = vnic_id - 1; 4015 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 4016 ring = bp->rx_nr_rings - 1; 4017 4018 grp_idx = bp->rx_ring[ring].bnapi->index; 4019 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); 4020 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 4021 4022 req.lb_rule = cpu_to_le16(0xffff); 4023 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 4024 VLAN_HLEN); 4025 4026 #ifdef CONFIG_BNXT_SRIOV 4027 if (BNXT_VF(bp)) 4028 def_vlan = bp->vf.vlan; 4029 #endif 4030 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 4031 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 4032 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) 4033 req.flags |= 4034 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE); 4035 4036 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4037 } 4038 4039 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) 4040 { 4041 u32 rc = 0; 4042 4043 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { 4044 struct hwrm_vnic_free_input req = {0}; 4045 4046 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); 4047 req.vnic_id = 4048 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); 4049 4050 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4051 if (rc) 4052 return rc; 4053 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; 4054 } 4055 return rc; 4056 } 4057 4058 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 4059 { 4060 u16 i; 4061 4062 for (i = 0; i < bp->nr_vnics; i++) 4063 bnxt_hwrm_vnic_free_one(bp, i); 4064 } 4065 4066 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, 4067 unsigned int start_rx_ring_idx, 4068 unsigned int nr_rings) 4069 { 4070 int rc = 0; 4071 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 4072 struct hwrm_vnic_alloc_input req = {0}; 4073 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4074 4075 /* map ring groups to this vnic */ 4076 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 4077 grp_idx = bp->rx_ring[i].bnapi->index; 4078 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 4079 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 4080 j, nr_rings); 4081 break; 4082 } 4083 bp->vnic_info[vnic_id].fw_grp_ids[j] = 4084 bp->grp_info[grp_idx].fw_grp_id; 4085 } 4086 4087 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; 4088 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; 4089 if (vnic_id == 0) 4090 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 4091 4092 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); 4093 4094 mutex_lock(&bp->hwrm_cmd_lock); 4095 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4096 if (!rc) 4097 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); 4098 mutex_unlock(&bp->hwrm_cmd_lock); 4099 return rc; 4100 } 4101 4102 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 4103 { 4104 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4105 struct hwrm_vnic_qcaps_input req = {0}; 4106 int rc; 4107 4108 if (bp->hwrm_spec_code < 0x10600) 4109 return 0; 4110 4111 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); 4112 mutex_lock(&bp->hwrm_cmd_lock); 4113 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4114 if (!rc) { 4115 if (resp->flags & 4116 cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 4117 bp->flags |= BNXT_FLAG_NEW_RSS_CAP; 4118 } 4119 
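	/* With BNXT_FLAG_NEW_RSS_CAP set, RFS vnics are later marked with
	 * BNXT_VNIC_RFS_NEW_RSS_FLAG and reuse the default vnic's RSS
	 * context instead of allocating their own (see bnxt_alloc_rfs_vnics()
	 * and bnxt_hwrm_vnic_cfg()).
	 */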
mutex_unlock(&bp->hwrm_cmd_lock); 4120 return rc; 4121 } 4122 4123 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 4124 { 4125 u16 i; 4126 u32 rc = 0; 4127 4128 mutex_lock(&bp->hwrm_cmd_lock); 4129 for (i = 0; i < bp->rx_nr_rings; i++) { 4130 struct hwrm_ring_grp_alloc_input req = {0}; 4131 struct hwrm_ring_grp_alloc_output *resp = 4132 bp->hwrm_cmd_resp_addr; 4133 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 4134 4135 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); 4136 4137 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 4138 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 4139 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 4140 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 4141 4142 rc = _hwrm_send_message(bp, &req, sizeof(req), 4143 HWRM_CMD_TIMEOUT); 4144 if (rc) 4145 break; 4146 4147 bp->grp_info[grp_idx].fw_grp_id = 4148 le32_to_cpu(resp->ring_group_id); 4149 } 4150 mutex_unlock(&bp->hwrm_cmd_lock); 4151 return rc; 4152 } 4153 4154 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) 4155 { 4156 u16 i; 4157 u32 rc = 0; 4158 struct hwrm_ring_grp_free_input req = {0}; 4159 4160 if (!bp->grp_info) 4161 return 0; 4162 4163 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); 4164 4165 mutex_lock(&bp->hwrm_cmd_lock); 4166 for (i = 0; i < bp->cp_nr_rings; i++) { 4167 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 4168 continue; 4169 req.ring_group_id = 4170 cpu_to_le32(bp->grp_info[i].fw_grp_id); 4171 4172 rc = _hwrm_send_message(bp, &req, sizeof(req), 4173 HWRM_CMD_TIMEOUT); 4174 if (rc) 4175 break; 4176 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4177 } 4178 mutex_unlock(&bp->hwrm_cmd_lock); 4179 return rc; 4180 } 4181 4182 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 4183 struct bnxt_ring_struct *ring, 4184 u32 ring_type, u32 map_index, 4185 u32 stats_ctx_id) 4186 { 4187 int rc = 0, err = 0; 4188 struct hwrm_ring_alloc_input req = {0}; 4189 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4190 u16 ring_id; 4191 4192 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); 4193 4194 req.enables = 0; 4195 if (ring->nr_pages > 1) { 4196 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map); 4197 /* Page size is in log2 units */ 4198 req.page_size = BNXT_PAGE_SHIFT; 4199 req.page_tbl_depth = 1; 4200 } else { 4201 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]); 4202 } 4203 req.fbo = 0; 4204 /* Association of ring index with doorbell index and MSIX number */ 4205 req.logical_id = cpu_to_le16(map_index); 4206 4207 switch (ring_type) { 4208 case HWRM_RING_ALLOC_TX: 4209 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 4210 /* Association of transmit ring with completion ring */ 4211 req.cmpl_ring_id = 4212 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); 4213 req.length = cpu_to_le32(bp->tx_ring_mask + 1); 4214 req.stat_ctx_id = cpu_to_le32(stats_ctx_id); 4215 req.queue_id = cpu_to_le16(ring->queue_id); 4216 break; 4217 case HWRM_RING_ALLOC_RX: 4218 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4219 req.length = cpu_to_le32(bp->rx_ring_mask + 1); 4220 break; 4221 case HWRM_RING_ALLOC_AGG: 4222 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 4223 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); 4224 break; 4225 case HWRM_RING_ALLOC_CMPL: 4226 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 4227 req.length = cpu_to_le32(bp->cp_ring_mask + 1); 4228 if (bp->flags & BNXT_FLAG_USING_MSIX) 4229 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 4230 break; 4231 default: 4232 netdev_err(bp->dev, 
"hwrm alloc invalid ring type %d\n", 4233 ring_type); 4234 return -1; 4235 } 4236 4237 mutex_lock(&bp->hwrm_cmd_lock); 4238 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4239 err = le16_to_cpu(resp->error_code); 4240 ring_id = le16_to_cpu(resp->ring_id); 4241 mutex_unlock(&bp->hwrm_cmd_lock); 4242 4243 if (rc || err) { 4244 switch (ring_type) { 4245 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4246 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", 4247 rc, err); 4248 return -1; 4249 4250 case RING_FREE_REQ_RING_TYPE_RX: 4251 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", 4252 rc, err); 4253 return -1; 4254 4255 case RING_FREE_REQ_RING_TYPE_TX: 4256 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", 4257 rc, err); 4258 return -1; 4259 4260 default: 4261 netdev_err(bp->dev, "Invalid ring\n"); 4262 return -1; 4263 } 4264 } 4265 ring->fw_ring_id = ring_id; 4266 return rc; 4267 } 4268 4269 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 4270 { 4271 int rc; 4272 4273 if (BNXT_PF(bp)) { 4274 struct hwrm_func_cfg_input req = {0}; 4275 4276 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4277 req.fid = cpu_to_le16(0xffff); 4278 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4279 req.async_event_cr = cpu_to_le16(idx); 4280 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4281 } else { 4282 struct hwrm_func_vf_cfg_input req = {0}; 4283 4284 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4285 req.enables = 4286 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 4287 req.async_event_cr = cpu_to_le16(idx); 4288 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4289 } 4290 return rc; 4291 } 4292 4293 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 4294 { 4295 int i, rc = 0; 4296 4297 for (i = 0; i < bp->cp_nr_rings; i++) { 4298 struct bnxt_napi *bnapi = bp->bnapi[i]; 4299 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4300 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4301 4302 cpr->cp_doorbell = bp->bar1 + i * 0x80; 4303 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 4304 INVALID_STATS_CTX_ID); 4305 if (rc) 4306 goto err_out; 4307 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 4308 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4309 4310 if (!i) { 4311 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 4312 if (rc) 4313 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 4314 } 4315 } 4316 4317 for (i = 0; i < bp->tx_nr_rings; i++) { 4318 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4319 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4320 u32 map_idx = txr->bnapi->index; 4321 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; 4322 4323 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, 4324 map_idx, fw_stats_ctx); 4325 if (rc) 4326 goto err_out; 4327 txr->tx_doorbell = bp->bar1 + map_idx * 0x80; 4328 } 4329 4330 for (i = 0; i < bp->rx_nr_rings; i++) { 4331 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4332 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4333 u32 map_idx = rxr->bnapi->index; 4334 4335 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, 4336 map_idx, INVALID_STATS_CTX_ID); 4337 if (rc) 4338 goto err_out; 4339 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; 4340 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); 4341 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 4342 } 4343 4344 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 4345 for (i = 0; i < 
bp->rx_nr_rings; i++) { 4346 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4347 struct bnxt_ring_struct *ring = 4348 &rxr->rx_agg_ring_struct; 4349 u32 grp_idx = rxr->bnapi->index; 4350 u32 map_idx = grp_idx + bp->rx_nr_rings; 4351 4352 rc = hwrm_ring_alloc_send_msg(bp, ring, 4353 HWRM_RING_ALLOC_AGG, 4354 map_idx, 4355 INVALID_STATS_CTX_ID); 4356 if (rc) 4357 goto err_out; 4358 4359 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80; 4360 writel(DB_KEY_RX | rxr->rx_agg_prod, 4361 rxr->rx_agg_doorbell); 4362 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 4363 } 4364 } 4365 err_out: 4366 return rc; 4367 } 4368 4369 static int hwrm_ring_free_send_msg(struct bnxt *bp, 4370 struct bnxt_ring_struct *ring, 4371 u32 ring_type, int cmpl_ring_id) 4372 { 4373 int rc; 4374 struct hwrm_ring_free_input req = {0}; 4375 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; 4376 u16 error_code; 4377 4378 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); 4379 req.ring_type = ring_type; 4380 req.ring_id = cpu_to_le16(ring->fw_ring_id); 4381 4382 mutex_lock(&bp->hwrm_cmd_lock); 4383 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4384 error_code = le16_to_cpu(resp->error_code); 4385 mutex_unlock(&bp->hwrm_cmd_lock); 4386 4387 if (rc || error_code) { 4388 switch (ring_type) { 4389 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4390 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", 4391 rc); 4392 return rc; 4393 case RING_FREE_REQ_RING_TYPE_RX: 4394 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", 4395 rc); 4396 return rc; 4397 case RING_FREE_REQ_RING_TYPE_TX: 4398 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", 4399 rc); 4400 return rc; 4401 default: 4402 netdev_err(bp->dev, "Invalid ring\n"); 4403 return -1; 4404 } 4405 } 4406 return 0; 4407 } 4408 4409 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 4410 { 4411 int i; 4412 4413 if (!bp->bnapi) 4414 return; 4415 4416 for (i = 0; i < bp->tx_nr_rings; i++) { 4417 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4418 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4419 u32 grp_idx = txr->bnapi->index; 4420 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4421 4422 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4423 hwrm_ring_free_send_msg(bp, ring, 4424 RING_FREE_REQ_RING_TYPE_TX, 4425 close_path ? cmpl_ring_id : 4426 INVALID_HW_RING_ID); 4427 ring->fw_ring_id = INVALID_HW_RING_ID; 4428 } 4429 } 4430 4431 for (i = 0; i < bp->rx_nr_rings; i++) { 4432 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4433 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 4434 u32 grp_idx = rxr->bnapi->index; 4435 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4436 4437 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4438 hwrm_ring_free_send_msg(bp, ring, 4439 RING_FREE_REQ_RING_TYPE_RX, 4440 close_path ? cmpl_ring_id : 4441 INVALID_HW_RING_ID); 4442 ring->fw_ring_id = INVALID_HW_RING_ID; 4443 bp->grp_info[grp_idx].rx_fw_ring_id = 4444 INVALID_HW_RING_ID; 4445 } 4446 } 4447 4448 for (i = 0; i < bp->rx_nr_rings; i++) { 4449 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 4450 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 4451 u32 grp_idx = rxr->bnapi->index; 4452 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id; 4453 4454 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4455 hwrm_ring_free_send_msg(bp, ring, 4456 RING_FREE_REQ_RING_TYPE_RX, 4457 close_path ? 
cmpl_ring_id : 4458 INVALID_HW_RING_ID); 4459 ring->fw_ring_id = INVALID_HW_RING_ID; 4460 bp->grp_info[grp_idx].agg_fw_ring_id = 4461 INVALID_HW_RING_ID; 4462 } 4463 } 4464 4465 /* The completion rings are about to be freed. After that the 4466 * IRQ doorbell will not work anymore. So we need to disable 4467 * IRQ here. 4468 */ 4469 bnxt_disable_int_sync(bp); 4470 4471 for (i = 0; i < bp->cp_nr_rings; i++) { 4472 struct bnxt_napi *bnapi = bp->bnapi[i]; 4473 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4474 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4475 4476 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 4477 hwrm_ring_free_send_msg(bp, ring, 4478 RING_FREE_REQ_RING_TYPE_L2_CMPL, 4479 INVALID_HW_RING_ID); 4480 ring->fw_ring_id = INVALID_HW_RING_ID; 4481 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4482 } 4483 } 4484 } 4485 4486 /* Caller must hold bp->hwrm_cmd_lock */ 4487 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 4488 { 4489 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4490 struct hwrm_func_qcfg_input req = {0}; 4491 int rc; 4492 4493 if (bp->hwrm_spec_code < 0x10601) 4494 return 0; 4495 4496 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4497 req.fid = cpu_to_le16(fid); 4498 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4499 if (!rc) 4500 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 4501 4502 return rc; 4503 } 4504 4505 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings) 4506 { 4507 struct hwrm_func_cfg_input req = {0}; 4508 int rc; 4509 4510 if (bp->hwrm_spec_code < 0x10601) 4511 return 0; 4512 4513 if (BNXT_VF(bp)) 4514 return 0; 4515 4516 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4517 req.fid = cpu_to_le16(0xffff); 4518 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); 4519 req.num_tx_rings = cpu_to_le16(*tx_rings); 4520 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4521 if (rc) 4522 return rc; 4523 4524 mutex_lock(&bp->hwrm_cmd_lock); 4525 rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings); 4526 mutex_unlock(&bp->hwrm_cmd_lock); 4527 if (!rc) 4528 bp->tx_reserved_rings = *tx_rings; 4529 return rc; 4530 } 4531 4532 static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings) 4533 { 4534 struct hwrm_func_cfg_input req = {0}; 4535 int rc; 4536 4537 if (bp->hwrm_spec_code < 0x10801) 4538 return 0; 4539 4540 if (BNXT_VF(bp)) 4541 return 0; 4542 4543 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4544 req.fid = cpu_to_le16(0xffff); 4545 req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST); 4546 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS); 4547 req.num_tx_rings = cpu_to_le16(tx_rings); 4548 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4549 if (rc) 4550 return -ENOMEM; 4551 return 0; 4552 } 4553 4554 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, 4555 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 4556 { 4557 u16 val, tmr, max, flags; 4558 4559 max = hw_coal->bufs_per_record * 128; 4560 if (hw_coal->budget) 4561 max = hw_coal->bufs_per_record * hw_coal->budget; 4562 4563 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 4564 req->num_cmpl_aggr_int = cpu_to_le16(val); 4565 4566 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ 4567 val = min_t(u16, val, 63); 4568 req->num_cmpl_dma_aggr = cpu_to_le16(val); 4569 4570 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ 4571 val = clamp_t(u16, 
hw_coal->coal_bufs_irq, 1, 63); 4572 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 4573 4574 tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks); 4575 tmr = max_t(u16, tmr, 1); 4576 req->int_lat_tmr_max = cpu_to_le16(tmr); 4577 4578 /* min timer set to 1/2 of interrupt timer */ 4579 val = tmr / 2; 4580 req->int_lat_tmr_min = cpu_to_le16(val); 4581 4582 /* buf timer set to 1/4 of interrupt timer */ 4583 val = max_t(u16, tmr / 4, 1); 4584 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 4585 4586 tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq); 4587 tmr = max_t(u16, tmr, 1); 4588 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); 4589 4590 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 4591 if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 4592 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 4593 req->flags = cpu_to_le16(flags); 4594 } 4595 4596 int bnxt_hwrm_set_coal(struct bnxt *bp) 4597 { 4598 int i, rc = 0; 4599 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, 4600 req_tx = {0}, *req; 4601 4602 bnxt_hwrm_cmd_hdr_init(bp, &req_rx, 4603 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4604 bnxt_hwrm_cmd_hdr_init(bp, &req_tx, 4605 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); 4606 4607 bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx); 4608 bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx); 4609 4610 mutex_lock(&bp->hwrm_cmd_lock); 4611 for (i = 0; i < bp->cp_nr_rings; i++) { 4612 struct bnxt_napi *bnapi = bp->bnapi[i]; 4613 4614 req = &req_rx; 4615 if (!bnapi->rx_ring) 4616 req = &req_tx; 4617 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); 4618 4619 rc = _hwrm_send_message(bp, req, sizeof(*req), 4620 HWRM_CMD_TIMEOUT); 4621 if (rc) 4622 break; 4623 } 4624 mutex_unlock(&bp->hwrm_cmd_lock); 4625 return rc; 4626 } 4627 4628 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 4629 { 4630 int rc = 0, i; 4631 struct hwrm_stat_ctx_free_input req = {0}; 4632 4633 if (!bp->bnapi) 4634 return 0; 4635 4636 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4637 return 0; 4638 4639 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); 4640 4641 mutex_lock(&bp->hwrm_cmd_lock); 4642 for (i = 0; i < bp->cp_nr_rings; i++) { 4643 struct bnxt_napi *bnapi = bp->bnapi[i]; 4644 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4645 4646 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 4647 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 4648 4649 rc = _hwrm_send_message(bp, &req, sizeof(req), 4650 HWRM_CMD_TIMEOUT); 4651 if (rc) 4652 break; 4653 4654 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 4655 } 4656 } 4657 mutex_unlock(&bp->hwrm_cmd_lock); 4658 return rc; 4659 } 4660 4661 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 4662 { 4663 int rc = 0, i; 4664 struct hwrm_stat_ctx_alloc_input req = {0}; 4665 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; 4666 4667 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4668 return 0; 4669 4670 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 4671 4672 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 4673 4674 mutex_lock(&bp->hwrm_cmd_lock); 4675 for (i = 0; i < bp->cp_nr_rings; i++) { 4676 struct bnxt_napi *bnapi = bp->bnapi[i]; 4677 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4678 4679 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); 4680 4681 rc = _hwrm_send_message(bp, &req, sizeof(req), 4682 HWRM_CMD_TIMEOUT); 4683 if (rc) 4684 break; 4685 4686 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 4687 4688 
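		/* Mirror the stats context id into the ring group info so it
		 * can be passed to HWRM_RING_GRP_ALLOC and to the TX ring
		 * allocation in bnxt_hwrm_ring_alloc().
		 */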
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 4689 } 4690 mutex_unlock(&bp->hwrm_cmd_lock); 4691 return rc; 4692 } 4693 4694 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 4695 { 4696 struct hwrm_func_qcfg_input req = {0}; 4697 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 4698 u16 flags; 4699 int rc; 4700 4701 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 4702 req.fid = cpu_to_le16(0xffff); 4703 mutex_lock(&bp->hwrm_cmd_lock); 4704 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4705 if (rc) 4706 goto func_qcfg_exit; 4707 4708 #ifdef CONFIG_BNXT_SRIOV 4709 if (BNXT_VF(bp)) { 4710 struct bnxt_vf_info *vf = &bp->vf; 4711 4712 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 4713 } 4714 #endif 4715 flags = le16_to_cpu(resp->flags); 4716 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 4717 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 4718 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; 4719 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 4720 bp->flags |= BNXT_FLAG_FW_DCBX_AGENT; 4721 } 4722 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 4723 bp->flags |= BNXT_FLAG_MULTI_HOST; 4724 4725 switch (resp->port_partition_type) { 4726 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 4727 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 4728 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 4729 bp->port_partition_type = resp->port_partition_type; 4730 break; 4731 } 4732 if (bp->hwrm_spec_code < 0x10707 || 4733 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 4734 bp->br_mode = BRIDGE_MODE_VEB; 4735 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 4736 bp->br_mode = BRIDGE_MODE_VEPA; 4737 else 4738 bp->br_mode = BRIDGE_MODE_UNDEF; 4739 4740 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 4741 if (!bp->max_mtu) 4742 bp->max_mtu = BNXT_MAX_MTU; 4743 4744 func_qcfg_exit: 4745 mutex_unlock(&bp->hwrm_cmd_lock); 4746 return rc; 4747 } 4748 4749 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) 4750 { 4751 int rc = 0; 4752 struct hwrm_func_qcaps_input req = {0}; 4753 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4754 4755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); 4756 req.fid = cpu_to_le16(0xffff); 4757 4758 mutex_lock(&bp->hwrm_cmd_lock); 4759 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4760 if (rc) 4761 goto hwrm_func_qcaps_exit; 4762 4763 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)) 4764 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 4765 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)) 4766 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 4767 4768 bp->tx_push_thresh = 0; 4769 if (resp->flags & 4770 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)) 4771 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 4772 4773 if (BNXT_PF(bp)) { 4774 struct bnxt_pf_info *pf = &bp->pf; 4775 4776 pf->fw_fid = le16_to_cpu(resp->fid); 4777 pf->port_id = le16_to_cpu(resp->port_id); 4778 bp->dev->dev_port = pf->port_id; 4779 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 4780 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4781 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4782 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4783 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4784 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4785 if (!pf->max_hw_ring_grps) 4786 pf->max_hw_ring_grps = pf->max_tx_rings; 4787 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4788 pf->max_vnics = le16_to_cpu(resp->max_vnics); 4789 
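		/* These per-function maxima are what the bnxt_get_max_func_*()
		 * helpers report when rings and interrupt vectors are sized.
		 */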
pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4790 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 4791 pf->max_vfs = le16_to_cpu(resp->max_vfs); 4792 pf->max_encap_records = le32_to_cpu(resp->max_encap_records); 4793 pf->max_decap_records = le32_to_cpu(resp->max_decap_records); 4794 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 4795 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 4796 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 4797 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 4798 if (resp->flags & 4799 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)) 4800 bp->flags |= BNXT_FLAG_WOL_CAP; 4801 } else { 4802 #ifdef CONFIG_BNXT_SRIOV 4803 struct bnxt_vf_info *vf = &bp->vf; 4804 4805 vf->fw_fid = le16_to_cpu(resp->fid); 4806 4807 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4808 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 4809 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 4810 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 4811 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 4812 if (!vf->max_hw_ring_grps) 4813 vf->max_hw_ring_grps = vf->max_tx_rings; 4814 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 4815 vf->max_vnics = le16_to_cpu(resp->max_vnics); 4816 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 4817 4818 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 4819 #endif 4820 } 4821 4822 hwrm_func_qcaps_exit: 4823 mutex_unlock(&bp->hwrm_cmd_lock); 4824 return rc; 4825 } 4826 4827 static int bnxt_hwrm_func_reset(struct bnxt *bp) 4828 { 4829 struct hwrm_func_reset_input req = {0}; 4830 4831 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); 4832 req.enables = 0; 4833 4834 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); 4835 } 4836 4837 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 4838 { 4839 int rc = 0; 4840 struct hwrm_queue_qportcfg_input req = {0}; 4841 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; 4842 u8 i, *qptr; 4843 4844 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); 4845 4846 mutex_lock(&bp->hwrm_cmd_lock); 4847 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4848 if (rc) 4849 goto qportcfg_exit; 4850 4851 if (!resp->max_configurable_queues) { 4852 rc = -EINVAL; 4853 goto qportcfg_exit; 4854 } 4855 bp->max_tc = resp->max_configurable_queues; 4856 bp->max_lltc = resp->max_configurable_lossless_queues; 4857 if (bp->max_tc > BNXT_MAX_QUEUE) 4858 bp->max_tc = BNXT_MAX_QUEUE; 4859 4860 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 4861 bp->max_tc = 1; 4862 4863 if (bp->max_lltc > bp->max_tc) 4864 bp->max_lltc = bp->max_tc; 4865 4866 qptr = &resp->queue_id0; 4867 for (i = 0; i < bp->max_tc; i++) { 4868 bp->q_info[i].queue_id = *qptr++; 4869 bp->q_info[i].queue_profile = *qptr++; 4870 } 4871 4872 qportcfg_exit: 4873 mutex_unlock(&bp->hwrm_cmd_lock); 4874 return rc; 4875 } 4876 4877 static int bnxt_hwrm_ver_get(struct bnxt *bp) 4878 { 4879 int rc; 4880 struct hwrm_ver_get_input req = {0}; 4881 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 4882 u32 dev_caps_cfg; 4883 4884 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 4885 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 4886 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 4887 req.hwrm_intf_min = HWRM_VERSION_MINOR; 4888 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 4889 mutex_lock(&bp->hwrm_cmd_lock); 4890 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4891 if (rc) 4892 goto 
hwrm_ver_get_exit; 4893 4894 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 4895 4896 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | 4897 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; 4898 if (resp->hwrm_intf_maj < 1) { 4899 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 4900 resp->hwrm_intf_maj, resp->hwrm_intf_min, 4901 resp->hwrm_intf_upd); 4902 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 4903 } 4904 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", 4905 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, 4906 resp->hwrm_fw_rsvd); 4907 4908 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 4909 if (!bp->hwrm_cmd_timeout) 4910 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 4911 4912 if (resp->hwrm_intf_maj >= 1) 4913 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 4914 4915 bp->chip_num = le16_to_cpu(resp->chip_num); 4916 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 4917 !resp->chip_metal) 4918 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 4919 4920 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 4921 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 4922 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 4923 bp->flags |= BNXT_FLAG_SHORT_CMD; 4924 4925 hwrm_ver_get_exit: 4926 mutex_unlock(&bp->hwrm_cmd_lock); 4927 return rc; 4928 } 4929 4930 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 4931 { 4932 struct hwrm_fw_set_time_input req = {0}; 4933 struct tm tm; 4934 time64_t now = ktime_get_real_seconds(); 4935 4936 if (bp->hwrm_spec_code < 0x10400) 4937 return -EOPNOTSUPP; 4938 4939 time64_to_tm(now, 0, &tm); 4940 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); 4941 req.year = cpu_to_le16(1900 + tm.tm_year); 4942 req.month = 1 + tm.tm_mon; 4943 req.day = tm.tm_mday; 4944 req.hour = tm.tm_hour; 4945 req.minute = tm.tm_min; 4946 req.second = tm.tm_sec; 4947 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4948 } 4949 4950 static int bnxt_hwrm_port_qstats(struct bnxt *bp) 4951 { 4952 int rc; 4953 struct bnxt_pf_info *pf = &bp->pf; 4954 struct hwrm_port_qstats_input req = {0}; 4955 4956 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 4957 return 0; 4958 4959 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); 4960 req.port_id = cpu_to_le16(pf->port_id); 4961 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); 4962 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); 4963 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4964 return rc; 4965 } 4966 4967 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 4968 { 4969 if (bp->vxlan_port_cnt) { 4970 bnxt_hwrm_tunnel_dst_port_free( 4971 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 4972 } 4973 bp->vxlan_port_cnt = 0; 4974 if (bp->nge_port_cnt) { 4975 bnxt_hwrm_tunnel_dst_port_free( 4976 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 4977 } 4978 bp->nge_port_cnt = 0; 4979 } 4980 4981 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 4982 { 4983 int rc, i; 4984 u32 tpa_flags = 0; 4985 4986 if (set_tpa) 4987 tpa_flags = bp->flags & BNXT_FLAG_TPA; 4988 for (i = 0; i < bp->nr_vnics; i++) { 4989 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4990 if (rc) { 4991 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4992 i, rc); 4993 return rc; 4994 } 4995 } 4996 return 0; 4997 } 4998 4999 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 5000 { 5001 int i; 5002 5003 for (i = 0; i < bp->nr_vnics; i++) 5004 
bnxt_hwrm_vnic_set_rss(bp, i, false); 5005 } 5006 5007 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 5008 bool irq_re_init) 5009 { 5010 if (bp->vnic_info) { 5011 bnxt_hwrm_clear_vnic_filter(bp); 5012 /* clear all RSS setting before free vnic ctx */ 5013 bnxt_hwrm_clear_vnic_rss(bp); 5014 bnxt_hwrm_vnic_ctx_free(bp); 5015 /* before free the vnic, undo the vnic tpa settings */ 5016 if (bp->flags & BNXT_FLAG_TPA) 5017 bnxt_set_tpa(bp, false); 5018 bnxt_hwrm_vnic_free(bp); 5019 } 5020 bnxt_hwrm_ring_free(bp, close_path); 5021 bnxt_hwrm_ring_grp_free(bp); 5022 if (irq_re_init) { 5023 bnxt_hwrm_stat_ctx_free(bp); 5024 bnxt_hwrm_free_tunnel_ports(bp); 5025 } 5026 } 5027 5028 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 5029 { 5030 struct hwrm_func_cfg_input req = {0}; 5031 int rc; 5032 5033 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 5034 req.fid = cpu_to_le16(0xffff); 5035 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 5036 if (br_mode == BRIDGE_MODE_VEB) 5037 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 5038 else if (br_mode == BRIDGE_MODE_VEPA) 5039 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 5040 else 5041 return -EINVAL; 5042 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5043 if (rc) 5044 rc = -EIO; 5045 return rc; 5046 } 5047 5048 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) 5049 { 5050 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5051 int rc; 5052 5053 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 5054 goto skip_rss_ctx; 5055 5056 /* allocate context for vnic */ 5057 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); 5058 if (rc) { 5059 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 5060 vnic_id, rc); 5061 goto vnic_setup_err; 5062 } 5063 bp->rsscos_nr_ctxs++; 5064 5065 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5066 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); 5067 if (rc) { 5068 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 5069 vnic_id, rc); 5070 goto vnic_setup_err; 5071 } 5072 bp->rsscos_nr_ctxs++; 5073 } 5074 5075 skip_rss_ctx: 5076 /* configure default vnic, ring grp */ 5077 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); 5078 if (rc) { 5079 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 5080 vnic_id, rc); 5081 goto vnic_setup_err; 5082 } 5083 5084 /* Enable RSS hashing on vnic */ 5085 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); 5086 if (rc) { 5087 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 5088 vnic_id, rc); 5089 goto vnic_setup_err; 5090 } 5091 5092 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5093 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); 5094 if (rc) { 5095 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 5096 vnic_id, rc); 5097 } 5098 } 5099 5100 vnic_setup_err: 5101 return rc; 5102 } 5103 5104 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 5105 { 5106 #ifdef CONFIG_RFS_ACCEL 5107 int i, rc = 0; 5108 5109 for (i = 0; i < bp->rx_nr_rings; i++) { 5110 struct bnxt_vnic_info *vnic; 5111 u16 vnic_id = i + 1; 5112 u16 ring_id = i; 5113 5114 if (vnic_id >= bp->nr_vnics) 5115 break; 5116 5117 vnic = &bp->vnic_info[vnic_id]; 5118 vnic->flags |= BNXT_VNIC_RFS_FLAG; 5119 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 5120 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 5121 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 5122 if (rc) { 5123 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 5124 vnic_id, rc); 5125 break; 5126 } 5127 rc = bnxt_setup_vnic(bp, vnic_id); 5128 if (rc) 5129 break; 5130 } 5131 return rc; 5132 #else 5133 return 
0; 5134 #endif 5135 } 5136 5137 /* Allow PF and VF with default VLAN to be in promiscuous mode */ 5138 static bool bnxt_promisc_ok(struct bnxt *bp) 5139 { 5140 #ifdef CONFIG_BNXT_SRIOV 5141 if (BNXT_VF(bp) && !bp->vf.vlan) 5142 return false; 5143 #endif 5144 return true; 5145 } 5146 5147 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 5148 { 5149 unsigned int rc = 0; 5150 5151 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); 5152 if (rc) { 5153 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 5154 rc); 5155 return rc; 5156 } 5157 5158 rc = bnxt_hwrm_vnic_cfg(bp, 1); 5159 if (rc) { 5160 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 5161 rc); 5162 return rc; 5163 } 5164 return rc; 5165 } 5166 5167 static int bnxt_cfg_rx_mode(struct bnxt *); 5168 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 5169 5170 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 5171 { 5172 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 5173 int rc = 0; 5174 unsigned int rx_nr_rings = bp->rx_nr_rings; 5175 5176 if (irq_re_init) { 5177 rc = bnxt_hwrm_stat_ctx_alloc(bp); 5178 if (rc) { 5179 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 5180 rc); 5181 goto err_out; 5182 } 5183 if (bp->tx_reserved_rings != bp->tx_nr_rings) { 5184 int tx = bp->tx_nr_rings; 5185 5186 if (bnxt_hwrm_reserve_tx_rings(bp, &tx) || 5187 tx < bp->tx_nr_rings) { 5188 rc = -ENOMEM; 5189 goto err_out; 5190 } 5191 } 5192 } 5193 5194 rc = bnxt_hwrm_ring_alloc(bp); 5195 if (rc) { 5196 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 5197 goto err_out; 5198 } 5199 5200 rc = bnxt_hwrm_ring_grp_alloc(bp); 5201 if (rc) { 5202 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 5203 goto err_out; 5204 } 5205 5206 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5207 rx_nr_rings--; 5208 5209 /* default vnic 0 */ 5210 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 5211 if (rc) { 5212 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 5213 goto err_out; 5214 } 5215 5216 rc = bnxt_setup_vnic(bp, 0); 5217 if (rc) 5218 goto err_out; 5219 5220 if (bp->flags & BNXT_FLAG_RFS) { 5221 rc = bnxt_alloc_rfs_vnics(bp); 5222 if (rc) 5223 goto err_out; 5224 } 5225 5226 if (bp->flags & BNXT_FLAG_TPA) { 5227 rc = bnxt_set_tpa(bp, true); 5228 if (rc) 5229 goto err_out; 5230 } 5231 5232 if (BNXT_VF(bp)) 5233 bnxt_update_vf_mac(bp); 5234 5235 /* Filter for default vnic 0 */ 5236 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 5237 if (rc) { 5238 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 5239 goto err_out; 5240 } 5241 vnic->uc_filter_count = 1; 5242 5243 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 5244 5245 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 5246 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5247 5248 if (bp->dev->flags & IFF_ALLMULTI) { 5249 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 5250 vnic->mc_list_count = 0; 5251 } else { 5252 u32 mask = 0; 5253 5254 bnxt_mc_list_updated(bp, &mask); 5255 vnic->rx_mask |= mask; 5256 } 5257 5258 rc = bnxt_cfg_rx_mode(bp); 5259 if (rc) 5260 goto err_out; 5261 5262 rc = bnxt_hwrm_set_coal(bp); 5263 if (rc) 5264 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 5265 rc); 5266 5267 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5268 rc = bnxt_setup_nitroa0_vnic(bp); 5269 if (rc) 5270 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 5271 rc); 5272 } 5273 5274 if (BNXT_VF(bp)) { 5275 bnxt_hwrm_func_qcfg(bp); 5276 
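		/* func_qcfg above refreshes the VF configuration (e.g. the
		 * default VLAN); netdev_update_features() then lets the core
		 * re-run the feature fixups against the refreshed state.
		 */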
netdev_update_features(bp->dev); 5277 } 5278 5279 return 0; 5280 5281 err_out: 5282 bnxt_hwrm_resource_free(bp, 0, true); 5283 5284 return rc; 5285 } 5286 5287 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 5288 { 5289 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 5290 return 0; 5291 } 5292 5293 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 5294 { 5295 bnxt_init_cp_rings(bp); 5296 bnxt_init_rx_rings(bp); 5297 bnxt_init_tx_rings(bp); 5298 bnxt_init_ring_grps(bp, irq_re_init); 5299 bnxt_init_vnics(bp); 5300 5301 return bnxt_init_chip(bp, irq_re_init); 5302 } 5303 5304 static int bnxt_set_real_num_queues(struct bnxt *bp) 5305 { 5306 int rc; 5307 struct net_device *dev = bp->dev; 5308 5309 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 5310 bp->tx_nr_rings_xdp); 5311 if (rc) 5312 return rc; 5313 5314 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 5315 if (rc) 5316 return rc; 5317 5318 #ifdef CONFIG_RFS_ACCEL 5319 if (bp->flags & BNXT_FLAG_RFS) 5320 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 5321 #endif 5322 5323 return rc; 5324 } 5325 5326 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 5327 bool shared) 5328 { 5329 int _rx = *rx, _tx = *tx; 5330 5331 if (shared) { 5332 *rx = min_t(int, _rx, max); 5333 *tx = min_t(int, _tx, max); 5334 } else { 5335 if (max < 2) 5336 return -ENOMEM; 5337 5338 while (_rx + _tx > max) { 5339 if (_rx > _tx && _rx > 1) 5340 _rx--; 5341 else if (_tx > 1) 5342 _tx--; 5343 } 5344 *rx = _rx; 5345 *tx = _tx; 5346 } 5347 return 0; 5348 } 5349 5350 static void bnxt_setup_msix(struct bnxt *bp) 5351 { 5352 const int len = sizeof(bp->irq_tbl[0].name); 5353 struct net_device *dev = bp->dev; 5354 int tcs, i; 5355 5356 tcs = netdev_get_num_tc(dev); 5357 if (tcs > 1) { 5358 int i, off, count; 5359 5360 for (i = 0; i < tcs; i++) { 5361 count = bp->tx_nr_rings_per_tc; 5362 off = i * count; 5363 netdev_set_tc_queue(dev, i, count, off); 5364 } 5365 } 5366 5367 for (i = 0; i < bp->cp_nr_rings; i++) { 5368 char *attr; 5369 5370 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5371 attr = "TxRx"; 5372 else if (i < bp->rx_nr_rings) 5373 attr = "rx"; 5374 else 5375 attr = "tx"; 5376 5377 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr, 5378 i); 5379 bp->irq_tbl[i].handler = bnxt_msix; 5380 } 5381 } 5382 5383 static void bnxt_setup_inta(struct bnxt *bp) 5384 { 5385 const int len = sizeof(bp->irq_tbl[0].name); 5386 5387 if (netdev_get_num_tc(bp->dev)) 5388 netdev_reset_tc(bp->dev); 5389 5390 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 5391 0); 5392 bp->irq_tbl[0].handler = bnxt_inta; 5393 } 5394 5395 static int bnxt_setup_int_mode(struct bnxt *bp) 5396 { 5397 int rc; 5398 5399 if (bp->flags & BNXT_FLAG_USING_MSIX) 5400 bnxt_setup_msix(bp); 5401 else 5402 bnxt_setup_inta(bp); 5403 5404 rc = bnxt_set_real_num_queues(bp); 5405 return rc; 5406 } 5407 5408 #ifdef CONFIG_RFS_ACCEL 5409 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 5410 { 5411 #if defined(CONFIG_BNXT_SRIOV) 5412 if (BNXT_VF(bp)) 5413 return bp->vf.max_rsscos_ctxs; 5414 #endif 5415 return bp->pf.max_rsscos_ctxs; 5416 } 5417 5418 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 5419 { 5420 #if defined(CONFIG_BNXT_SRIOV) 5421 if (BNXT_VF(bp)) 5422 return bp->vf.max_vnics; 5423 #endif 5424 return bp->pf.max_vnics; 5425 } 5426 #endif 5427 5428 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 5429 { 5430 #if defined(CONFIG_BNXT_SRIOV) 5431 if (BNXT_VF(bp)) 5432 return 
bp->vf.max_stat_ctxs; 5433 #endif 5434 return bp->pf.max_stat_ctxs; 5435 } 5436 5437 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) 5438 { 5439 #if defined(CONFIG_BNXT_SRIOV) 5440 if (BNXT_VF(bp)) 5441 bp->vf.max_stat_ctxs = max; 5442 else 5443 #endif 5444 bp->pf.max_stat_ctxs = max; 5445 } 5446 5447 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 5448 { 5449 #if defined(CONFIG_BNXT_SRIOV) 5450 if (BNXT_VF(bp)) 5451 return bp->vf.max_cp_rings; 5452 #endif 5453 return bp->pf.max_cp_rings; 5454 } 5455 5456 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) 5457 { 5458 #if defined(CONFIG_BNXT_SRIOV) 5459 if (BNXT_VF(bp)) 5460 bp->vf.max_cp_rings = max; 5461 else 5462 #endif 5463 bp->pf.max_cp_rings = max; 5464 } 5465 5466 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 5467 { 5468 #if defined(CONFIG_BNXT_SRIOV) 5469 if (BNXT_VF(bp)) 5470 return min_t(unsigned int, bp->vf.max_irqs, 5471 bp->vf.max_cp_rings); 5472 #endif 5473 return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings); 5474 } 5475 5476 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 5477 { 5478 #if defined(CONFIG_BNXT_SRIOV) 5479 if (BNXT_VF(bp)) 5480 bp->vf.max_irqs = max_irqs; 5481 else 5482 #endif 5483 bp->pf.max_irqs = max_irqs; 5484 } 5485 5486 static int bnxt_init_msix(struct bnxt *bp) 5487 { 5488 int i, total_vecs, rc = 0, min = 1; 5489 struct msix_entry *msix_ent; 5490 5491 total_vecs = bnxt_get_max_func_irqs(bp); 5492 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 5493 if (!msix_ent) 5494 return -ENOMEM; 5495 5496 for (i = 0; i < total_vecs; i++) { 5497 msix_ent[i].entry = i; 5498 msix_ent[i].vector = 0; 5499 } 5500 5501 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 5502 min = 2; 5503 5504 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); 5505 if (total_vecs < 0) { 5506 rc = -ENODEV; 5507 goto msix_setup_exit; 5508 } 5509 5510 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); 5511 if (bp->irq_tbl) { 5512 for (i = 0; i < total_vecs; i++) 5513 bp->irq_tbl[i].vector = msix_ent[i].vector; 5514 5515 bp->total_irqs = total_vecs; 5516 /* Trim rings based upon num of vectors allocated */ 5517 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 5518 total_vecs, min == 1); 5519 if (rc) 5520 goto msix_setup_exit; 5521 5522 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5523 bp->cp_nr_rings = (min == 1) ? 
5524 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 5525 bp->tx_nr_rings + bp->rx_nr_rings; 5526 5527 } else { 5528 rc = -ENOMEM; 5529 goto msix_setup_exit; 5530 } 5531 bp->flags |= BNXT_FLAG_USING_MSIX; 5532 kfree(msix_ent); 5533 return 0; 5534 5535 msix_setup_exit: 5536 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc); 5537 kfree(bp->irq_tbl); 5538 bp->irq_tbl = NULL; 5539 pci_disable_msix(bp->pdev); 5540 kfree(msix_ent); 5541 return rc; 5542 } 5543 5544 static int bnxt_init_inta(struct bnxt *bp) 5545 { 5546 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); 5547 if (!bp->irq_tbl) 5548 return -ENOMEM; 5549 5550 bp->total_irqs = 1; 5551 bp->rx_nr_rings = 1; 5552 bp->tx_nr_rings = 1; 5553 bp->cp_nr_rings = 1; 5554 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 5555 bp->flags |= BNXT_FLAG_SHARED_RINGS; 5556 bp->irq_tbl[0].vector = bp->pdev->irq; 5557 return 0; 5558 } 5559 5560 static int bnxt_init_int_mode(struct bnxt *bp) 5561 { 5562 int rc = 0; 5563 5564 if (bp->flags & BNXT_FLAG_MSIX_CAP) 5565 rc = bnxt_init_msix(bp); 5566 5567 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { 5568 /* fallback to INTA */ 5569 rc = bnxt_init_inta(bp); 5570 } 5571 return rc; 5572 } 5573 5574 static void bnxt_clear_int_mode(struct bnxt *bp) 5575 { 5576 if (bp->flags & BNXT_FLAG_USING_MSIX) 5577 pci_disable_msix(bp->pdev); 5578 5579 kfree(bp->irq_tbl); 5580 bp->irq_tbl = NULL; 5581 bp->flags &= ~BNXT_FLAG_USING_MSIX; 5582 } 5583 5584 static void bnxt_free_irq(struct bnxt *bp) 5585 { 5586 struct bnxt_irq *irq; 5587 int i; 5588 5589 #ifdef CONFIG_RFS_ACCEL 5590 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 5591 bp->dev->rx_cpu_rmap = NULL; 5592 #endif 5593 if (!bp->irq_tbl) 5594 return; 5595 5596 for (i = 0; i < bp->cp_nr_rings; i++) { 5597 irq = &bp->irq_tbl[i]; 5598 if (irq->requested) { 5599 if (irq->have_cpumask) { 5600 irq_set_affinity_hint(irq->vector, NULL); 5601 free_cpumask_var(irq->cpu_mask); 5602 irq->have_cpumask = 0; 5603 } 5604 free_irq(irq->vector, bp->bnapi[i]); 5605 } 5606 5607 irq->requested = 0; 5608 } 5609 } 5610 5611 static int bnxt_request_irq(struct bnxt *bp) 5612 { 5613 int i, j, rc = 0; 5614 unsigned long flags = 0; 5615 #ifdef CONFIG_RFS_ACCEL 5616 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; 5617 #endif 5618 5619 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) 5620 flags = IRQF_SHARED; 5621 5622 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 5623 struct bnxt_irq *irq = &bp->irq_tbl[i]; 5624 #ifdef CONFIG_RFS_ACCEL 5625 if (rmap && bp->bnapi[i]->rx_ring) { 5626 rc = irq_cpu_rmap_add(rmap, irq->vector); 5627 if (rc) 5628 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 5629 j); 5630 j++; 5631 } 5632 #endif 5633 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 5634 bp->bnapi[i]); 5635 if (rc) 5636 break; 5637 5638 irq->requested = 1; 5639 5640 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 5641 int numa_node = dev_to_node(&bp->pdev->dev); 5642 5643 irq->have_cpumask = 1; 5644 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 5645 irq->cpu_mask); 5646 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); 5647 if (rc) { 5648 netdev_warn(bp->dev, 5649 "Set affinity failed, IRQ = %d\n", 5650 irq->vector); 5651 break; 5652 } 5653 } 5654 } 5655 return rc; 5656 } 5657 5658 static void bnxt_del_napi(struct bnxt *bp) 5659 { 5660 int i; 5661 5662 if (!bp->bnapi) 5663 return; 5664 5665 for (i = 0; i < bp->cp_nr_rings; i++) { 5666 struct bnxt_napi *bnapi = bp->bnapi[i]; 5667 5668 napi_hash_del(&bnapi->napi); 5669 netif_napi_del(&bnapi->napi); 5670 } 5671 
/* We called napi_hash_del() before netif_napi_del(), we need 5672 * to respect an RCU grace period before freeing napi structures. 5673 */ 5674 synchronize_net(); 5675 } 5676 5677 static void bnxt_init_napi(struct bnxt *bp) 5678 { 5679 int i; 5680 unsigned int cp_nr_rings = bp->cp_nr_rings; 5681 struct bnxt_napi *bnapi; 5682 5683 if (bp->flags & BNXT_FLAG_USING_MSIX) { 5684 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5685 cp_nr_rings--; 5686 for (i = 0; i < cp_nr_rings; i++) { 5687 bnapi = bp->bnapi[i]; 5688 netif_napi_add(bp->dev, &bnapi->napi, 5689 bnxt_poll, 64); 5690 } 5691 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 5692 bnapi = bp->bnapi[cp_nr_rings]; 5693 netif_napi_add(bp->dev, &bnapi->napi, 5694 bnxt_poll_nitroa0, 64); 5695 } 5696 } else { 5697 bnapi = bp->bnapi[0]; 5698 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); 5699 } 5700 } 5701 5702 static void bnxt_disable_napi(struct bnxt *bp) 5703 { 5704 int i; 5705 5706 if (!bp->bnapi) 5707 return; 5708 5709 for (i = 0; i < bp->cp_nr_rings; i++) 5710 napi_disable(&bp->bnapi[i]->napi); 5711 } 5712 5713 static void bnxt_enable_napi(struct bnxt *bp) 5714 { 5715 int i; 5716 5717 for (i = 0; i < bp->cp_nr_rings; i++) { 5718 bp->bnapi[i]->in_reset = false; 5719 napi_enable(&bp->bnapi[i]->napi); 5720 } 5721 } 5722 5723 void bnxt_tx_disable(struct bnxt *bp) 5724 { 5725 int i; 5726 struct bnxt_tx_ring_info *txr; 5727 5728 if (bp->tx_ring) { 5729 for (i = 0; i < bp->tx_nr_rings; i++) { 5730 txr = &bp->tx_ring[i]; 5731 txr->dev_state = BNXT_DEV_STATE_CLOSING; 5732 } 5733 } 5734 /* Stop all TX queues */ 5735 netif_tx_disable(bp->dev); 5736 netif_carrier_off(bp->dev); 5737 } 5738 5739 void bnxt_tx_enable(struct bnxt *bp) 5740 { 5741 int i; 5742 struct bnxt_tx_ring_info *txr; 5743 5744 for (i = 0; i < bp->tx_nr_rings; i++) { 5745 txr = &bp->tx_ring[i]; 5746 txr->dev_state = 0; 5747 } 5748 netif_tx_wake_all_queues(bp->dev); 5749 if (bp->link_info.link_up) 5750 netif_carrier_on(bp->dev); 5751 } 5752 5753 static void bnxt_report_link(struct bnxt *bp) 5754 { 5755 if (bp->link_info.link_up) { 5756 const char *duplex; 5757 const char *flow_ctrl; 5758 u32 speed; 5759 u16 fec; 5760 5761 netif_carrier_on(bp->dev); 5762 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 5763 duplex = "full"; 5764 else 5765 duplex = "half"; 5766 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 5767 flow_ctrl = "ON - receive & transmit"; 5768 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 5769 flow_ctrl = "ON - transmit"; 5770 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 5771 flow_ctrl = "ON - receive"; 5772 else 5773 flow_ctrl = "none"; 5774 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 5775 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", 5776 speed, duplex, flow_ctrl); 5777 if (bp->flags & BNXT_FLAG_EEE_CAP) 5778 netdev_info(bp->dev, "EEE is %s\n", 5779 bp->eee.eee_active ? "active" : 5780 "not active"); 5781 fec = bp->link_info.fec_cfg; 5782 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 5783 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n", 5784 (fec & BNXT_FEC_AUTONEG) ? "on" : "off", 5785 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" : 5786 (fec & BNXT_FEC_ENC_RS) ? 
"RS" : "None"); 5787 } else { 5788 netif_carrier_off(bp->dev); 5789 netdev_err(bp->dev, "NIC Link is Down\n"); 5790 } 5791 } 5792 5793 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 5794 { 5795 int rc = 0; 5796 struct hwrm_port_phy_qcaps_input req = {0}; 5797 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 5798 struct bnxt_link_info *link_info = &bp->link_info; 5799 5800 if (bp->hwrm_spec_code < 0x10201) 5801 return 0; 5802 5803 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); 5804 5805 mutex_lock(&bp->hwrm_cmd_lock); 5806 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5807 if (rc) 5808 goto hwrm_phy_qcaps_exit; 5809 5810 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 5811 struct ethtool_eee *eee = &bp->eee; 5812 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 5813 5814 bp->flags |= BNXT_FLAG_EEE_CAP; 5815 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5816 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 5817 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 5818 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 5819 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 5820 } 5821 if (resp->supported_speeds_auto_mode) 5822 link_info->support_auto_speeds = 5823 le16_to_cpu(resp->supported_speeds_auto_mode); 5824 5825 bp->port_count = resp->port_cnt; 5826 5827 hwrm_phy_qcaps_exit: 5828 mutex_unlock(&bp->hwrm_cmd_lock); 5829 return rc; 5830 } 5831 5832 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 5833 { 5834 int rc = 0; 5835 struct bnxt_link_info *link_info = &bp->link_info; 5836 struct hwrm_port_phy_qcfg_input req = {0}; 5837 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 5838 u8 link_up = link_info->link_up; 5839 u16 diff; 5840 5841 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); 5842 5843 mutex_lock(&bp->hwrm_cmd_lock); 5844 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 5845 if (rc) { 5846 mutex_unlock(&bp->hwrm_cmd_lock); 5847 return rc; 5848 } 5849 5850 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 5851 link_info->phy_link_status = resp->link; 5852 link_info->duplex = resp->duplex_cfg; 5853 if (bp->hwrm_spec_code >= 0x10800) 5854 link_info->duplex = resp->duplex_state; 5855 link_info->pause = resp->pause; 5856 link_info->auto_mode = resp->auto_mode; 5857 link_info->auto_pause_setting = resp->auto_pause; 5858 link_info->lp_pause = resp->link_partner_adv_pause; 5859 link_info->force_pause_setting = resp->force_pause; 5860 link_info->duplex_setting = resp->duplex_cfg; 5861 if (link_info->phy_link_status == BNXT_LINK_LINK) 5862 link_info->link_speed = le16_to_cpu(resp->link_speed); 5863 else 5864 link_info->link_speed = 0; 5865 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 5866 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 5867 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 5868 link_info->lp_auto_link_speeds = 5869 le16_to_cpu(resp->link_partner_adv_speeds); 5870 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 5871 link_info->phy_ver[0] = resp->phy_maj; 5872 link_info->phy_ver[1] = resp->phy_min; 5873 link_info->phy_ver[2] = resp->phy_bld; 5874 link_info->media_type = resp->media_type; 5875 link_info->phy_type = resp->phy_type; 5876 link_info->transceiver = resp->xcvr_pkg_type; 5877 link_info->phy_addr = resp->eee_config_phy_addr & 5878 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 5879 link_info->module_status = resp->module_status; 5880 5881 if 
(bp->flags & BNXT_FLAG_EEE_CAP) { 5882 struct ethtool_eee *eee = &bp->eee; 5883 u16 fw_speeds; 5884 5885 eee->eee_active = 0; 5886 if (resp->eee_config_phy_addr & 5887 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 5888 eee->eee_active = 1; 5889 fw_speeds = le16_to_cpu( 5890 resp->link_partner_adv_eee_link_speed_mask); 5891 eee->lp_advertised = 5892 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5893 } 5894 5895 /* Pull initial EEE config */ 5896 if (!chng_link_state) { 5897 if (resp->eee_config_phy_addr & 5898 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 5899 eee->eee_enabled = 1; 5900 5901 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 5902 eee->advertised = 5903 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 5904 5905 if (resp->eee_config_phy_addr & 5906 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 5907 __le32 tmr; 5908 5909 eee->tx_lpi_enabled = 1; 5910 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 5911 eee->tx_lpi_timer = le32_to_cpu(tmr) & 5912 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 5913 } 5914 } 5915 } 5916 5917 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 5918 if (bp->hwrm_spec_code >= 0x10504) 5919 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 5920 5921 /* TODO: need to add more logic to report VF link */ 5922 if (chng_link_state) { 5923 if (link_info->phy_link_status == BNXT_LINK_LINK) 5924 link_info->link_up = 1; 5925 else 5926 link_info->link_up = 0; 5927 if (link_up != link_info->link_up) 5928 bnxt_report_link(bp); 5929 } else { 5930 /* always link down if not required to update link state */ 5931 link_info->link_up = 0; 5932 } 5933 mutex_unlock(&bp->hwrm_cmd_lock); 5934 5935 diff = link_info->support_auto_speeds ^ link_info->advertising; 5936 if ((link_info->support_auto_speeds | diff) != 5937 link_info->support_auto_speeds) { 5938 /* An advertised speed is no longer supported, so we need to 5939 * update the advertisement settings. Caller holds RTNL 5940 * so we can modify link settings.
5941 */ 5942 link_info->advertising = link_info->support_auto_speeds; 5943 if (link_info->autoneg & BNXT_AUTONEG_SPEED) 5944 bnxt_hwrm_set_link_setting(bp, true, false); 5945 } 5946 return 0; 5947 } 5948 5949 static void bnxt_get_port_module_status(struct bnxt *bp) 5950 { 5951 struct bnxt_link_info *link_info = &bp->link_info; 5952 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 5953 u8 module_status; 5954 5955 if (bnxt_update_link(bp, true)) 5956 return; 5957 5958 module_status = link_info->module_status; 5959 switch (module_status) { 5960 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 5961 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 5962 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 5963 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 5964 bp->pf.port_id); 5965 if (bp->hwrm_spec_code >= 0x10201) { 5966 netdev_warn(bp->dev, "Module part number %s\n", 5967 resp->phy_vendor_partnumber); 5968 } 5969 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 5970 netdev_warn(bp->dev, "TX is disabled\n"); 5971 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 5972 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 5973 } 5974 } 5975 5976 static void 5977 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 5978 { 5979 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 5980 if (bp->hwrm_spec_code >= 0x10201) 5981 req->auto_pause = 5982 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 5983 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5984 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 5985 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5986 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 5987 req->enables |= 5988 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 5989 } else { 5990 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 5991 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 5992 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 5993 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 5994 req->enables |= 5995 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 5996 if (bp->hwrm_spec_code >= 0x10201) { 5997 req->auto_pause = req->force_pause; 5998 req->enables |= cpu_to_le32( 5999 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 6000 } 6001 } 6002 } 6003 6004 static void bnxt_hwrm_set_link_common(struct bnxt *bp, 6005 struct hwrm_port_phy_cfg_input *req) 6006 { 6007 u8 autoneg = bp->link_info.autoneg; 6008 u16 fw_link_speed = bp->link_info.req_link_speed; 6009 u16 advertising = bp->link_info.advertising; 6010 6011 if (autoneg & BNXT_AUTONEG_SPEED) { 6012 req->auto_mode |= 6013 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 6014 6015 req->enables |= cpu_to_le32( 6016 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 6017 req->auto_link_speed_mask = cpu_to_le16(advertising); 6018 6019 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 6020 req->flags |= 6021 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 6022 } else { 6023 req->force_link_speed = cpu_to_le16(fw_link_speed); 6024 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 6025 } 6026 6027 /* tell chimp that the setting takes effect immediately */ 6028 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 6029 } 6030 6031 int bnxt_hwrm_set_pause(struct bnxt *bp) 6032 { 6033 struct hwrm_port_phy_cfg_input req = {0}; 6034 int rc; 6035 6036 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6037 bnxt_hwrm_set_pause_common(bp, &req); 6038 6039 if ((bp->link_info.autoneg & 
BNXT_AUTONEG_FLOW_CTRL) || 6040 bp->link_info.force_link_chng) 6041 bnxt_hwrm_set_link_common(bp, &req); 6042 6043 mutex_lock(&bp->hwrm_cmd_lock); 6044 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6045 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 6046 /* since changing of pause setting doesn't trigger any link 6047 * change event, the driver needs to update the current pause 6048 * result upon successfully return of the phy_cfg command 6049 */ 6050 bp->link_info.pause = 6051 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 6052 bp->link_info.auto_pause_setting = 0; 6053 if (!bp->link_info.force_link_chng) 6054 bnxt_report_link(bp); 6055 } 6056 bp->link_info.force_link_chng = false; 6057 mutex_unlock(&bp->hwrm_cmd_lock); 6058 return rc; 6059 } 6060 6061 static void bnxt_hwrm_set_eee(struct bnxt *bp, 6062 struct hwrm_port_phy_cfg_input *req) 6063 { 6064 struct ethtool_eee *eee = &bp->eee; 6065 6066 if (eee->eee_enabled) { 6067 u16 eee_speeds; 6068 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 6069 6070 if (eee->tx_lpi_enabled) 6071 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 6072 else 6073 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 6074 6075 req->flags |= cpu_to_le32(flags); 6076 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 6077 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 6078 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 6079 } else { 6080 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 6081 } 6082 } 6083 6084 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 6085 { 6086 struct hwrm_port_phy_cfg_input req = {0}; 6087 6088 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6089 if (set_pause) 6090 bnxt_hwrm_set_pause_common(bp, &req); 6091 6092 bnxt_hwrm_set_link_common(bp, &req); 6093 6094 if (set_eee) 6095 bnxt_hwrm_set_eee(bp, &req); 6096 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6097 } 6098 6099 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 6100 { 6101 struct hwrm_port_phy_cfg_input req = {0}; 6102 6103 if (!BNXT_SINGLE_PF(bp)) 6104 return 0; 6105 6106 if (pci_num_vf(bp->pdev)) 6107 return 0; 6108 6109 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); 6110 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 6111 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6112 } 6113 6114 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 6115 { 6116 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 6117 struct hwrm_port_led_qcaps_input req = {0}; 6118 struct bnxt_pf_info *pf = &bp->pf; 6119 int rc; 6120 6121 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 6122 return 0; 6123 6124 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); 6125 req.port_id = cpu_to_le16(pf->port_id); 6126 mutex_lock(&bp->hwrm_cmd_lock); 6127 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6128 if (rc) { 6129 mutex_unlock(&bp->hwrm_cmd_lock); 6130 return rc; 6131 } 6132 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 6133 int i; 6134 6135 bp->num_leds = resp->num_leds; 6136 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 6137 bp->num_leds); 6138 for (i = 0; i < bp->num_leds; i++) { 6139 struct bnxt_led_info *led = &bp->leds[i]; 6140 __le16 caps = led->led_state_caps; 6141 6142 if (!led->led_group_id || 6143 !BNXT_LED_ALT_BLINK_CAP(caps)) { 6144 bp->num_leds = 0; 6145 break; 6146 } 6147 } 6148 } 6149 mutex_unlock(&bp->hwrm_cmd_lock); 6150 
return 0; 6151 } 6152 6153 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 6154 { 6155 struct hwrm_wol_filter_alloc_input req = {0}; 6156 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; 6157 int rc; 6158 6159 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); 6160 req.port_id = cpu_to_le16(bp->pf.port_id); 6161 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 6162 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 6163 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); 6164 mutex_lock(&bp->hwrm_cmd_lock); 6165 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6166 if (!rc) 6167 bp->wol_filter_id = resp->wol_filter_id; 6168 mutex_unlock(&bp->hwrm_cmd_lock); 6169 return rc; 6170 } 6171 6172 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 6173 { 6174 struct hwrm_wol_filter_free_input req = {0}; 6175 int rc; 6176 6177 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); 6178 req.port_id = cpu_to_le16(bp->pf.port_id); 6179 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 6180 req.wol_filter_id = bp->wol_filter_id; 6181 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6182 return rc; 6183 } 6184 6185 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 6186 { 6187 struct hwrm_wol_filter_qcfg_input req = {0}; 6188 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 6189 u16 next_handle = 0; 6190 int rc; 6191 6192 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); 6193 req.port_id = cpu_to_le16(bp->pf.port_id); 6194 req.handle = cpu_to_le16(handle); 6195 mutex_lock(&bp->hwrm_cmd_lock); 6196 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6197 if (!rc) { 6198 next_handle = le16_to_cpu(resp->next_handle); 6199 if (next_handle != 0) { 6200 if (resp->wol_type == 6201 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 6202 bp->wol = 1; 6203 bp->wol_filter_id = resp->wol_filter_id; 6204 } 6205 } 6206 } 6207 mutex_unlock(&bp->hwrm_cmd_lock); 6208 return next_handle; 6209 } 6210 6211 static void bnxt_get_wol_settings(struct bnxt *bp) 6212 { 6213 u16 handle = 0; 6214 6215 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 6216 return; 6217 6218 do { 6219 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 6220 } while (handle && handle != 0xffff); 6221 } 6222 6223 static bool bnxt_eee_config_ok(struct bnxt *bp) 6224 { 6225 struct ethtool_eee *eee = &bp->eee; 6226 struct bnxt_link_info *link_info = &bp->link_info; 6227 6228 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 6229 return true; 6230 6231 if (eee->eee_enabled) { 6232 u32 advertising = 6233 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 6234 6235 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 6236 eee->eee_enabled = 0; 6237 return false; 6238 } 6239 if (eee->advertised & ~advertising) { 6240 eee->advertised = advertising & eee->supported; 6241 return false; 6242 } 6243 } 6244 return true; 6245 } 6246 6247 static int bnxt_update_phy_setting(struct bnxt *bp) 6248 { 6249 int rc; 6250 bool update_link = false; 6251 bool update_pause = false; 6252 bool update_eee = false; 6253 struct bnxt_link_info *link_info = &bp->link_info; 6254 6255 rc = bnxt_update_link(bp, true); 6256 if (rc) { 6257 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 6258 rc); 6259 return rc; 6260 } 6261 if (!BNXT_SINGLE_PF(bp)) 6262 return 0; 6263 6264 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 6265 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 6266 link_info->req_flow_ctrl) 6267 
update_pause = true; 6268 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 6269 link_info->force_pause_setting != link_info->req_flow_ctrl) 6270 update_pause = true; 6271 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 6272 if (BNXT_AUTO_MODE(link_info->auto_mode)) 6273 update_link = true; 6274 if (link_info->req_link_speed != link_info->force_link_speed) 6275 update_link = true; 6276 if (link_info->req_duplex != link_info->duplex_setting) 6277 update_link = true; 6278 } else { 6279 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 6280 update_link = true; 6281 if (link_info->advertising != link_info->auto_link_speeds) 6282 update_link = true; 6283 } 6284 6285 /* The last close may have shutdown the link, so need to call 6286 * PHY_CFG to bring it back up. 6287 */ 6288 if (!netif_carrier_ok(bp->dev)) 6289 update_link = true; 6290 6291 if (!bnxt_eee_config_ok(bp)) 6292 update_eee = true; 6293 6294 if (update_link) 6295 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 6296 else if (update_pause) 6297 rc = bnxt_hwrm_set_pause(bp); 6298 if (rc) { 6299 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 6300 rc); 6301 return rc; 6302 } 6303 6304 return rc; 6305 } 6306 6307 /* Common routine to pre-map certain register block to different GRC window. 6308 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows 6309 * in PF and 3 windows in VF that can be customized to map in different 6310 * register blocks. 6311 */ 6312 static void bnxt_preset_reg_win(struct bnxt *bp) 6313 { 6314 if (BNXT_PF(bp)) { 6315 /* CAG registers map to GRC window #4 */ 6316 writel(BNXT_CAG_REG_BASE, 6317 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); 6318 } 6319 } 6320 6321 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6322 { 6323 int rc = 0; 6324 6325 bnxt_preset_reg_win(bp); 6326 netif_carrier_off(bp->dev); 6327 if (irq_re_init) { 6328 rc = bnxt_setup_int_mode(bp); 6329 if (rc) { 6330 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 6331 rc); 6332 return rc; 6333 } 6334 } 6335 if ((bp->flags & BNXT_FLAG_RFS) && 6336 !(bp->flags & BNXT_FLAG_USING_MSIX)) { 6337 /* disable RFS if falling back to INTA */ 6338 bp->dev->hw_features &= ~NETIF_F_NTUPLE; 6339 bp->flags &= ~BNXT_FLAG_RFS; 6340 } 6341 6342 rc = bnxt_alloc_mem(bp, irq_re_init); 6343 if (rc) { 6344 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 6345 goto open_err_free_mem; 6346 } 6347 6348 if (irq_re_init) { 6349 bnxt_init_napi(bp); 6350 rc = bnxt_request_irq(bp); 6351 if (rc) { 6352 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 6353 goto open_err; 6354 } 6355 } 6356 6357 bnxt_enable_napi(bp); 6358 6359 rc = bnxt_init_nic(bp, irq_re_init); 6360 if (rc) { 6361 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 6362 goto open_err; 6363 } 6364 6365 if (link_re_init) { 6366 mutex_lock(&bp->link_lock); 6367 rc = bnxt_update_phy_setting(bp); 6368 mutex_unlock(&bp->link_lock); 6369 if (rc) 6370 netdev_warn(bp->dev, "failed to update phy settings\n"); 6371 } 6372 6373 if (irq_re_init) 6374 udp_tunnel_get_rx_info(bp->dev); 6375 6376 set_bit(BNXT_STATE_OPEN, &bp->state); 6377 bnxt_enable_int(bp); 6378 /* Enable TX queues */ 6379 bnxt_tx_enable(bp); 6380 mod_timer(&bp->timer, jiffies + bp->current_interval); 6381 /* Poll link status and check for SFP+ module status */ 6382 bnxt_get_port_module_status(bp); 6383 6384 /* VF-reps may need to be re-opened after the PF is re-opened */ 6385 if (BNXT_PF(bp)) 6386 bnxt_vf_reps_open(bp); 6387 return 0; 6388 6389 open_err: 6390 
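        /* Error unwind for __bnxt_open_nic(): tear down NAPI first, then fall
         * through to free SKBs, IRQs and ring memory below.
         */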
bnxt_disable_napi(bp); 6391 bnxt_del_napi(bp); 6392 6393 open_err_free_mem: 6394 bnxt_free_skbs(bp); 6395 bnxt_free_irq(bp); 6396 bnxt_free_mem(bp, true); 6397 return rc; 6398 } 6399 6400 /* rtnl_lock held */ 6401 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6402 { 6403 int rc = 0; 6404 6405 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 6406 if (rc) { 6407 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 6408 dev_close(bp->dev); 6409 } 6410 return rc; 6411 } 6412 6413 /* rtnl_lock held, open the NIC half way by allocating all resources, but 6414 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline 6415 * self tests. 6416 */ 6417 int bnxt_half_open_nic(struct bnxt *bp) 6418 { 6419 int rc = 0; 6420 6421 rc = bnxt_alloc_mem(bp, false); 6422 if (rc) { 6423 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 6424 goto half_open_err; 6425 } 6426 rc = bnxt_init_nic(bp, false); 6427 if (rc) { 6428 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 6429 goto half_open_err; 6430 } 6431 return 0; 6432 6433 half_open_err: 6434 bnxt_free_skbs(bp); 6435 bnxt_free_mem(bp, false); 6436 dev_close(bp->dev); 6437 return rc; 6438 } 6439 6440 /* rtnl_lock held, this call can only be made after a previous successful 6441 * call to bnxt_half_open_nic(). 6442 */ 6443 void bnxt_half_close_nic(struct bnxt *bp) 6444 { 6445 bnxt_hwrm_resource_free(bp, false, false); 6446 bnxt_free_skbs(bp); 6447 bnxt_free_mem(bp, false); 6448 } 6449 6450 static int bnxt_open(struct net_device *dev) 6451 { 6452 struct bnxt *bp = netdev_priv(dev); 6453 6454 return __bnxt_open_nic(bp, true, true); 6455 } 6456 6457 static bool bnxt_drv_busy(struct bnxt *bp) 6458 { 6459 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 6460 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 6461 } 6462 6463 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6464 { 6465 int rc = 0; 6466 6467 #ifdef CONFIG_BNXT_SRIOV 6468 if (bp->sriov_cfg) { 6469 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 6470 !bp->sriov_cfg, 6471 BNXT_SRIOV_CFG_WAIT_TMO); 6472 if (rc) 6473 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); 6474 } 6475 6476 /* Close the VF-reps before closing PF */ 6477 if (BNXT_PF(bp)) 6478 bnxt_vf_reps_close(bp); 6479 #endif 6480 /* Change device state to avoid TX queue wake up's */ 6481 bnxt_tx_disable(bp); 6482 6483 clear_bit(BNXT_STATE_OPEN, &bp->state); 6484 smp_mb__after_atomic(); 6485 while (bnxt_drv_busy(bp)) 6486 msleep(20); 6487 6488 /* Flush rings and and disable interrupts */ 6489 bnxt_shutdown_nic(bp, irq_re_init); 6490 6491 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 6492 6493 bnxt_disable_napi(bp); 6494 del_timer_sync(&bp->timer); 6495 bnxt_free_skbs(bp); 6496 6497 if (irq_re_init) { 6498 bnxt_free_irq(bp); 6499 bnxt_del_napi(bp); 6500 } 6501 bnxt_free_mem(bp, irq_re_init); 6502 return rc; 6503 } 6504 6505 static int bnxt_close(struct net_device *dev) 6506 { 6507 struct bnxt *bp = netdev_priv(dev); 6508 6509 bnxt_close_nic(bp, true, true); 6510 bnxt_hwrm_shutdown_link(bp); 6511 return 0; 6512 } 6513 6514 /* rtnl_lock held */ 6515 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 6516 { 6517 switch (cmd) { 6518 case SIOCGMIIPHY: 6519 /* fallthru */ 6520 case SIOCGMIIREG: { 6521 if (!netif_running(dev)) 6522 return -EAGAIN; 6523 6524 return 0; 6525 } 6526 6527 case SIOCSMIIREG: 6528 if (!netif_running(dev)) 6529 return -EAGAIN; 6530 6531 return 0; 6532 6533 default: 6534 /* do 
nothing */ 6535 break; 6536 } 6537 return -EOPNOTSUPP; 6538 } 6539 6540 static void 6541 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6542 { 6543 u32 i; 6544 struct bnxt *bp = netdev_priv(dev); 6545 6546 set_bit(BNXT_STATE_READ_STATS, &bp->state); 6547 /* Make sure bnxt_close_nic() sees that we are reading stats before 6548 * we check the BNXT_STATE_OPEN flag. 6549 */ 6550 smp_mb__after_atomic(); 6551 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6552 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6553 return; 6554 } 6555 6556 /* TODO check if we need to synchronize with bnxt_close path */ 6557 for (i = 0; i < bp->cp_nr_rings; i++) { 6558 struct bnxt_napi *bnapi = bp->bnapi[i]; 6559 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6560 struct ctx_hw_stats *hw_stats = cpr->hw_stats; 6561 6562 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); 6563 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); 6564 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); 6565 6566 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); 6567 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); 6568 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); 6569 6570 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); 6571 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); 6572 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); 6573 6574 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); 6575 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); 6576 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); 6577 6578 stats->rx_missed_errors += 6579 le64_to_cpu(hw_stats->rx_discard_pkts); 6580 6581 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); 6582 6583 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); 6584 } 6585 6586 if (bp->flags & BNXT_FLAG_PORT_STATS) { 6587 struct rx_port_stats *rx = bp->hw_rx_port_stats; 6588 struct tx_port_stats *tx = bp->hw_tx_port_stats; 6589 6590 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); 6591 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); 6592 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + 6593 le64_to_cpu(rx->rx_ovrsz_frames) + 6594 le64_to_cpu(rx->rx_runt_frames); 6595 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + 6596 le64_to_cpu(rx->rx_jbr_frames); 6597 stats->collisions = le64_to_cpu(tx->tx_total_collisions); 6598 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 6599 stats->tx_errors = le64_to_cpu(tx->tx_err); 6600 } 6601 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6602 } 6603 6604 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 6605 { 6606 struct net_device *dev = bp->dev; 6607 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6608 struct netdev_hw_addr *ha; 6609 u8 *haddr; 6610 int mc_count = 0; 6611 bool update = false; 6612 int off = 0; 6613 6614 netdev_for_each_mc_addr(ha, dev) { 6615 if (mc_count >= BNXT_MAX_MC_ADDRS) { 6616 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6617 vnic->mc_list_count = 0; 6618 return false; 6619 } 6620 haddr = ha->addr; 6621 if (!ether_addr_equal(haddr, vnic->mc_list + off)) { 6622 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 6623 update = true; 6624 } 6625 off += ETH_ALEN; 6626 mc_count++; 6627 } 6628 if (mc_count) 6629 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 6630 6631 if (mc_count != vnic->mc_list_count) { 6632 vnic->mc_list_count = mc_count; 6633 update = true; 6634 } 6635 return update; 6636 } 6637 6638 static bool 
bnxt_uc_list_updated(struct bnxt *bp) 6639 { 6640 struct net_device *dev = bp->dev; 6641 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6642 struct netdev_hw_addr *ha; 6643 int off = 0; 6644 6645 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 6646 return true; 6647 6648 netdev_for_each_uc_addr(ha, dev) { 6649 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 6650 return true; 6651 6652 off += ETH_ALEN; 6653 } 6654 return false; 6655 } 6656 6657 static void bnxt_set_rx_mode(struct net_device *dev) 6658 { 6659 struct bnxt *bp = netdev_priv(dev); 6660 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6661 u32 mask = vnic->rx_mask; 6662 bool mc_update = false; 6663 bool uc_update; 6664 6665 if (!netif_running(dev)) 6666 return; 6667 6668 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 6669 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 6670 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 6671 6672 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 6673 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6674 6675 uc_update = bnxt_uc_list_updated(bp); 6676 6677 if (dev->flags & IFF_ALLMULTI) { 6678 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 6679 vnic->mc_list_count = 0; 6680 } else { 6681 mc_update = bnxt_mc_list_updated(bp, &mask); 6682 } 6683 6684 if (mask != vnic->rx_mask || uc_update || mc_update) { 6685 vnic->rx_mask = mask; 6686 6687 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 6688 bnxt_queue_sp_work(bp); 6689 } 6690 } 6691 6692 static int bnxt_cfg_rx_mode(struct bnxt *bp) 6693 { 6694 struct net_device *dev = bp->dev; 6695 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6696 struct netdev_hw_addr *ha; 6697 int i, off = 0, rc; 6698 bool uc_update; 6699 6700 netif_addr_lock_bh(dev); 6701 uc_update = bnxt_uc_list_updated(bp); 6702 netif_addr_unlock_bh(dev); 6703 6704 if (!uc_update) 6705 goto skip_uc; 6706 6707 mutex_lock(&bp->hwrm_cmd_lock); 6708 for (i = 1; i < vnic->uc_filter_count; i++) { 6709 struct hwrm_cfa_l2_filter_free_input req = {0}; 6710 6711 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, 6712 -1); 6713 6714 req.l2_filter_id = vnic->fw_l2_filter_id[i]; 6715 6716 rc = _hwrm_send_message(bp, &req, sizeof(req), 6717 HWRM_CMD_TIMEOUT); 6718 } 6719 mutex_unlock(&bp->hwrm_cmd_lock); 6720 6721 vnic->uc_filter_count = 1; 6722 6723 netif_addr_lock_bh(dev); 6724 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 6725 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 6726 } else { 6727 netdev_for_each_uc_addr(ha, dev) { 6728 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 6729 off += ETH_ALEN; 6730 vnic->uc_filter_count++; 6731 } 6732 } 6733 netif_addr_unlock_bh(dev); 6734 6735 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 6736 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 6737 if (rc) { 6738 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", 6739 rc); 6740 vnic->uc_filter_count = i; 6741 return rc; 6742 } 6743 } 6744 6745 skip_uc: 6746 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 6747 if (rc) 6748 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 6749 rc); 6750 6751 return rc; 6752 } 6753 6754 /* If the chip and firmware supports RFS */ 6755 static bool bnxt_rfs_supported(struct bnxt *bp) 6756 { 6757 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 6758 return true; 6759 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) 6760 return true; 6761 return false; 6762 } 6763 6764 /* If runtime conditions support RFS */ 6765 static bool bnxt_rfs_capable(struct bnxt *bp) 6766 { 6767 #ifdef CONFIG_RFS_ACCEL 6768 int 
vnics, max_vnics, max_rss_ctxs;

        if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
                return false;

        vnics = 1 + bp->rx_nr_rings;
        max_vnics = bnxt_get_max_func_vnics(bp);
        max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);

        /* RSS contexts not a limiting factor */
        if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
                max_rss_ctxs = max_vnics;
        if (vnics > max_vnics || vnics > max_rss_ctxs) {
                netdev_warn(bp->dev,
                            "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
                            min(max_rss_ctxs - 1, max_vnics - 1));
                return false;
        }

        return true;
#else
        return false;
#endif
}

static netdev_features_t bnxt_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        struct bnxt *bp = netdev_priv(dev);

        if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;

        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
                features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);

        if (!(features & NETIF_F_GRO))
                features &= ~NETIF_F_GRO_HW;

        if (features & NETIF_F_GRO_HW)
                features &= ~NETIF_F_LRO;

        /* Both CTAG and STAG VLAN acceleration on the RX side have to be
         * turned on or off together.
         */
        if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
            (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
                if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
                        features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
                                      NETIF_F_HW_VLAN_STAG_RX);
                else
                        features |= NETIF_F_HW_VLAN_CTAG_RX |
                                    NETIF_F_HW_VLAN_STAG_RX;
        }
#ifdef CONFIG_BNXT_SRIOV
        if (BNXT_VF(bp)) {
                if (bp->vf.vlan) {
                        features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
                                      NETIF_F_HW_VLAN_STAG_RX);
                }
        }
#endif
        return features;
}

static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
        struct bnxt *bp = netdev_priv(dev);
        u32 flags = bp->flags;
        u32 changes;
        int rc = 0;
        bool re_init = false;
        bool update_tpa = false;

        flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
        if (features & NETIF_F_GRO_HW)
                flags |= BNXT_FLAG_GRO;
        else if (features & NETIF_F_LRO)
                flags |= BNXT_FLAG_LRO;

        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
                flags &= ~BNXT_FLAG_TPA;

        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                flags |= BNXT_FLAG_STRIP_VLAN;

        if (features & NETIF_F_NTUPLE)
                flags |= BNXT_FLAG_RFS;

        changes = flags ^ bp->flags;
        if (changes & BNXT_FLAG_TPA) {
                update_tpa = true;
                if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
                    (flags & BNXT_FLAG_TPA) == 0)
                        re_init = true;
        }

        if (changes & ~BNXT_FLAG_TPA)
                re_init = true;

        if (flags != bp->flags) {
                u32 old_flags = bp->flags;

                bp->flags = flags;

                if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                        if (update_tpa)
                                bnxt_set_ring_params(bp);
                        return rc;
                }

                if (re_init) {
                        bnxt_close_nic(bp, false, false);
                        if (update_tpa)
                                bnxt_set_ring_params(bp);

                        return bnxt_open_nic(bp, false, false);
                }
                if (update_tpa) {
                        rc = bnxt_set_tpa(bp,
                                          (flags & BNXT_FLAG_TPA) ?
6889 true : false); 6890 if (rc) 6891 bp->flags = old_flags; 6892 } 6893 } 6894 return rc; 6895 } 6896 6897 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 6898 { 6899 struct bnxt_tx_ring_info *txr = bnapi->tx_ring; 6900 int i = bnapi->index; 6901 6902 if (!txr) 6903 return; 6904 6905 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 6906 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 6907 txr->tx_cons); 6908 } 6909 6910 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 6911 { 6912 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 6913 int i = bnapi->index; 6914 6915 if (!rxr) 6916 return; 6917 6918 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 6919 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 6920 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 6921 rxr->rx_sw_agg_prod); 6922 } 6923 6924 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 6925 { 6926 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 6927 int i = bnapi->index; 6928 6929 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 6930 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 6931 } 6932 6933 static void bnxt_dbg_dump_states(struct bnxt *bp) 6934 { 6935 int i; 6936 struct bnxt_napi *bnapi; 6937 6938 for (i = 0; i < bp->cp_nr_rings; i++) { 6939 bnapi = bp->bnapi[i]; 6940 if (netif_msg_drv(bp)) { 6941 bnxt_dump_tx_sw_state(bnapi); 6942 bnxt_dump_rx_sw_state(bnapi); 6943 bnxt_dump_cp_sw_state(bnapi); 6944 } 6945 } 6946 } 6947 6948 static void bnxt_reset_task(struct bnxt *bp, bool silent) 6949 { 6950 if (!silent) 6951 bnxt_dbg_dump_states(bp); 6952 if (netif_running(bp->dev)) { 6953 int rc; 6954 6955 if (!silent) 6956 bnxt_ulp_stop(bp); 6957 bnxt_close_nic(bp, false, false); 6958 rc = bnxt_open_nic(bp, false, false); 6959 if (!silent && !rc) 6960 bnxt_ulp_start(bp); 6961 } 6962 } 6963 6964 static void bnxt_tx_timeout(struct net_device *dev) 6965 { 6966 struct bnxt *bp = netdev_priv(dev); 6967 6968 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6969 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6970 bnxt_queue_sp_work(bp); 6971 } 6972 6973 #ifdef CONFIG_NET_POLL_CONTROLLER 6974 static void bnxt_poll_controller(struct net_device *dev) 6975 { 6976 struct bnxt *bp = netdev_priv(dev); 6977 int i; 6978 6979 /* Only process tx rings/combined rings in netpoll mode. */ 6980 for (i = 0; i < bp->tx_nr_rings; i++) { 6981 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6982 6983 napi_schedule(&txr->bnapi->napi); 6984 } 6985 } 6986 #endif 6987 6988 static void bnxt_timer(struct timer_list *t) 6989 { 6990 struct bnxt *bp = from_timer(bp, t, timer); 6991 struct net_device *dev = bp->dev; 6992 6993 if (!netif_running(dev)) 6994 return; 6995 6996 if (atomic_read(&bp->intr_sem) != 0) 6997 goto bnxt_restart_timer; 6998 6999 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 7000 bp->stats_coal_ticks) { 7001 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 7002 bnxt_queue_sp_work(bp); 7003 } 7004 7005 if (bnxt_tc_flower_enabled(bp)) { 7006 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); 7007 bnxt_queue_sp_work(bp); 7008 } 7009 bnxt_restart_timer: 7010 mod_timer(&bp->timer, jiffies + bp->current_interval); 7011 } 7012 7013 static void bnxt_rtnl_lock_sp(struct bnxt *bp) 7014 { 7015 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 7016 * set. 
If the device is being closed, bnxt_close() may be holding 7017 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 7018 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 7019 */ 7020 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7021 rtnl_lock(); 7022 } 7023 7024 static void bnxt_rtnl_unlock_sp(struct bnxt *bp) 7025 { 7026 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7027 rtnl_unlock(); 7028 } 7029 7030 /* Only called from bnxt_sp_task() */ 7031 static void bnxt_reset(struct bnxt *bp, bool silent) 7032 { 7033 bnxt_rtnl_lock_sp(bp); 7034 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7035 bnxt_reset_task(bp, silent); 7036 bnxt_rtnl_unlock_sp(bp); 7037 } 7038 7039 static void bnxt_cfg_ntp_filters(struct bnxt *); 7040 7041 static void bnxt_sp_task(struct work_struct *work) 7042 { 7043 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 7044 7045 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7046 smp_mb__after_atomic(); 7047 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 7048 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7049 return; 7050 } 7051 7052 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 7053 bnxt_cfg_rx_mode(bp); 7054 7055 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 7056 bnxt_cfg_ntp_filters(bp); 7057 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 7058 bnxt_hwrm_exec_fwd_req(bp); 7059 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 7060 bnxt_hwrm_tunnel_dst_port_alloc( 7061 bp, bp->vxlan_port, 7062 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7063 } 7064 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { 7065 bnxt_hwrm_tunnel_dst_port_free( 7066 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 7067 } 7068 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { 7069 bnxt_hwrm_tunnel_dst_port_alloc( 7070 bp, bp->nge_port, 7071 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7072 } 7073 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { 7074 bnxt_hwrm_tunnel_dst_port_free( 7075 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 7076 } 7077 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 7078 bnxt_hwrm_port_qstats(bp); 7079 7080 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 7081 int rc; 7082 7083 mutex_lock(&bp->link_lock); 7084 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 7085 &bp->sp_event)) 7086 bnxt_hwrm_phy_qcaps(bp); 7087 7088 rc = bnxt_update_link(bp, true); 7089 mutex_unlock(&bp->link_lock); 7090 if (rc) 7091 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 7092 rc); 7093 } 7094 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 7095 mutex_lock(&bp->link_lock); 7096 bnxt_get_port_module_status(bp); 7097 mutex_unlock(&bp->link_lock); 7098 } 7099 7100 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 7101 bnxt_tc_flow_stats_work(bp); 7102 7103 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 7104 * must be the last functions to be called before exiting. 
7105 */ 7106 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 7107 bnxt_reset(bp, false); 7108 7109 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 7110 bnxt_reset(bp, true); 7111 7112 smp_mb__before_atomic(); 7113 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 7114 } 7115 7116 /* Under rtnl_lock */ 7117 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 7118 int tx_xdp) 7119 { 7120 int max_rx, max_tx, tx_sets = 1; 7121 int tx_rings_needed; 7122 int rc; 7123 7124 if (tcs) 7125 tx_sets = tcs; 7126 7127 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); 7128 if (rc) 7129 return rc; 7130 7131 if (max_rx < rx) 7132 return -ENOMEM; 7133 7134 tx_rings_needed = tx * tx_sets + tx_xdp; 7135 if (max_tx < tx_rings_needed) 7136 return -ENOMEM; 7137 7138 return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed); 7139 } 7140 7141 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 7142 { 7143 if (bp->bar2) { 7144 pci_iounmap(pdev, bp->bar2); 7145 bp->bar2 = NULL; 7146 } 7147 7148 if (bp->bar1) { 7149 pci_iounmap(pdev, bp->bar1); 7150 bp->bar1 = NULL; 7151 } 7152 7153 if (bp->bar0) { 7154 pci_iounmap(pdev, bp->bar0); 7155 bp->bar0 = NULL; 7156 } 7157 } 7158 7159 static void bnxt_cleanup_pci(struct bnxt *bp) 7160 { 7161 bnxt_unmap_bars(bp, bp->pdev); 7162 pci_release_regions(bp->pdev); 7163 pci_disable_device(bp->pdev); 7164 } 7165 7166 static void bnxt_init_dflt_coal(struct bnxt *bp) 7167 { 7168 struct bnxt_coal *coal; 7169 7170 /* Tick values in micro seconds. 7171 * 1 coal_buf x bufs_per_record = 1 completion record. 7172 */ 7173 coal = &bp->rx_coal; 7174 coal->coal_ticks = 14; 7175 coal->coal_bufs = 30; 7176 coal->coal_ticks_irq = 1; 7177 coal->coal_bufs_irq = 2; 7178 coal->idle_thresh = 25; 7179 coal->bufs_per_record = 2; 7180 coal->budget = 64; /* NAPI budget */ 7181 7182 coal = &bp->tx_coal; 7183 coal->coal_ticks = 28; 7184 coal->coal_bufs = 30; 7185 coal->coal_ticks_irq = 2; 7186 coal->coal_bufs_irq = 2; 7187 coal->bufs_per_record = 1; 7188 7189 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 7190 } 7191 7192 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 7193 { 7194 int rc; 7195 struct bnxt *bp = netdev_priv(dev); 7196 7197 SET_NETDEV_DEV(dev, &pdev->dev); 7198 7199 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 7200 rc = pci_enable_device(pdev); 7201 if (rc) { 7202 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 7203 goto init_err; 7204 } 7205 7206 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 7207 dev_err(&pdev->dev, 7208 "Cannot find PCI device base address, aborting\n"); 7209 rc = -ENODEV; 7210 goto init_err_disable; 7211 } 7212 7213 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 7214 if (rc) { 7215 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 7216 goto init_err_disable; 7217 } 7218 7219 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 7220 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 7221 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 7222 goto init_err_disable; 7223 } 7224 7225 pci_set_master(pdev); 7226 7227 bp->dev = dev; 7228 bp->pdev = pdev; 7229 7230 bp->bar0 = pci_ioremap_bar(pdev, 0); 7231 if (!bp->bar0) { 7232 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 7233 rc = -ENOMEM; 7234 goto init_err_release; 7235 } 7236 7237 bp->bar1 = pci_ioremap_bar(pdev, 2); 7238 if (!bp->bar1) { 7239 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n"); 7240 rc = -ENOMEM; 7241 goto init_err_release; 7242 } 7243 7244 bp->bar2 = pci_ioremap_bar(pdev, 4); 7245 if (!bp->bar2) { 7246 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 7247 rc = -ENOMEM; 7248 goto init_err_release; 7249 } 7250 7251 pci_enable_pcie_error_reporting(pdev); 7252 7253 INIT_WORK(&bp->sp_task, bnxt_sp_task); 7254 7255 spin_lock_init(&bp->ntp_fltr_lock); 7256 7257 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 7258 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 7259 7260 bnxt_init_dflt_coal(bp); 7261 7262 timer_setup(&bp->timer, bnxt_timer, 0); 7263 bp->current_interval = BNXT_TIMER_INTERVAL; 7264 7265 clear_bit(BNXT_STATE_OPEN, &bp->state); 7266 return 0; 7267 7268 init_err_release: 7269 bnxt_unmap_bars(bp, pdev); 7270 pci_release_regions(pdev); 7271 7272 init_err_disable: 7273 pci_disable_device(pdev); 7274 7275 init_err: 7276 return rc; 7277 } 7278 7279 /* rtnl_lock held */ 7280 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 7281 { 7282 struct sockaddr *addr = p; 7283 struct bnxt *bp = netdev_priv(dev); 7284 int rc = 0; 7285 7286 if (!is_valid_ether_addr(addr->sa_data)) 7287 return -EADDRNOTAVAIL; 7288 7289 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 7290 return 0; 7291 7292 rc = bnxt_approve_mac(bp, addr->sa_data); 7293 if (rc) 7294 return rc; 7295 7296 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 7297 if (netif_running(dev)) { 7298 bnxt_close_nic(bp, false, false); 7299 rc = bnxt_open_nic(bp, false, false); 7300 } 7301 7302 return rc; 7303 } 7304 7305 /* rtnl_lock held */ 7306 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 7307 { 7308 struct bnxt *bp = netdev_priv(dev); 7309 7310 if (netif_running(dev)) 7311 bnxt_close_nic(bp, false, false); 7312 7313 dev->mtu = new_mtu; 7314 bnxt_set_ring_params(bp); 7315 7316 if (netif_running(dev)) 7317 return bnxt_open_nic(bp, false, false); 7318 7319 return 0; 7320 } 7321 7322 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 7323 { 7324 struct bnxt *bp = netdev_priv(dev); 7325 bool sh = false; 7326 int rc; 7327 7328 if (tc > bp->max_tc) { 7329 netdev_err(dev, "Too many traffic classes requested: %d. 
Max supported is %d.\n", 7330 tc, bp->max_tc); 7331 return -EINVAL; 7332 } 7333 7334 if (netdev_get_num_tc(dev) == tc) 7335 return 0; 7336 7337 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7338 sh = true; 7339 7340 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 7341 sh, tc, bp->tx_nr_rings_xdp); 7342 if (rc) 7343 return rc; 7344 7345 /* Needs to close the device and do hw resource re-allocations */ 7346 if (netif_running(bp->dev)) 7347 bnxt_close_nic(bp, true, false); 7348 7349 if (tc) { 7350 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 7351 netdev_set_num_tc(dev, tc); 7352 } else { 7353 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 7354 netdev_reset_tc(dev); 7355 } 7356 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 7357 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 7358 bp->tx_nr_rings + bp->rx_nr_rings; 7359 bp->num_stat_ctxs = bp->cp_nr_rings; 7360 7361 if (netif_running(bp->dev)) 7362 return bnxt_open_nic(bp, true, false); 7363 7364 return 0; 7365 } 7366 7367 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 7368 void *cb_priv) 7369 { 7370 struct bnxt *bp = cb_priv; 7371 7372 if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev)) 7373 return -EOPNOTSUPP; 7374 7375 switch (type) { 7376 case TC_SETUP_CLSFLOWER: 7377 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 7378 default: 7379 return -EOPNOTSUPP; 7380 } 7381 } 7382 7383 static int bnxt_setup_tc_block(struct net_device *dev, 7384 struct tc_block_offload *f) 7385 { 7386 struct bnxt *bp = netdev_priv(dev); 7387 7388 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 7389 return -EOPNOTSUPP; 7390 7391 switch (f->command) { 7392 case TC_BLOCK_BIND: 7393 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, 7394 bp, bp); 7395 case TC_BLOCK_UNBIND: 7396 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); 7397 return 0; 7398 default: 7399 return -EOPNOTSUPP; 7400 } 7401 } 7402 7403 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 7404 void *type_data) 7405 { 7406 switch (type) { 7407 case TC_SETUP_BLOCK: 7408 return bnxt_setup_tc_block(dev, type_data); 7409 case TC_SETUP_QDISC_MQPRIO: { 7410 struct tc_mqprio_qopt *mqprio = type_data; 7411 7412 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 7413 7414 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 7415 } 7416 default: 7417 return -EOPNOTSUPP; 7418 } 7419 } 7420 7421 #ifdef CONFIG_RFS_ACCEL 7422 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 7423 struct bnxt_ntuple_filter *f2) 7424 { 7425 struct flow_keys *keys1 = &f1->fkeys; 7426 struct flow_keys *keys2 = &f2->fkeys; 7427 7428 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && 7429 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && 7430 keys1->ports.ports == keys2->ports.ports && 7431 keys1->basic.ip_proto == keys2->basic.ip_proto && 7432 keys1->basic.n_proto == keys2->basic.n_proto && 7433 keys1->control.flags == keys2->control.flags && 7434 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && 7435 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) 7436 return true; 7437 7438 return false; 7439 } 7440 7441 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 7442 u16 rxq_index, u32 flow_id) 7443 { 7444 struct bnxt *bp = netdev_priv(dev); 7445 struct bnxt_ntuple_filter *fltr, *new_fltr; 7446 struct flow_keys *fkeys; 7447 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 7448 int rc = 0, idx, bit_id, l2_idx = 0; 7449 struct hlist_head *head; 7450 7451 if 
(!ether_addr_equal(dev->dev_addr, eth->h_dest)) { 7452 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 7453 int off = 0, j; 7454 7455 netif_addr_lock_bh(dev); 7456 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { 7457 if (ether_addr_equal(eth->h_dest, 7458 vnic->uc_list + off)) { 7459 l2_idx = j + 1; 7460 break; 7461 } 7462 } 7463 netif_addr_unlock_bh(dev); 7464 if (!l2_idx) 7465 return -EINVAL; 7466 } 7467 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 7468 if (!new_fltr) 7469 return -ENOMEM; 7470 7471 fkeys = &new_fltr->fkeys; 7472 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 7473 rc = -EPROTONOSUPPORT; 7474 goto err_free; 7475 } 7476 7477 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 7478 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 7479 ((fkeys->basic.ip_proto != IPPROTO_TCP) && 7480 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 7481 rc = -EPROTONOSUPPORT; 7482 goto err_free; 7483 } 7484 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && 7485 bp->hwrm_spec_code < 0x10601) { 7486 rc = -EPROTONOSUPPORT; 7487 goto err_free; 7488 } 7489 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && 7490 bp->hwrm_spec_code < 0x10601) { 7491 rc = -EPROTONOSUPPORT; 7492 goto err_free; 7493 } 7494 7495 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); 7496 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); 7497 7498 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 7499 head = &bp->ntp_fltr_hash_tbl[idx]; 7500 rcu_read_lock(); 7501 hlist_for_each_entry_rcu(fltr, head, hash) { 7502 if (bnxt_fltr_match(fltr, new_fltr)) { 7503 rcu_read_unlock(); 7504 rc = 0; 7505 goto err_free; 7506 } 7507 } 7508 rcu_read_unlock(); 7509 7510 spin_lock_bh(&bp->ntp_fltr_lock); 7511 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 7512 BNXT_NTP_FLTR_MAX_FLTR, 0); 7513 if (bit_id < 0) { 7514 spin_unlock_bh(&bp->ntp_fltr_lock); 7515 rc = -ENOMEM; 7516 goto err_free; 7517 } 7518 7519 new_fltr->sw_id = (u16)bit_id; 7520 new_fltr->flow_id = flow_id; 7521 new_fltr->l2_fltr_idx = l2_idx; 7522 new_fltr->rxq = rxq_index; 7523 hlist_add_head_rcu(&new_fltr->hash, head); 7524 bp->ntp_fltr_count++; 7525 spin_unlock_bh(&bp->ntp_fltr_lock); 7526 7527 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7528 bnxt_queue_sp_work(bp); 7529 7530 return new_fltr->sw_id; 7531 7532 err_free: 7533 kfree(new_fltr); 7534 return rc; 7535 } 7536 7537 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7538 { 7539 int i; 7540 7541 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 7542 struct hlist_head *head; 7543 struct hlist_node *tmp; 7544 struct bnxt_ntuple_filter *fltr; 7545 int rc; 7546 7547 head = &bp->ntp_fltr_hash_tbl[i]; 7548 hlist_for_each_entry_safe(fltr, tmp, head, hash) { 7549 bool del = false; 7550 7551 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) { 7552 if (rps_may_expire_flow(bp->dev, fltr->rxq, 7553 fltr->flow_id, 7554 fltr->sw_id)) { 7555 bnxt_hwrm_cfa_ntuple_filter_free(bp, 7556 fltr); 7557 del = true; 7558 } 7559 } else { 7560 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 7561 fltr); 7562 if (rc) 7563 del = true; 7564 else 7565 set_bit(BNXT_FLTR_VALID, &fltr->state); 7566 } 7567 7568 if (del) { 7569 spin_lock_bh(&bp->ntp_fltr_lock); 7570 hlist_del_rcu(&fltr->hash); 7571 bp->ntp_fltr_count--; 7572 spin_unlock_bh(&bp->ntp_fltr_lock); 7573 synchronize_rcu(); 7574 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 7575 kfree(fltr); 7576 } 7577 } 7578 } 7579 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 7580 netdev_info(bp->dev, "Receive PF driver unload event!"); 7581 } 7582 7583 #else 
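/* CONFIG_RFS_ACCEL is not set: provide an empty stub so callers of
 * bnxt_cfg_ntp_filters() do not need #ifdef guards.
 */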
7584 7585 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 7586 { 7587 } 7588 7589 #endif /* CONFIG_RFS_ACCEL */ 7590 7591 static void bnxt_udp_tunnel_add(struct net_device *dev, 7592 struct udp_tunnel_info *ti) 7593 { 7594 struct bnxt *bp = netdev_priv(dev); 7595 7596 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7597 return; 7598 7599 if (!netif_running(dev)) 7600 return; 7601 7602 switch (ti->type) { 7603 case UDP_TUNNEL_TYPE_VXLAN: 7604 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) 7605 return; 7606 7607 bp->vxlan_port_cnt++; 7608 if (bp->vxlan_port_cnt == 1) { 7609 bp->vxlan_port = ti->port; 7610 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7611 bnxt_queue_sp_work(bp); 7612 } 7613 break; 7614 case UDP_TUNNEL_TYPE_GENEVE: 7615 if (bp->nge_port_cnt && bp->nge_port != ti->port) 7616 return; 7617 7618 bp->nge_port_cnt++; 7619 if (bp->nge_port_cnt == 1) { 7620 bp->nge_port = ti->port; 7621 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); 7622 } 7623 break; 7624 default: 7625 return; 7626 } 7627 7628 bnxt_queue_sp_work(bp); 7629 } 7630 7631 static void bnxt_udp_tunnel_del(struct net_device *dev, 7632 struct udp_tunnel_info *ti) 7633 { 7634 struct bnxt *bp = netdev_priv(dev); 7635 7636 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) 7637 return; 7638 7639 if (!netif_running(dev)) 7640 return; 7641 7642 switch (ti->type) { 7643 case UDP_TUNNEL_TYPE_VXLAN: 7644 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) 7645 return; 7646 bp->vxlan_port_cnt--; 7647 7648 if (bp->vxlan_port_cnt != 0) 7649 return; 7650 7651 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); 7652 break; 7653 case UDP_TUNNEL_TYPE_GENEVE: 7654 if (!bp->nge_port_cnt || bp->nge_port != ti->port) 7655 return; 7656 bp->nge_port_cnt--; 7657 7658 if (bp->nge_port_cnt != 0) 7659 return; 7660 7661 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); 7662 break; 7663 default: 7664 return; 7665 } 7666 7667 bnxt_queue_sp_work(bp); 7668 } 7669 7670 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7671 struct net_device *dev, u32 filter_mask, 7672 int nlflags) 7673 { 7674 struct bnxt *bp = netdev_priv(dev); 7675 7676 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 7677 nlflags, filter_mask, NULL); 7678 } 7679 7680 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 7681 u16 flags) 7682 { 7683 struct bnxt *bp = netdev_priv(dev); 7684 struct nlattr *attr, *br_spec; 7685 int rem, rc = 0; 7686 7687 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 7688 return -EOPNOTSUPP; 7689 7690 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7691 if (!br_spec) 7692 return -EINVAL; 7693 7694 nla_for_each_nested(attr, br_spec, rem) { 7695 u16 mode; 7696 7697 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7698 continue; 7699 7700 if (nla_len(attr) < sizeof(mode)) 7701 return -EINVAL; 7702 7703 mode = nla_get_u16(attr); 7704 if (mode == bp->br_mode) 7705 break; 7706 7707 rc = bnxt_hwrm_set_br_mode(bp, mode); 7708 if (!rc) 7709 bp->br_mode = mode; 7710 break; 7711 } 7712 return rc; 7713 } 7714 7715 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, 7716 size_t len) 7717 { 7718 struct bnxt *bp = netdev_priv(dev); 7719 int rc; 7720 7721 /* The PF and it's VF-reps only support the switchdev framework */ 7722 if (!BNXT_PF(bp)) 7723 return -EOPNOTSUPP; 7724 7725 rc = snprintf(buf, len, "p%d", bp->pf.port_id); 7726 7727 if (rc >= len) 7728 return -EOPNOTSUPP; 7729 return 0; 7730 } 7731 7732 int 
bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) 7733 { 7734 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 7735 return -EOPNOTSUPP; 7736 7737 /* The PF and it's VF-reps only support the switchdev framework */ 7738 if (!BNXT_PF(bp)) 7739 return -EOPNOTSUPP; 7740 7741 switch (attr->id) { 7742 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: 7743 /* In SRIOV each PF-pool (PF + child VFs) serves as a 7744 * switching domain, the PF's perm mac-addr can be used 7745 * as the unique parent-id 7746 */ 7747 attr->u.ppid.id_len = ETH_ALEN; 7748 ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr); 7749 break; 7750 default: 7751 return -EOPNOTSUPP; 7752 } 7753 return 0; 7754 } 7755 7756 static int bnxt_swdev_port_attr_get(struct net_device *dev, 7757 struct switchdev_attr *attr) 7758 { 7759 return bnxt_port_attr_get(netdev_priv(dev), attr); 7760 } 7761 7762 static const struct switchdev_ops bnxt_switchdev_ops = { 7763 .switchdev_port_attr_get = bnxt_swdev_port_attr_get 7764 }; 7765 7766 static const struct net_device_ops bnxt_netdev_ops = { 7767 .ndo_open = bnxt_open, 7768 .ndo_start_xmit = bnxt_start_xmit, 7769 .ndo_stop = bnxt_close, 7770 .ndo_get_stats64 = bnxt_get_stats64, 7771 .ndo_set_rx_mode = bnxt_set_rx_mode, 7772 .ndo_do_ioctl = bnxt_ioctl, 7773 .ndo_validate_addr = eth_validate_addr, 7774 .ndo_set_mac_address = bnxt_change_mac_addr, 7775 .ndo_change_mtu = bnxt_change_mtu, 7776 .ndo_fix_features = bnxt_fix_features, 7777 .ndo_set_features = bnxt_set_features, 7778 .ndo_tx_timeout = bnxt_tx_timeout, 7779 #ifdef CONFIG_BNXT_SRIOV 7780 .ndo_get_vf_config = bnxt_get_vf_config, 7781 .ndo_set_vf_mac = bnxt_set_vf_mac, 7782 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 7783 .ndo_set_vf_rate = bnxt_set_vf_bw, 7784 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 7785 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 7786 #endif 7787 #ifdef CONFIG_NET_POLL_CONTROLLER 7788 .ndo_poll_controller = bnxt_poll_controller, 7789 #endif 7790 .ndo_setup_tc = bnxt_setup_tc, 7791 #ifdef CONFIG_RFS_ACCEL 7792 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 7793 #endif 7794 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, 7795 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, 7796 .ndo_bpf = bnxt_xdp, 7797 .ndo_bridge_getlink = bnxt_bridge_getlink, 7798 .ndo_bridge_setlink = bnxt_bridge_setlink, 7799 .ndo_get_phys_port_name = bnxt_get_phys_port_name 7800 }; 7801 7802 static void bnxt_remove_one(struct pci_dev *pdev) 7803 { 7804 struct net_device *dev = pci_get_drvdata(pdev); 7805 struct bnxt *bp = netdev_priv(dev); 7806 7807 if (BNXT_PF(bp)) { 7808 bnxt_sriov_disable(bp); 7809 bnxt_dl_unregister(bp); 7810 } 7811 7812 pci_disable_pcie_error_reporting(pdev); 7813 unregister_netdev(dev); 7814 bnxt_shutdown_tc(bp); 7815 bnxt_cancel_sp_work(bp); 7816 bp->sp_event = 0; 7817 7818 bnxt_clear_int_mode(bp); 7819 bnxt_hwrm_func_drv_unrgtr(bp); 7820 bnxt_free_hwrm_resources(bp); 7821 bnxt_free_hwrm_short_cmd_req(bp); 7822 bnxt_ethtool_free(bp); 7823 bnxt_dcb_free(bp); 7824 kfree(bp->edev); 7825 bp->edev = NULL; 7826 bnxt_cleanup_pci(bp); 7827 free_netdev(dev); 7828 } 7829 7830 static int bnxt_probe_phy(struct bnxt *bp) 7831 { 7832 int rc = 0; 7833 struct bnxt_link_info *link_info = &bp->link_info; 7834 7835 rc = bnxt_hwrm_phy_qcaps(bp); 7836 if (rc) { 7837 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 7838 rc); 7839 return rc; 7840 } 7841 mutex_init(&bp->link_lock); 7842 7843 rc = bnxt_update_link(bp, false); 7844 if (rc) { 7845 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 7846 rc); 7847 return rc; 7848 
} 7849 7850 /* Older firmware does not have supported_auto_speeds, so assume 7851 * that all supported speeds can be autonegotiated. 7852 */ 7853 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 7854 link_info->support_auto_speeds = link_info->support_speeds; 7855 7856 /*initialize the ethool setting copy with NVM settings */ 7857 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 7858 link_info->autoneg = BNXT_AUTONEG_SPEED; 7859 if (bp->hwrm_spec_code >= 0x10201) { 7860 if (link_info->auto_pause_setting & 7861 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 7862 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7863 } else { 7864 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 7865 } 7866 link_info->advertising = link_info->auto_link_speeds; 7867 } else { 7868 link_info->req_link_speed = link_info->force_link_speed; 7869 link_info->req_duplex = link_info->duplex_setting; 7870 } 7871 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 7872 link_info->req_flow_ctrl = 7873 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 7874 else 7875 link_info->req_flow_ctrl = link_info->force_pause_setting; 7876 return rc; 7877 } 7878 7879 static int bnxt_get_max_irq(struct pci_dev *pdev) 7880 { 7881 u16 ctrl; 7882 7883 if (!pdev->msix_cap) 7884 return 1; 7885 7886 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 7887 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 7888 } 7889 7890 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7891 int *max_cp) 7892 { 7893 int max_ring_grps = 0; 7894 7895 #ifdef CONFIG_BNXT_SRIOV 7896 if (!BNXT_PF(bp)) { 7897 *max_tx = bp->vf.max_tx_rings; 7898 *max_rx = bp->vf.max_rx_rings; 7899 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); 7900 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs); 7901 max_ring_grps = bp->vf.max_hw_ring_grps; 7902 } else 7903 #endif 7904 { 7905 *max_tx = bp->pf.max_tx_rings; 7906 *max_rx = bp->pf.max_rx_rings; 7907 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); 7908 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); 7909 max_ring_grps = bp->pf.max_hw_ring_grps; 7910 } 7911 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 7912 *max_cp -= 1; 7913 *max_rx -= 2; 7914 } 7915 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7916 *max_rx >>= 1; 7917 *max_rx = min_t(int, *max_rx, max_ring_grps); 7918 } 7919 7920 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 7921 { 7922 int rx, tx, cp; 7923 7924 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 7925 if (!rx || !tx || !cp) 7926 return -ENOMEM; 7927 7928 *max_rx = rx; 7929 *max_tx = tx; 7930 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 7931 } 7932 7933 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 7934 bool shared) 7935 { 7936 int rc; 7937 7938 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 7939 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 7940 /* Not enough rings, try disabling agg rings. 
static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc)
			return rc;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	/* Reduce default rings to reduce memory usage on multi-port cards */
	if (bp->port_count > 1)
		dflt_rings = min_t(int, dflt_rings, 4);
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);

	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");

	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

void bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);
	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		} else {
			eth_hw_addr_random(bp->dev);
			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
		}
#endif
	}
	return rc;
}

static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err_pci_clean;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
		if (rc)
			goto init_err_pci_clean;
	}

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_P4_PLUS(bp))
		bp->gro_func = bnxt_gro_func_5731x;
	else
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err_pci_clean;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}
	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}

	bnxt_hwrm_func_qcfg(bp);
	bnxt_hwrm_port_led_qcaps(bp);
	bnxt_ethtool_init(bp);
	bnxt_dcb_init(bp);

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_get_wol_settings(bp);
	if (bp->flags & BNXT_FLAG_WOL_CAP)
		device_set_wakeup_enable(&pdev->dev, bp->wol);
	else
		device_set_wakeup_capable(&pdev->dev, false);

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup_tc;

	if (BNXT_PF(bp))
		bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_cleanup_tc:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_clear_int_mode(bp);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
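/* PM sleep hooks: bnxt_suspend() closes the netdev if it is running and
 * unregisters the driver from firmware; bnxt_resume() re-registers with
 * firmware, resets the function and reopens the netdev.
 */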
static int bnxt_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
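 *
 * Returns PCI_ERS_RESULT_RECOVERED; if the device cannot be re-enabled
 * or reopened, the netdev is closed before returning.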
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure	= bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
}

module_init(bnxt_init);
module_exit(bnxt_exit);