/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD	1000
#define ENIC_MAX_COALESCE_TIMERS	10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};
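/* Worked example of how this table is used (see enic_calc_int_moderation()
 * below): if the measured receive rate is 5000 Mbps, the first entry whose
 * rx_rate exceeds it is {5060, 20}, so the coalescing timer is placed 20%
 * of the way into the range chosen for the current link speed:
 *
 *	timer = range_start + (range_end - range_start) * 20 / 100
 */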
/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};

static void enic_init_affinity_hint(struct enic *enic)
{
	int numa_node = dev_to_node(&enic->pdev->dev);
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
		    (enic->msix[i].affinity_mask &&
		     !cpumask_empty(enic->msix[i].affinity_mask)))
			continue;
		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
				       GFP_KERNEL))
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					enic->msix[i].affinity_mask);
	}
}

static void enic_free_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
			continue;
		free_cpumask_var(enic->msix[i].affinity_mask);
	}
}

static void enic_set_affinity_hint(struct enic *enic)
{
	int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) ||
		    enic_is_notify_intr(enic, i) ||
		    !enic->msix[i].affinity_mask ||
		    cpumask_empty(enic->msix[i].affinity_mask))
			continue;
		err = irq_set_affinity_hint(enic->msix_entry[i].vector,
					    enic->msix[i].affinity_mask);
		if (err)
			netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
				    err);
	}

	for (i = 0; i < enic->wq_count; i++) {
		int wq_intr = enic_msix_wq_intr(enic, i);

		if (enic->msix[wq_intr].affinity_mask &&
		    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
			netif_set_xps_queue(enic->netdev,
					    enic->msix[wq_intr].affinity_mask,
					    i);
	}
}

static void enic_unset_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++)
		irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
}

static void enic_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	__be16 port = ti->port;
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
		netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
		goto error;
	}

	if (ti->sa_family != AF_INET) {
		netdev_info(netdev, "vxlan: only IPv4 offload supported");
		goto error;
	}

	if (enic->vxlan.vxlan_udp_port_number) {
		if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
			netdev_warn(netdev, "vxlan: udp port already offloaded");
		else
			netdev_info(netdev, "vxlan: offload supported for only one UDP port");

		goto error;
	}

	err = vnic_dev_overlay_offload_cfg(enic->vdev,
					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
					   ntohs(port));
	if (err)
		goto error;

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    enic->vxlan.patch_level);
	if (err)
		goto error;

	enic->vxlan.vxlan_udp_port_number = ntohs(port);

	netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
		    (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);

	goto unlock;

error:
	netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
		    ntohs(port), ti->sa_family, ti->type);
unlock:
	spin_unlock_bh(&enic->devcmd_lock);
}
static void enic_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	if ((ti->sa_family != AF_INET) ||
	    (ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) ||
	    (ti->type != UDP_TUNNEL_TYPE_VXLAN)) {
		netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
			    ntohs(ti->port), ti->sa_family, ti->type);
		goto unlock;
	}

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    OVERLAY_OFFLOAD_DISABLE);
	if (err) {
		netdev_err(netdev, "vxlan: del offload udp port: %d failed",
			   ntohs(ti->port));
		goto unlock;
	}

	enic->vxlan.vxlan_udp_port_number = 0;

	netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
		    ntohs(ti->port), ti->sa_family);

unlock:
	spin_unlock_bh(&enic->devcmd_lock);
}

static netdev_features_t enic_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
	struct enic *enic = netdev_priv(dev);
	struct udphdr *udph;
	u16 port = 0;
	u16 proto;

	if (!skb->encapsulation)
		return features;

	features = vxlan_features_check(skb, features);

	/* hardware only supports IPv4 vxlan tunnel */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		goto out;

	/* hardware does not support offload of ipv6 inner pkt */
	if (eth->h_proto != htons(ETH_P_IP))
		goto out;

	proto = ip_hdr(skb)->protocol;

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
	 * for other UDP port tunnels
	 */
	if (port != enic->vxlan.vxlan_udp_port_number)
		goto out;

	return features;

out:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
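/* Net effect of the checks above: for any encapsulated frame the NIC
 * cannot offload (IPv6 outer or inner headers, or a UDP destination port
 * other than the single offloaded VXLAN port), checksum and GSO features
 * are cleared so the stack falls back to software checksumming and
 * segmentation for that skb.
 */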
int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
				 buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			       buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
			     struct cq_desc *cq_desc, struct vnic_wq_buf *buf,
			     void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			   u8 type, u16 q_number, u16 completed_index,
			   void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
			completed_index, enic_wq_free_buf,
			opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				   i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				   i, error_status);
	}

	return err;
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			    enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				    min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					    "interface MTU (%d) set higher than switch port MTU (%d)\n",
					    netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}
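/* The three checks bundled in enic_notify_check() below run either from
 * the notify interrupt (INTx/MSI-X, see enic_isr_legacy() and
 * enic_isr_msix_notify()) or, with plain MSI, from a timer that fires
 * every ENIC_NOTIFY_TIMER_PERIOD (see enic_notify_timer_start()).
 * Firmware updates the message level, port MTU and link state on its
 * side; the driver only polls them here.
 */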
static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i)))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	if (enic_log_q_error(enic))
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}

static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, unsigned int len_left,
				  int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, int vlan_tag_insert,
				  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
			   vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}
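/* Descriptor chaining pattern shared by all the enic_queue_wq_skb_*()
 * variants: the linear head of the skb is posted with a SOP descriptor,
 * and enic_queue_wq_skb_cont() posts one continuation descriptor per
 * page fragment, marking EOP only on the last one.
 */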
static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
				     struct sk_buff *skb, int vlan_tag_insert,
				     unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
				   hdr_len, vlan_tag_insert, vlan_tag, eop,
				   loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
{
	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr, 0,
					   IPPROTO_TCP, 0);
	}
}

static void enic_preload_tcp_csum(struct sk_buff *skb)
{
	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
}

static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
				 struct sk_buff *skb, unsigned int mss,
				 int vlan_tag_insert, unsigned int vlan_tag,
				 int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	int eop = (len_left == 0);
	unsigned int offset = 0;
	unsigned int hdr_len;
	dma_addr_t dma_addr;
	unsigned int len;
	skb_frag_t *frag;

	if (skb->encapsulation) {
		hdr_len = skb_inner_transport_header(skb) - skb->data;
		hdr_len += inner_tcp_hdrlen(skb);
		enic_preload_tcp_csum_encap(skb);
	} else {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		enic_preload_tcp_csum(skb);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
					  PCI_DMA_TODEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				  (unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						(len == frag_len_left), /* EOP? */
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}
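/* Sizing note: each WQ descriptor covers at most WQ_ENET_MAX_DESC_LEN
 * bytes (16 KB when WQ_ENET_LEN_BITS is 14, as defined in wq_enet_desc.h),
 * so a maximal MAX_TSO (64 KB) linear payload splits into at most
 * ENIC_DESC_MAX_SPLITS = MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1 = 5
 * descriptors. This is the same worst case the xmit path reserves when
 * deciding whether to stop the queue.
 */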
static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
					  struct sk_buff *skb,
					  int vlan_tag_insert,
					  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	/* Hardware will overwrite the checksum fields, calculating from
	 * scratch and ignoring the value placed by software.
	 * Offload mode = 00
	 * mss[2], mss[1], mss[0] bits are set
	 */
	unsigned int mss_or_csum = 7;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
			      vlan_tag_insert, vlan_tag,
			      WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop,
			      loopback);
	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->encapsulation)
		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
					      vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* while not EOP of previous pkt && queue not empty.
		 * For all non EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
}
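/* Queue stop/wake hysteresis: enic_hard_start_xmit() below stops the
 * queue once fewer than MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors
 * remain (the worst-case single send), and enic_wq_service() wakes it
 * again only once at least that many have been reclaimed, so the queue
 * does not bounce between the two states on every completion.
 */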
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
					struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
			   struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
	int err;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}
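/* The sync/unsync helpers above maintain the adapter's perfect-filter
 * tables, which hold at most ENIC_UNICAST_PERFECT_FILTERS unicast and
 * ENIC_MULTICAST_PERFECT_FILTERS multicast addresses. When either list
 * overflows, enic_set_rx_mode() below falls back to promiscuous or
 * all-multicast mode instead of registering the overflowing addresses.
 */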
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
				       multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->tx_hang_reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/* For sriov vf's set the mac in hw */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
						   vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}
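/* enic_set_vf_port()/enic_get_vf_port() below implement the
 * ndo_set_vf_port/ndo_get_vf_port hooks: management software (libvirt,
 * for example) passes IFLA_PORT_* netlink attributes describing a port
 * profile, and the driver forwards the (dis)association request to
 * firmware via enic_process_set_pp_request().
 */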
static int enic_set_vf_port(struct net_device *netdev, int vf,
			    struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
		       PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
		       nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
		       nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
					   vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
			    struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}
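/* RX refill below reuses a descriptor's existing skb when one is still
 * attached (buf->os_buf != NULL), which happens after rx_copybreak
 * consumed a copy of the data and left the original mapped buffer on
 * the ring; only otherwise is a fresh skb allocated and DMA-mapped.
 */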
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data, len,
				  PCI_DMA_FROMDEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}
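/* Copybreak rationale: packets no longer than rx_copybreak (default
 * RX_COPYBREAK_DEFAULT, 256 bytes) are memcpy'd into a small freshly
 * allocated skb, so the full-MTU ring buffer stays DMA-mapped and can
 * be re-posted as-is by enic_rq_alloc_buf(). The threshold is exposed
 * as an ethtool rx-copybreak tunable in enic_ethtool.c.
 */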
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;
	bool outer_csum_ok = true, encap = false;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
			    &type, &color, &q_number, &completed_index,
			    &ingress_port, &fcoe, &eop, &sop, &rss_type,
			    &csum_not_calc, &rss_hash, &bytes_written,
			    &packet_error, &vlan_stripped, &vlan_tci, &checksum,
			    &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
			    &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
			    &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
			    &fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
		    (type == 3)) {
			switch (rss_type) {
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
				break;
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
				break;
			}
		}
		if (enic->vxlan.vxlan_udp_port_number) {
			switch (enic->vxlan.patch_level) {
			case 0:
				if (fcoe) {
					encap = true;
					outer_csum_ok = fcoe_fc_crc_ok;
				}
				break;
			case 2:
				if ((type == 7) &&
				    (rss_hash & BIT(0))) {
					encap = true;
					outer_csum_ok = (rss_hash & BIT(1)) &&
							(rss_hash & BIT(2));
				}
				break;
			}
		}

		/* Hardware does not provide whole packet checksum. It only
		 * provides pseudo checksum. Since hw validates the packet
		 * checksum but does not provide us the checksum value, use
		 * CHECKSUM_UNNECESSARY.
		 *
		 * In case of an encapsulated packet, tcp_udp_csum_ok and
		 * ipv4_csum_ok reflect the inner checksum status.
		 * outer_csum_ok is set by hw when the outer udp csum is
		 * correct or is zero.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
		    tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = encap;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (!(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			   u8 type, u16 q_number, u16 completed_index,
			   void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
			completed_index, VNIC_RQ_RETURN_DESC,
			enic_rq_indicate_buf, opaque);

	return 0;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);
	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
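/* Numeric example for the math above: 125,000 bytes seen in a 200 us
 * window gives traffic = 125000 * 8 / 200 = 5000 Mbps; the first
 * mod_table entry above that rate is {5060, 20}, so the new timer is
 * damped toward range_start + 20% of the configured range.
 */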
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
					       rq_work_to_do, enic_rq_service,
					       NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
					 work_done,
					 0 /* don't unmask intr */,
					 0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[0]);

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete_done(napi, rq_work_done);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[0]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	struct vnic_wq *wq = &enic->wq[wq_index];
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* clean all desc possible */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = wq->index;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);
	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
				 0 /* don't unmask intr */,
				 1 /* reset intr timer */);
	if (!wq_work_done) {
		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
		return 0;
	}

	return budget;
}
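/* The WQ poller above services TX completions without a budget
 * (wq_work_to_do = -1): it either drained everything and re-arms the
 * interrupt (returning 0), or reports the full budget so NAPI keeps it
 * scheduled.
 */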
static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	/* Service RQ */

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
					    work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
					 work_done,
					 0 /* don't unmask intr */,
					 0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete_done(napi, work_done);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		  round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					 enic->msix[i].devid);
		break;
	default:
		break;
	}
}
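/* MSI-X vector layout set up below: one vector per RQ, then one per WQ
 * (indices via enic_msix_rq_intr()/enic_msix_wq_intr()), plus dedicated
 * error and notify vectors. IRQ names follow "<ifname>-rx-N",
 * "<ifname>-tx-N", "<ifname>-err" and "<ifname>-notify".
 */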
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
				  IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
				  0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				 sizeof(enic->msix[intr].devname),
				 "%.11s-rx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				 sizeof(enic->msix[intr].devname),
				 "%.11s-tx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			 sizeof(enic->msix[intr].devname),
			 "%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			 sizeof(enic->msix[intr].devname),
			 "%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
					  enic->msix[i].isr, 0,
					  enic->msix[i].devname,
					  enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
			enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
					  enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
					  enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
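/* enic_open() brings the interface up in this order: request IRQs and
 * set affinity hints, point firmware at the notify buffer, fill each RQ
 * with receive buffers, enable WQs/RQs, program the station MAC and rx
 * mode, start NAPI, enable the vNIC, and finally unmask interrupts.
 * enic_stop() unwinds the same steps in reverse.
 */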
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}
	enic_init_affinity_hint(enic);
	enic_set_affinity_hint(enic);

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_flw_tbl_init(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]);	/* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			    "interface MTU (%d) set higher than port MTU (%d)\n",
			    netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
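/* Firmware-initiated MTU changes (detected in enic_mtu_check() and only
 * scheduled for dynamic/SR-IOV VF interfaces) are applied from process
 * context below, clamped to the [ENIC_MIN_MTU, ENIC_MAX_MTU] range.
 */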

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
			err);

	return err;
}
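
/* Soft reset and hang reset both poll to completion through
 * enic_dev_wait(); soft reset is the ordinary recovery path used by the
 * reset work item, while hang reset (below) is the heavier variant
 * driven by the tx_hang_reset work item.
 */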
static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
					       sizeof(union vnic_rss_key),
					       &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
			    rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
			    rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
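
/* With rss_hash_bits == 7, the loop above programs a 128-entry
 * indirection table packed four bytes per 32-bit word; e.g. with
 * rq_count == 4 the entries read 0,1,2,3,0,1,2,3,... so hash buckets
 * are spread round-robin across the receive queues.
 */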
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, failed to set RSS cpu indirection table.\n");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

static void enic_tx_hang_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, tx_hang_reset);

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}
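
/* A sketch of the MSI-X vector layout negotiated below: with n RQs and
 * m WQs, vectors 0..n-1 service RQ completions, n..n+m-1 service WQ
 * completions, vector n+m handles WQ/RQ errors and vector n+m+1
 * handles notifications, for n + m + 2 vectors in total.
 */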
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
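
/* Two ndo tables follow: dynamic (and SR-IOV VF) vNICs differ from the
 * standard table only in using enic_set_mac_address_dynamic, since
 * their address state is provisioned through port profiles rather than
 * set directly on the device.
 */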
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
	.ndo_features_check	= enic_features_check,
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
	.ndo_features_check	= enic_features_check,
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
	enic_free_affinity_hint(enic);
}
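
/* When running as a kdump capture kernel, memory is scarce; the helper
 * below trims the vNIC to one RQ/WQ pair with minimum descriptor counts
 * and caps the MTU at 1500 before resources are allocated.
 */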
static void enic_kdump_kernel_config(struct enic *enic)
{
	if (is_kdump_kernel()) {
		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
	}
}

static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Modify resource counts if we are running in a kdump kernel
	 */
	enic_kdump_kernel_config(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				       enic_poll_msix_rq, NAPI_POLL_WEIGHT);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				       enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_affinity_hint(enic);
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
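
/* Probe order matters: PCI setup, BAR mapping and vNIC registration come
 * first, then device open, vNIC/resource init, and finally
 * register_netdev(); each stage unwinds through the err_out_* labels at
 * the bottom of the function in reverse order.
 */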
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 64-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 64);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = vnic_devcmd_init(enic->vdev);

	if (err)
		goto err_out_vnic_unregister;
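
	/* On the PF, the SR-IOV capability reports how many VFs exist; each
	 * VF gets its own port-profile slot in the enic->pp array allocated
	 * below.
	 */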
#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);
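
	/* The reset, tx-hang and MTU-change handlers initialized above sleep
	 * and take rtnl_lock(), so they are deferred to work items; callers
	 * in atomic context can then trigger them safely.
	 */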

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* The rx coalesce time was already initialized; that value is
	 * used if adaptive coalescing is turned off.
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (ENIC_SETTING(enic, VXLAN)) {
		u64 patch_level;

		netdev->hw_enc_features |= NETIF_F_RXCSUM |
					   NETIF_F_TSO |
					   NETIF_F_TSO_ECN |
					   NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_HW_CSUM |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_features |= netdev->hw_enc_features;
		/* get bit mask from hw about supported offload bit level
		 * BIT(0) = fw supports patch_level 0
		 *	    fcoe bit = encap
		 *	    fcoe_fc_crc_ok = outer csum ok
		 * BIT(1) = always set by fw
		 * BIT(2) = fw supports patch_level 2
		 *	    BIT(0) in rss_hash = encap
		 *	    BIT(1,2) in rss_hash = outer_ip_csum_ok/
		 *				   outer_tcp_csum_ok
		 * used in enic_rq_indicate_buf
		 */
		err = vnic_dev_get_supported_feature_ver(enic->vdev,
							 VIC_FEATURE_VXLAN,
							 &patch_level);
		if (err)
			patch_level = 0;
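
		/* A worked example of the mapping below: the firmware mask is
		 * reduced to BIT(0)|BIT(2), then fls() takes the highest
		 * surviving bit to a level, so 0x3 -> 0x1 -> level 0 and
		 * 0x7 -> 0x5 -> level 2.
		 */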
		/* mask bits that are supported by driver
		 */
		patch_level &= BIT_ULL(0) | BIT_ULL(2);
		patch_level = fls(patch_level);
		patch_level = patch_level ? patch_level - 1 : 0;
		enic->vxlan.patch_level = patch_level;
	}

	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9000 */
	netdev->min_mtu = ENIC_MIN_MTU;
	netdev->max_mtu = ENIC_MAX_MTU;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
#endif
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}

static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);