/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/crash_dump.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
#define MAX_TSO (1 << 16)
#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT 256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
        { 0, } /* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD 1000
#define ENIC_MAX_COALESCE_TIMERS 10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
        {4000, 0},
        {4400, 10},
        {5060, 20},
        {5230, 30},
        {5540, 40},
        {5820, 50},
        {6120, 60},
        {6435, 70},
        {6745, 80},
        {7000, 90},
        {0xFFFFFFFF, 100}
};
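/* Illustrative walk-through of the table above (not driver logic): if the
 * sampled receive rate works out to 5500 Mbps, the scan in
 * enic_calc_int_moderation() stops at the first entry whose rx_rate
 * exceeds the sample -- here {5540, 40} -- so 40% of the configured
 * coalescing timer range is used. The 0xFFFFFFFF sentinel guarantees the
 * scan always terminates, at 100%.
 */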
/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
        {0, 0}, /* 0 - 4 Gbps */
        {0, 3}, /* 4 - 10 Gbps */
        {3, 6}, /* 10 - 40 Gbps */
};
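/* Example (illustrative): a port reporting 10 Gbps falls in the second
 * row, so enic_set_rx_coal_setting() starts the small-packet timer range
 * at 0 and the large-packet range at 3, both running up to
 * ENIC_RX_COALESCE_RANGE_END.
 */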
static void enic_init_affinity_hint(struct enic *enic)
{
        int numa_node = dev_to_node(&enic->pdev->dev);
        int i;

        for (i = 0; i < enic->intr_count; i++) {
                if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
                    (enic->msix[i].affinity_mask &&
                     !cpumask_empty(enic->msix[i].affinity_mask)))
                        continue;
                if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
                                       GFP_KERNEL))
                        cpumask_set_cpu(cpumask_local_spread(i, numa_node),
                                        enic->msix[i].affinity_mask);
        }
}

static void enic_free_affinity_hint(struct enic *enic)
{
        int i;

        for (i = 0; i < enic->intr_count; i++) {
                if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
                        continue;
                free_cpumask_var(enic->msix[i].affinity_mask);
        }
}

static void enic_set_affinity_hint(struct enic *enic)
{
        int i;
        int err;

        for (i = 0; i < enic->intr_count; i++) {
                if (enic_is_err_intr(enic, i) ||
                    enic_is_notify_intr(enic, i) ||
                    !enic->msix[i].affinity_mask ||
                    cpumask_empty(enic->msix[i].affinity_mask))
                        continue;
                err = irq_set_affinity_hint(enic->msix_entry[i].vector,
                                            enic->msix[i].affinity_mask);
                if (err)
                        netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
                                    err);
        }

        for (i = 0; i < enic->wq_count; i++) {
                int wq_intr = enic_msix_wq_intr(enic, i);

                if (enic->msix[wq_intr].affinity_mask &&
                    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
                        netif_set_xps_queue(enic->netdev,
                                            enic->msix[wq_intr].affinity_mask,
                                            i);
        }
}

static void enic_unset_affinity_hint(struct enic *enic)
{
        int i;

        for (i = 0; i < enic->intr_count; i++)
                irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
}

int enic_is_dynamic(struct enic *enic)
{
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
        return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
        return vf >= 0 && vf < enic->num_vfs;
#else
        return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct enic *enic = vnic_dev_priv(wq->vdev);

        if (buf->sop)
                pci_unmap_single(enic->pdev, buf->dma_addr,
                                 buf->len, PCI_DMA_TODEVICE);
        else
                pci_unmap_page(enic->pdev, buf->dma_addr,
                               buf->len, PCI_DMA_TODEVICE);

        if (buf->os_buf)
                dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
        struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
        enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        spin_lock(&enic->wq_lock[q_number]);

        vnic_wq_service(&enic->wq[q_number], cq_desc,
                        completed_index, enic_wq_free_buf,
                        opaque);

        if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
            vnic_wq_desc_avail(&enic->wq[q_number]) >=
            (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
                netif_wake_subqueue(enic->netdev, q_number);

        spin_unlock(&enic->wq_lock[q_number]);

        return 0;
}

static bool enic_log_q_error(struct enic *enic)
{
        unsigned int i;
        u32 error_status;
        bool err = false;

        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
                err |= error_status;
                if (error_status)
                        netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
                                   i, error_status);
        }

        for (i = 0; i < enic->rq_count; i++) {
                error_status = vnic_rq_error_status(&enic->rq[i]);
                err |= error_status;
                if (error_status)
                        netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
                                   i, error_status);
        }

        return err;
}

static void enic_msglvl_check(struct enic *enic)
{
        u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

        if (msg_enable != enic->msg_enable) {
                netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
                            enic->msg_enable, msg_enable);
                enic->msg_enable = msg_enable;
        }
}

static void enic_mtu_check(struct enic *enic)
{
        u32 mtu = vnic_dev_mtu(enic->vdev);
        struct net_device *netdev = enic->netdev;

        if (mtu && mtu != enic->port_mtu) {
                enic->port_mtu = mtu;
                if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
                        mtu = max_t(int, ENIC_MIN_MTU,
                                    min_t(int, ENIC_MAX_MTU, mtu));
                        if (mtu != netdev->mtu)
                                schedule_work(&enic->change_mtu_work);
                } else {
                        if (mtu < netdev->mtu)
                                netdev_warn(netdev,
                                            "interface MTU (%d) set higher than switch port MTU (%d)\n",
                                            netdev->mtu, mtu);
                }
        }
}

static void enic_link_check(struct enic *enic)
{
        int link_status = vnic_dev_link_status(enic->vdev);
        int carrier_ok = netif_carrier_ok(enic->netdev);

        if (link_status && !carrier_ok) {
                netdev_info(enic->netdev, "Link UP\n");
                netif_carrier_on(enic->netdev);
        } else if (!link_status && carrier_ok) {
                netdev_info(enic->netdev, "Link DOWN\n");
                netif_carrier_off(enic->netdev);
        }
}

static void enic_notify_check(struct enic *enic)
{
        enic_msglvl_check(enic);
        enic_mtu_check(enic);
        enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
        struct net_device *netdev = data;
        struct enic *enic = netdev_priv(netdev);
        unsigned int io_intr = enic_legacy_io_intr();
        unsigned int err_intr = enic_legacy_err_intr();
        unsigned int notify_intr = enic_legacy_notify_intr();
        u32 pba;

        vnic_intr_mask(&enic->intr[io_intr]);

        pba = vnic_intr_legacy_pba(enic->legacy_pba);
        if (!pba) {
                vnic_intr_unmask(&enic->intr[io_intr]);
                return IRQ_NONE; /* not our interrupt */
        }

        if (ENIC_TEST_INTR(pba, notify_intr)) {
                enic_notify_check(enic);
                vnic_intr_return_all_credits(&enic->intr[notify_intr]);
        }

        if (ENIC_TEST_INTR(pba, err_intr)) {
                vnic_intr_return_all_credits(&enic->intr[err_intr]);
                enic_log_q_error(enic);
                /* schedule recovery from WQ/RQ error */
                schedule_work(&enic->reset);
                return IRQ_HANDLED;
        }

        if (ENIC_TEST_INTR(pba, io_intr))
                napi_schedule_irqoff(&enic->napi[0]);
        else
                vnic_intr_unmask(&enic->intr[io_intr]);

        return IRQ_HANDLED;
}
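/* Worked example for the legacy ISR above, with hypothetical vector
 * numbers (the real ones come from the enic_legacy_*_intr() helpers):
 * suppose io_intr = 0, err_intr = 1 and notify_intr = 2. A PBA of 0x1
 * sets only bit 0, so ENIC_TEST_INTR(pba, io_intr) is non-zero and NAPI
 * is scheduled; a PBA of 0x2 would instead take the error path, which
 * logs queue errors, schedules the reset work and returns early.
 */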
static irqreturn_t enic_isr_msi(int irq, void *data)
{
        struct enic *enic = data;

        /* With MSI, there is no sharing of interrupts, so this is
         * our interrupt and there is no need to ack it. The device
         * is not providing per-vector masking, so the OS will not
         * write to PCI config space to mask/unmask the interrupt.
         * We're using mask_on_assertion for MSI, so the device
         * automatically masks the interrupt when the interrupt is
         * generated. Later, when exiting polling, the interrupt
         * will be unmasked (see enic_poll).
         *
         * Also, the device uses the same PCIe Traffic Class (TC)
         * for Memory Write data and MSI, so there are no ordering
         * issues; the MSI will always arrive at the Root Complex
         * _after_ corresponding Memory Writes (i.e. descriptor
         * writes).
         */

        napi_schedule_irqoff(&enic->napi[0]);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
        struct napi_struct *napi = data;

        napi_schedule_irqoff(napi);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_err_intr(enic);

        vnic_intr_return_all_credits(&enic->intr[intr]);

        if (enic_log_q_error(enic))
                /* schedule recovery from WQ/RQ error */
                schedule_work(&enic->reset);

        return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
        struct enic *enic = data;
        unsigned int intr = enic_msix_notify_intr(enic);

        enic_notify_check(enic);
        vnic_intr_return_all_credits(&enic->intr[intr]);

        return IRQ_HANDLED;
}

static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
        struct sk_buff *skb, unsigned int len_left, int loopback)
{
        const skb_frag_t *frag;
        dma_addr_t dma_addr;

        /* Queue additional data fragments */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
                dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (unlikely(enic_dma_map_check(enic, dma_addr)))
                        return -ENOMEM;
                enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
                                        (len_left == 0), /* EOP? */
                                        loopback);
        }

        return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
        struct sk_buff *skb, int vlan_tag_insert,
        unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        int eop = (len_left == 0);
        dma_addr_t dma_addr;
        int err = 0;

        dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
                                  PCI_DMA_TODEVICE);
        if (unlikely(enic_dma_map_check(enic, dma_addr)))
                return -ENOMEM;

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
                           vlan_tag, eop, loopback);

        if (!eop)
                err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

        return err;
}
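/* For CHECKSUM_PARTIAL skbs the stack hands us the checksum start and
 * offset rather than a computed checksum. E.g. for TCP (illustrative),
 * skb_checksum_start_offset() points at the TCP header and
 * skb->csum_offset is 16, so csum_offset below lands exactly on the TCP
 * checksum field the hardware must fill in.
 */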
static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
        struct sk_buff *skb, int vlan_tag_insert,
        unsigned int vlan_tag, int loopback)
{
        unsigned int head_len = skb_headlen(skb);
        unsigned int len_left = skb->len - head_len;
        unsigned int hdr_len = skb_checksum_start_offset(skb);
        unsigned int csum_offset = hdr_len + skb->csum_offset;
        int eop = (len_left == 0);
        dma_addr_t dma_addr;
        int err = 0;

        dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
                                  PCI_DMA_TODEVICE);
        if (unlikely(enic_dma_map_check(enic, dma_addr)))
                return -ENOMEM;

        /* Queue the main skb fragment. The fragments are no larger
         * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
         * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
         * per fragment is queued.
         */
        enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
                                   hdr_len, vlan_tag_insert, vlan_tag, eop,
                                   loopback);

        if (!eop)
                err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

        return err;
}

static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
        struct sk_buff *skb, unsigned int mss,
        int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
        unsigned int frag_len_left = skb_headlen(skb);
        unsigned int len_left = skb->len - frag_len_left;
        unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int eop = (len_left == 0);
        unsigned int len;
        dma_addr_t dma_addr;
        unsigned int offset = 0;
        skb_frag_t *frag;

        /* Preload TCP csum field with IP pseudo hdr calculated
         * with IP length set to zero. HW will later add in length
         * to each TCP segment resulting from the TSO.
         */

        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                        ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                        &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for the main skb fragment
         */
        while (frag_len_left) {
                len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
                dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
                                          PCI_DMA_TODEVICE);
                if (unlikely(enic_dma_map_check(enic, dma_addr)))
                        return -ENOMEM;
                enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
                                       vlan_tag_insert, vlan_tag,
                                       eop && (len == frag_len_left), loopback);
                frag_len_left -= len;
                offset += len;
        }

        if (eop)
                return 0;

        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for additional data fragments
         */
        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
                len_left -= skb_frag_size(frag);
                frag_len_left = skb_frag_size(frag);
                offset = 0;

                while (frag_len_left) {
                        len = min(frag_len_left,
                                  (unsigned int)WQ_ENET_MAX_DESC_LEN);
                        dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
                                                    offset, len,
                                                    DMA_TO_DEVICE);
                        if (unlikely(enic_dma_map_check(enic, dma_addr)))
                                return -ENOMEM;
                        enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
                                                (len_left == 0) &&
                                                (len == frag_len_left), /* EOP? */
                                                loopback);
                        frag_len_left -= len;
                        offset += len;
                }
        }

        return 0;
}
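/* Descriptor budget sketch for the TSO path above (assuming
 * WQ_ENET_LEN_BITS is 14, i.e. a 16 KB max descriptor length): a TSO
 * send with a 20 KB linear area posts two descriptors for the head
 * (16 KB + 4 KB), then up to WQ_ENET_MAX_DESC_LEN-sized slices per page
 * fragment. This is also where ENIC_DESC_MAX_SPLITS comes from:
 * MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1 = 65536 / 16384 + 1 = 5.
 */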
static inline void enic_queue_wq_skb(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb)
{
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int vlan_tag = 0;
        int vlan_tag_insert = 0;
        int loopback = 0;
        int err;

        if (skb_vlan_tag_present(skb)) {
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
                vlan_tag = skb_vlan_tag_get(skb);
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
                loopback = 1;
        }

        if (mss)
                err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
                                            vlan_tag_insert, vlan_tag,
                                            loopback);
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
                                                vlan_tag, loopback);
        else
                err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
                                             vlan_tag, loopback);
        if (unlikely(err)) {
                struct vnic_wq_buf *buf;

                buf = wq->to_use->prev;
                /* while not EOP of previous pkt && queue not empty.
                 * For all non EOP bufs, os_buf is NULL.
                 */
                while (!buf->os_buf && (buf->next != wq->to_clean)) {
                        enic_free_wq_buf(wq, buf);
                        wq->ring.desc_avail++;
                        buf = buf->prev;
                }
                wq->to_use = buf->next;
                dev_kfree_skb(skb);
        }
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_wq *wq;
        unsigned int txq_map;
        struct netdev_queue *txq;

        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
        wq = &enic->wq[txq_map];
        txq = netdev_get_tx_queue(netdev, txq_map);

        /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
         * which is very likely. In the off chance it's going to take
         * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
         */
        if (skb_shinfo(skb)->gso_size == 0 &&
            skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
            skb_linearize(skb)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        spin_lock(&enic->wq_lock[txq_map]);

        if (vnic_wq_desc_avail(wq) <
            skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
                netif_tx_stop_queue(txq);
                /* This is a hard error, log it */
                netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
                spin_unlock(&enic->wq_lock[txq_map]);
                return NETDEV_TX_BUSY;
        }

        enic_queue_wq_skb(enic, wq, skb);

        if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
                netif_tx_stop_queue(txq);
        if (!skb->xmit_more || netif_xmit_stopped(txq))
                vnic_wq_doorbell(wq);

        spin_unlock(&enic->wq_lock[txq_map]);

        return NETDEV_TX_OK;
}
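/* Stop/wake hysteresis sketch: the xmit path above stops the queue once
 * fewer than MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors remain
 * (e.g. 17 + 5 = 22, assuming 4 KB pages and 14 descriptor length bits),
 * and enic_wq_service() wakes it at the same watermark, so a worst-case
 * skb can always be queued while the queue is awake.
 */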
/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
        struct rtnl_link_stats64 *net_stats)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *stats;
        int err;

        err = enic_dev_stats_dump(enic, &stats);
        /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
         * For other failures, like devcmd failure, we return previously
         * recorded stats.
         */
        if (err == -ENOMEM)
                return net_stats;

        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
        net_stats->tx_errors = stats->tx.tx_errors;
        net_stats->tx_dropped = stats->tx.tx_drops;

        net_stats->rx_packets = stats->rx.rx_frames_ok;
        net_stats->rx_bytes = stats->rx.rx_bytes_ok;
        net_stats->rx_errors = stats->rx.rx_errors;
        net_stats->multicast = stats->rx.rx_multicast_frames_ok;
        net_stats->rx_over_errors = enic->rq_truncated_pkts;
        net_stats->rx_crc_errors = enic->rq_bad_fcs;
        net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

        return net_stats;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
        struct enic *enic = netdev_priv(netdev);

        if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
                unsigned int mc_count = netdev_mc_count(netdev);

                netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
                            ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

                return -ENOSPC;
        }

        enic_dev_add_addr(enic, mc_addr);
        enic->mc_count++;

        return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
        struct enic *enic = netdev_priv(netdev);

        enic_dev_del_addr(enic, mc_addr);
        enic->mc_count--;

        return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
        struct enic *enic = netdev_priv(netdev);

        if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
                unsigned int uc_count = netdev_uc_count(netdev);

                netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
                            ENIC_UNICAST_PERFECT_FILTERS, uc_count);

                return -ENOSPC;
        }

        enic_dev_add_addr(enic, uc_addr);
        enic->uc_count++;

        return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
        struct enic *enic = netdev_priv(netdev);

        enic_dev_del_addr(enic, uc_addr);
        enic->uc_count--;

        return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;

        __dev_uc_unsync(netdev, NULL);
        __dev_mc_unsync(netdev, NULL);

        enic->mc_count = 0;
        enic->uc_count = 0;
        enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
        struct enic *enic = netdev_priv(netdev);

        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
                if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        } else {
                if (!is_valid_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        }

        memcpy(netdev->dev_addr, addr, netdev->addr_len);

        return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
        struct enic *enic = netdev_priv(netdev);
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        int err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_del_station_addr(enic);
                if (err)
                        return err;
        }

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        if (netif_running(enic->netdev)) {
                err = enic_dev_add_station_addr(enic);
                if (err)
                        return err;
        }

        return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
        struct sockaddr *saddr = p;
        char *addr = saddr->sa_data;
        struct enic *enic = netdev_priv(netdev);
        int err;

        err = enic_dev_del_station_addr(enic);
        if (err)
                return err;

        err = enic_set_mac_addr(netdev, addr);
        if (err)
                return err;

        return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        int directed = 1;
        int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
        int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
        int promisc = (netdev->flags & IFF_PROMISC) ||
                netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
        int allmulti = (netdev->flags & IFF_ALLMULTI) ||
                netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
        unsigned int flags = netdev->flags |
                (allmulti ? IFF_ALLMULTI : 0) |
                (promisc ? IFF_PROMISC : 0);

        if (enic->flags != flags) {
                enic->flags = flags;
                enic_dev_packet_filter(enic, directed,
                                       multicast, broadcast, promisc, allmulti);
        }

        if (!promisc) {
                __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
                if (!allmulti)
                        __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
        }
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);

        schedule_work(&enic->tx_hang_reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct enic *enic = netdev_priv(netdev);
        struct enic_port_profile *pp;
        int err;

        ENIC_PP_BY_INDEX(enic, vf, pp, &err);
        if (err)
                return err;

        if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
                if (vf == PORT_SELF_VF) {
                        memcpy(pp->vf_mac, mac, ETH_ALEN);
                        return 0;
                } else {
                        /*
                         * For sriov vf's set the mac in hw
                         */
                        ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
                                vnic_dev_set_mac_addr, mac);
                        return enic_dev_status_to_errno(err);
                }
        } else
                return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
        struct nlattr *port[])
{
        struct enic *enic = netdev_priv(netdev);
        struct enic_port_profile prev_pp;
        struct enic_port_profile *pp;
        int err = 0, restore_pp = 1;

        ENIC_PP_BY_INDEX(enic, vf, pp, &err);
        if (err)
                return err;

        if (!port[IFLA_PORT_REQUEST])
                return -EOPNOTSUPP;

        memcpy(&prev_pp, pp, sizeof(*enic->pp));
        memset(pp, 0, sizeof(*enic->pp));

        pp->set |= ENIC_SET_REQUEST;
        pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

        if (port[IFLA_PORT_PROFILE]) {
                pp->set |= ENIC_SET_NAME;
                memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
                       PORT_PROFILE_MAX);
        }

        if (port[IFLA_PORT_INSTANCE_UUID]) {
                pp->set |= ENIC_SET_INSTANCE;
                memcpy(pp->instance_uuid,
                       nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
        }

        if (port[IFLA_PORT_HOST_UUID]) {
                pp->set |= ENIC_SET_HOST;
                memcpy(pp->host_uuid,
                       nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
        }

        if (vf == PORT_SELF_VF) {
                /* Special case handling: mac came from IFLA_VF_MAC */
                if (!is_zero_ether_addr(prev_pp.vf_mac))
                        memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

                if (is_zero_ether_addr(netdev->dev_addr))
                        eth_hw_addr_random(netdev);
        } else {
                /* SR-IOV VF: get mac from adapter */
                ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
                        vnic_dev_get_mac_addr, pp->mac_addr);
                if (err) {
                        netdev_err(netdev, "Error getting mac for vf %d\n", vf);
%d\n", vf); 958 memcpy(pp, &prev_pp, sizeof(*pp)); 959 return enic_dev_status_to_errno(err); 960 } 961 } 962 963 err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); 964 if (err) { 965 if (restore_pp) { 966 /* Things are still the way they were: Implicit 967 * DISASSOCIATE failed 968 */ 969 memcpy(pp, &prev_pp, sizeof(*pp)); 970 } else { 971 memset(pp, 0, sizeof(*pp)); 972 if (vf == PORT_SELF_VF) 973 eth_zero_addr(netdev->dev_addr); 974 } 975 } else { 976 /* Set flag to indicate that the port assoc/disassoc 977 * request has been sent out to fw 978 */ 979 pp->set |= ENIC_PORT_REQUEST_APPLIED; 980 981 /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ 982 if (pp->request == PORT_REQUEST_DISASSOCIATE) { 983 eth_zero_addr(pp->mac_addr); 984 if (vf == PORT_SELF_VF) 985 eth_zero_addr(netdev->dev_addr); 986 } 987 } 988 989 if (vf == PORT_SELF_VF) 990 eth_zero_addr(pp->vf_mac); 991 992 return err; 993 } 994 995 static int enic_get_vf_port(struct net_device *netdev, int vf, 996 struct sk_buff *skb) 997 { 998 struct enic *enic = netdev_priv(netdev); 999 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1000 struct enic_port_profile *pp; 1001 int err; 1002 1003 ENIC_PP_BY_INDEX(enic, vf, pp, &err); 1004 if (err) 1005 return err; 1006 1007 if (!(pp->set & ENIC_PORT_REQUEST_APPLIED)) 1008 return -ENODATA; 1009 1010 err = enic_process_get_pp_request(enic, vf, pp->request, &response); 1011 if (err) 1012 return err; 1013 1014 if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) || 1015 nla_put_u16(skb, IFLA_PORT_RESPONSE, response) || 1016 ((pp->set & ENIC_SET_NAME) && 1017 nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) || 1018 ((pp->set & ENIC_SET_INSTANCE) && 1019 nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1020 pp->instance_uuid)) || 1021 ((pp->set & ENIC_SET_HOST) && 1022 nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid))) 1023 goto nla_put_failure; 1024 return 0; 1025 1026 nla_put_failure: 1027 return -EMSGSIZE; 1028 } 1029 1030 static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) 1031 { 1032 struct enic *enic = vnic_dev_priv(rq->vdev); 1033 1034 if (!buf->os_buf) 1035 return; 1036 1037 pci_unmap_single(enic->pdev, buf->dma_addr, 1038 buf->len, PCI_DMA_FROMDEVICE); 1039 dev_kfree_skb_any(buf->os_buf); 1040 buf->os_buf = NULL; 1041 } 1042 1043 static int enic_rq_alloc_buf(struct vnic_rq *rq) 1044 { 1045 struct enic *enic = vnic_dev_priv(rq->vdev); 1046 struct net_device *netdev = enic->netdev; 1047 struct sk_buff *skb; 1048 unsigned int len = netdev->mtu + VLAN_ETH_HLEN; 1049 unsigned int os_buf_index = 0; 1050 dma_addr_t dma_addr; 1051 struct vnic_rq_buf *buf = rq->to_use; 1052 1053 if (buf->os_buf) { 1054 enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, 1055 buf->len); 1056 1057 return 0; 1058 } 1059 skb = netdev_alloc_skb_ip_align(netdev, len); 1060 if (!skb) 1061 return -ENOMEM; 1062 1063 dma_addr = pci_map_single(enic->pdev, skb->data, len, 1064 PCI_DMA_FROMDEVICE); 1065 if (unlikely(enic_dma_map_check(enic, dma_addr))) { 1066 dev_kfree_skb(skb); 1067 return -ENOMEM; 1068 } 1069 1070 enic_queue_rq_desc(rq, skb, os_buf_index, 1071 dma_addr, len); 1072 1073 return 0; 1074 } 1075 1076 static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, 1077 u32 pkt_len) 1078 { 1079 if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len) 1080 pkt_size->large_pkt_bytes_cnt += pkt_len; 1081 else 1082 pkt_size->small_pkt_bytes_cnt += pkt_len; 1083 } 1084 1085 static bool enic_rxcopybreak(struct net_device 
static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
        struct vnic_rq_buf *buf, u16 len)
{
        struct enic *enic = netdev_priv(netdev);
        struct sk_buff *new_skb;

        if (len > enic->rx_copybreak)
                return false;
        new_skb = netdev_alloc_skb_ip_align(netdev, len);
        if (!new_skb)
                return false;
        pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
                                    DMA_FROM_DEVICE);
        memcpy(new_skb->data, (*skb)->data, len);
        *skb = new_skb;

        return true;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
{
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;
        struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
        u8 packet_error;
        u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
        u32 rss_hash;

        if (skipped)
                return;

        skb = buf->os_buf;

        cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                            &type, &color, &q_number, &completed_index,
                            &ingress_port, &fcoe, &eop, &sop, &rss_type,
                            &csum_not_calc, &rss_hash, &bytes_written,
                            &packet_error, &vlan_stripped, &vlan_tci, &checksum,
                            &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
                            &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
                            &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
                            &fcs_ok);

        if (packet_error) {

                if (!fcs_ok) {
                        if (bytes_written > 0)
                                enic->rq_bad_fcs++;
                        else if (bytes_written == 0)
                                enic->rq_truncated_pkts++;
                }

                pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                buf->os_buf = NULL;

                return;
        }

        if (eop && bytes_written > 0) {

                /* Good receive
                 */

                if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
                        buf->os_buf = NULL;
                        pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
                                         PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data - NET_IP_ALIGN);

                skb_put(skb, bytes_written);
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, q_number);
                if (netdev->features & NETIF_F_RXHASH) {
                        skb_set_hash(skb, rss_hash,
                                     (rss_type &
                                      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
                                       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
                                       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
                                     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
                }

                /* Hardware does not provide the whole-packet checksum, only
                 * a pseudo checksum: the hw validates the packet checksum
                 * but does not hand us the value, so use
                 * CHECKSUM_UNNECESSARY.
                 */
                if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
                    ipv4_csum_ok)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

                skb_mark_napi_id(skb, &enic->napi[rq->index]);
                if (enic_poll_busy_polling(rq) ||
                    !(netdev->features & NETIF_F_GRO))
                        netif_receive_skb(skb);
                else
                        napi_gro_receive(&enic->napi[q_number], skb);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_intr_update_pkt_size(&cq->pkt_size_counter,
                                                  bytes_written);
        } else {

                /* Buffer overflow
                 */

                pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                buf->os_buf = NULL;
        }
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        u8 type, u16 q_number, u16 completed_index, void *opaque)
{
        struct enic *enic = vnic_dev_priv(vdev);

        vnic_rq_service(&enic->rq[q_number], cq_desc,
                        completed_index, VNIC_RQ_RETURN_DESC,
                        enic_rq_indicate_buf, opaque);

        return 0;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
        unsigned int intr = enic_msix_rq_intr(enic, rq->index);
        struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        u32 timer = cq->tobe_rx_coal_timeval;

        if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
                vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
                cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
        }
}
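/* Worked example for the rate math below (illustrative numbers):
 * 125,000,000 bytes seen over a 200,000 usec window gives
 * traffic = 125e6 * 8 bits / 2e5 usec = 5000 Mbps; the mod_table scan
 * stops at {5060, 20}, so the new timer is range_start + 20% of the
 * range, then averaged with the previous value as damping.
 */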
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
        struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
        struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
        int index;
        u32 timer;
        u32 range_start;
        u32 traffic;
        u64 delta;
        ktime_t now = ktime_get();

        delta = ktime_us_delta(now, cq->prev_ts);
        if (delta < ENIC_AIC_TS_BREAK)
                return;
        cq->prev_ts = now;

        traffic = pkt_size_counter->large_pkt_bytes_cnt +
                  pkt_size_counter->small_pkt_bytes_cnt;
        /* The table takes Mbps
         * traffic *= 8              => bits
         * traffic *= (10^6 / delta) => bps
         * traffic /= 10^6           => Mbps
         *
         * Combining, traffic *= (8 / delta)
         */

        traffic <<= 3;
        traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

        for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
                if (traffic < mod_table[index].rx_rate)
                        break;
        range_start = (pkt_size_counter->small_pkt_bytes_cnt >
                       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
                      rx_coal->small_pkt_range_start :
                      rx_coal->large_pkt_range_start;
        timer = range_start + ((rx_coal->range_end - range_start) *
                               mod_table[index].range_percent / 100);
        /* Damping */
        cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

        pkt_size_counter->large_pkt_bytes_cnt = 0;
        pkt_size_counter->small_pkt_bytes_cnt = 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int cq_rq = enic_cq_rq(enic, 0);
        unsigned int cq_wq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_legacy_io_intr();
        unsigned int rq_work_to_do = budget;
        unsigned int wq_work_to_do = -1; /* no limit */
        unsigned int work_done, rq_work_done = 0, wq_work_done;
        int err;

        wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
                                       enic_wq_service, NULL);

        if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
                if (wq_work_done > 0)
                        vnic_intr_return_credits(&enic->intr[intr],
                                                 wq_work_done,
                                                 0 /* don't unmask intr */,
                                                 0 /* don't reset intr timer */);
                return budget;
        }

        if (budget > 0)
                rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
                                               rq_work_to_do, enic_rq_service, NULL);

        /* Accumulate intr event credits for this polling
         * cycle. An intr event is the completion of a
         * WQ or RQ packet.
         */
        work_done = rq_work_done + wq_work_done;

        if (work_done > 0)
                vnic_intr_return_credits(&enic->intr[intr],
                                         work_done,
                                         0 /* don't unmask intr */,
                                         0 /* don't reset intr timer */);

        err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
        enic_poll_unlock_napi(&enic->rq[cq_rq], napi);

        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
         */
        if (err)
                rq_work_done = rq_work_to_do;
        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                /* Call the function which refreshes the intr coalescing timer
                 * value based on the traffic.
                 */
                enic_calc_int_moderation(enic, &enic->rq[0]);

        if (rq_work_done < rq_work_to_do) {

                /* Some work done, but not enough to stay in polling,
                 * exit polling
                 */
                napi_complete(napi);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[0]);
                vnic_intr_unmask(&enic->intr[intr]);
        }

        return rq_work_done;
}

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
        free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
        enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
        int i, res;

        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
                enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
                if (unlikely(!enic->netdev->rx_cpu_rmap))
                        return;
                for (i = 0; i < enic->rq_count; i++) {
                        res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
                                               enic->msix_entry[i].vector);
                        if (unlikely(res)) {
                                enic_free_rx_cpu_rmap(enic);
                                return;
                        }
                }
        }
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

#ifdef CONFIG_NET_RX_BUSY_POLL
static int enic_busy_poll(struct napi_struct *napi)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int rq = (napi - &enic->napi[0]);
        unsigned int cq = enic_cq_rq(enic, rq);
        unsigned int intr = enic_msix_rq_intr(enic, rq);
        unsigned int work_to_do = -1; /* clean all pkts possible */
        unsigned int work_done;

        if (!enic_poll_lock_poll(&enic->rq[rq]))
                return LL_FLUSH_BUSY;
        work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
                                    enic_rq_service, NULL);

        if (work_done > 0)
                vnic_intr_return_credits(&enic->intr[intr],
                                         work_done, 0, 0);
        vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                enic_calc_int_moderation(enic, &enic->rq[rq]);
        enic_poll_unlock_poll(&enic->rq[rq]);

        return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
        struct vnic_wq *wq = &enic->wq[wq_index];
        unsigned int cq;
        unsigned int intr;
        unsigned int wq_work_to_do = -1; /* clean all desc possible */
        unsigned int wq_work_done;
        unsigned int wq_irq;

        wq_irq = wq->index;
        cq = enic_cq_wq(enic, wq_irq);
        intr = enic_msix_wq_intr(enic, wq_irq);
        wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
                                       enic_wq_service, NULL);

        vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
                                 0 /* don't unmask intr */,
                                 1 /* reset intr timer */);
        if (!wq_work_done) {
                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
                return 0;
        }

        return budget;
}

static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
        unsigned int rq = (napi - &enic->napi[0]);
        unsigned int cq = enic_cq_rq(enic, rq);
        unsigned int intr = enic_msix_rq_intr(enic, rq);
        unsigned int work_to_do = budget;
        unsigned int work_done = 0;
        int err;

        if (!enic_poll_lock_napi(&enic->rq[rq]))
                return budget;

        /* Service RQ
         */
        if (budget > 0)
                work_done = vnic_cq_service(&enic->cq[cq],
                                            work_to_do, enic_rq_service, NULL);

        /* Return intr event credits for this polling
         * cycle. An intr event is the completion of an
         * RQ packet.
         */
        if (work_done > 0)
                vnic_intr_return_credits(&enic->intr[intr],
                                         work_done,
                                         0 /* don't unmask intr */,
                                         0 /* don't reset intr timer */);

        err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

        /* Buffer allocation failed. Stay in polling mode
         * so we can try to fill the ring again.
         */
        if (err)
                work_done = work_to_do;
        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                /* Call the function which refreshes the intr coalescing timer
                 * value based on the traffic.
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);

        enic_poll_unlock_napi(&enic->rq[rq], napi);
        if (work_done < work_to_do) {

                /* Some work done, but not enough to stay in polling,
                 * exit polling
                 */
                napi_complete(napi);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }

        return work_done;
}

static void enic_notify_timer(unsigned long data)
{
        struct enic *enic = (struct enic *)data;

        enic_notify_check(enic);

        mod_timer(&enic->notify_timer,
                  round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        unsigned int i;

        enic_free_rx_cpu_rmap(enic);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                free_irq(enic->pdev->irq, netdev);
                break;
        case VNIC_DEV_INTR_MODE_MSI:
                free_irq(enic->pdev->irq, enic);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
                        if (enic->msix[i].requested)
                                free_irq(enic->msix_entry[i].vector,
                                         enic->msix[i].devid);
                break;
        default:
                break;
        }
}

static int enic_request_intr(struct enic *enic)
{
        struct net_device *netdev = enic->netdev;
        unsigned int i, intr;
        int err = 0;

        enic_set_rx_cpu_rmap(enic);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {

        case VNIC_DEV_INTR_MODE_INTX:

                err = request_irq(enic->pdev->irq, enic_isr_legacy,
                                  IRQF_SHARED, netdev->name, netdev);
                break;

        case VNIC_DEV_INTR_MODE_MSI:

                err = request_irq(enic->pdev->irq, enic_isr_msi,
                                  0, netdev->name, enic);
                break;

        case VNIC_DEV_INTR_MODE_MSIX:

                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
                        snprintf(enic->msix[intr].devname,
                                 sizeof(enic->msix[intr].devname),
                                 "%.11s-rx-%d", netdev->name, i);
                        enic->msix[intr].isr = enic_isr_msix;
                        enic->msix[intr].devid = &enic->napi[i];
                }

                for (i = 0; i < enic->wq_count; i++) {
                        int wq = enic_cq_wq(enic, i);

                        intr = enic_msix_wq_intr(enic, i);
                        snprintf(enic->msix[intr].devname,
                                 sizeof(enic->msix[intr].devname),
                                 "%.11s-tx-%d", netdev->name, i);
                        enic->msix[intr].isr = enic_isr_msix;
                        enic->msix[intr].devid = &enic->napi[wq];
                }

                intr = enic_msix_err_intr(enic);
                snprintf(enic->msix[intr].devname,
                         sizeof(enic->msix[intr].devname),
                         "%.11s-err", netdev->name);
                enic->msix[intr].isr = enic_isr_msix_err;
                enic->msix[intr].devid = enic;

                intr = enic_msix_notify_intr(enic);
                snprintf(enic->msix[intr].devname,
                         sizeof(enic->msix[intr].devname),
                         "%.11s-notify", netdev->name);
                enic->msix[intr].isr = enic_isr_msix_notify;
                enic->msix[intr].devid = enic;

                for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
                        enic->msix[i].requested = 0;

                for (i = 0; i < enic->intr_count; i++) {
                        err = request_irq(enic->msix_entry[i].vector,
                                          enic->msix[i].isr, 0,
                                          enic->msix[i].devname,
                                          enic->msix[i].devid);
                        if (err) {
                                enic_free_intr(enic);
                                break;
                        }
                        enic->msix[i].requested = 1;
                }

                break;

        default:
                break;
        }

        return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
        unsigned int i;

        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
        case VNIC_DEV_INTR_MODE_MSI:
                synchronize_irq(enic->pdev->irq);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < enic->intr_count; i++)
                        synchronize_irq(enic->msix_entry[i].vector);
                break;
        default:
                break;
        }
}

static void enic_set_rx_coal_setting(struct enic *enic)
{
        unsigned int speed;
        int index = -1;
        struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

        /* 1. Read the link speed from fw
         * 2. Pick the default range for the speed
         * 3. Update it in enic->rx_coalesce_setting
         */
        speed = vnic_dev_port_speed(enic->vdev);
        if (ENIC_LINK_SPEED_10G < speed)
                index = ENIC_LINK_40G_INDEX;
        else if (ENIC_LINK_SPEED_4G < speed)
                index = ENIC_LINK_10G_INDEX;
        else
                index = ENIC_LINK_4G_INDEX;

        rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
        rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
        rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

        /* Start with the value provided by UCSM */
        for (index = 0; index < enic->rq_count; index++)
                enic->cq[index].cur_rx_coal_timeval =
                        enic->config.intr_timer_usec;

        rx_coal->use_adaptive_rx_coalesce = 1;
}

static int enic_dev_notify_set(struct enic *enic)
{
        int err;

        spin_lock_bh(&enic->devcmd_lock);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                err = vnic_dev_notify_set(enic->vdev,
                                          enic_legacy_notify_intr());
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
                err = vnic_dev_notify_set(enic->vdev,
                                          enic_msix_notify_intr(enic));
                break;
        default:
                err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
                break;
        }
        spin_unlock_bh(&enic->devcmd_lock);

        return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_MSI:
                mod_timer(&enic->notify_timer, jiffies);
                break;
        default:
                /* Using intr for notification for INTx/MSI-X */
                break;
        }
}
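/* enic_open() below brings the interface up in dependency order:
 * request IRQs and affinity hints, set up the notify buffer, fill each
 * RQ with buffers (at least one per ring is mandatory), enable WQs and
 * RQs, program the station MAC and rx mode, wake the tx queues, enable
 * NAPI, and only then unmask interrupts and start the notify timer.
 */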
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        unsigned int i;
        int err;

        err = enic_request_intr(enic);
        if (err) {
                netdev_err(netdev, "Unable to request irq.\n");
                return err;
        }
        enic_init_affinity_hint(enic);
        enic_set_affinity_hint(enic);

        err = enic_dev_notify_set(enic);
        if (err) {
                netdev_err(netdev,
                           "Failed to alloc notify buffer, aborting.\n");
                goto err_out_free_intr;
        }

        for (i = 0; i < enic->rq_count; i++) {
                vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
                /* Need at least one buffer on ring to get going */
                if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
                        netdev_err(netdev, "Unable to alloc receive buffers\n");
                        err = -ENOMEM;
                        goto err_out_free_rq;
                }
        }

        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_enable(&enic->wq[i]);
        for (i = 0; i < enic->rq_count; i++)
                vnic_rq_enable(&enic->rq[i]);

        if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
                enic_dev_add_station_addr(enic);

        enic_set_rx_mode(netdev);

        netif_tx_wake_all_queues(netdev);

        for (i = 0; i < enic->rq_count; i++) {
                enic_busy_poll_init_lock(&enic->rq[i]);
                napi_enable(&enic->napi[i]);
        }
        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
                for (i = 0; i < enic->wq_count; i++)
                        napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
        enic_dev_enable(enic);

        for (i = 0; i < enic->intr_count; i++)
                vnic_intr_unmask(&enic->intr[i]);

        enic_notify_timer_start(enic);
        enic_rfs_flw_tbl_init(enic);

        return 0;

err_out_free_rq:
        for (i = 0; i < enic->rq_count; i++)
                vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        enic_dev_notify_unset(enic);
err_out_free_intr:
        enic_unset_affinity_hint(enic);
        enic_free_intr(enic);

        return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        unsigned int i;
        int err;

        for (i = 0; i < enic->intr_count; i++) {
                vnic_intr_mask(&enic->intr[i]);
                (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
        }

        enic_synchronize_irqs(enic);

        del_timer_sync(&enic->notify_timer);
        enic_rfs_flw_tbl_free(enic);

        enic_dev_disable(enic);

        for (i = 0; i < enic->rq_count; i++) {
                napi_disable(&enic->napi[i]);
                local_bh_disable();
                while (!enic_poll_lock_napi(&enic->rq[i]))
                        mdelay(1);
                local_bh_enable();
        }

        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
                for (i = 0; i < enic->wq_count; i++)
                        napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

        if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
                enic_dev_del_station_addr(enic);

        for (i = 0; i < enic->wq_count; i++) {
                err = vnic_wq_disable(&enic->wq[i]);
                if (err)
                        return err;
        }
        for (i = 0; i < enic->rq_count; i++) {
                err = vnic_rq_disable(&enic->rq[i]);
                if (err)
                        return err;
        }

        enic_dev_notify_unset(enic);
        enic_unset_affinity_hint(enic);
        enic_free_intr(enic);

        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
        for (i = 0; i < enic->rq_count; i++)
                vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        for (i = 0; i < enic->cq_count; i++)
                vnic_cq_clean(&enic->cq[i]);
        for (i = 0; i < enic->intr_count; i++)
                vnic_intr_clean(&enic->intr[i]);

        return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct enic *enic = netdev_priv(netdev);
        int running = netif_running(netdev);

        if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
                return -EINVAL;

        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
                return -EOPNOTSUPP;

        if (running)
                enic_stop(netdev);

        netdev->mtu = new_mtu;

        if (netdev->mtu > enic->port_mtu)
                netdev_warn(netdev,
                            "interface MTU (%d) set higher than port MTU (%d)\n",
                            netdev->mtu, enic->port_mtu);

        if (running)
                enic_open(netdev);

        return 0;
}

static void enic_change_mtu_work(struct work_struct *work)
{
        struct enic *enic = container_of(work, struct enic, change_mtu_work);
        struct net_device *netdev = enic->netdev;
        int new_mtu = vnic_dev_mtu(enic->vdev);
        int err;
        unsigned int i;

        new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

        rtnl_lock();

        /* Stop RQ */
        del_timer_sync(&enic->notify_timer);

        for (i = 0; i < enic->rq_count; i++)
                napi_disable(&enic->napi[i]);

        vnic_intr_mask(&enic->intr[0]);
        enic_synchronize_irqs(enic);
        err = vnic_rq_disable(&enic->rq[0]);
        if (err) {
                rtnl_unlock();
                netdev_err(netdev, "Unable to disable RQ.\n");
                return;
        }
        vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
        vnic_cq_clean(&enic->cq[0]);
        vnic_intr_clean(&enic->intr[0]);

        /* Fill RQ with new_mtu-sized buffers */
        netdev->mtu = new_mtu;
        vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
        /* Need at least one buffer on ring to get going */
        if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
                rtnl_unlock();
                netdev_err(netdev, "Unable to alloc receive buffers.\n");
                return;
        }

        /* Start RQ */
        vnic_rq_enable(&enic->rq[0]);
        napi_enable(&enic->napi[0]);
        vnic_intr_unmask(&enic->intr[0]);
        enic_notify_timer_start(enic);

        rtnl_unlock();

        netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
        struct enic *enic = netdev_priv(netdev);
        struct vnic_dev *vdev = enic->vdev;
        unsigned int i, intr;

        switch (vnic_dev_get_intr_mode(vdev)) {
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
                        enic_isr_msix(enic->msix_entry[intr].vector,
                                      &enic->napi[i]);
                }

                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        enic_isr_msix(enic->msix_entry[intr].vector,
                                      &enic->napi[enic_cq_wq(enic, i)]);
                }

                break;
        case VNIC_DEV_INTR_MODE_MSI:
                enic_isr_msi(enic->pdev->irq, enic);
                break;
        case VNIC_DEV_INTR_MODE_INTX:
                enic_isr_legacy(enic->pdev->irq, netdev);
                break;
        default:
                break;
        }
}
#endif
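/* Firmware handshake pattern: enic_dev_wait() below fires a devcmd via
 * start(), then polls finished() roughly every HZ/10 jiffies until it
 * reports completion or the 2 second deadline passes -- on the order of
 * 20 polls for e.g. the vnic_dev_open/vnic_dev_open_done pair.
 */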
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */

	time = jiffies + (HZ * 2);
	do {
		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
					       sizeof(union vnic_rss_key),
					       &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
			    rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		rss_cpu_buf_va->cpu[i / 4].b[i % 4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
			    rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
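
/* The indirection table written by enic_set_rsscpu() above spreads
 * RSS hash buckets round-robin across the receive queues.  With the
 * rss_hash_bits value of 7 used below (128 buckets) and, say, 4 RQs,
 * bucket i steers to RQ (i % 4):
 *
 *	bucket: 0 1 2 3 4 5 6 7 ...
 *	RQ:     0 1 2 3 0 1 2 3 ...
 */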
2098 */ 2099 2100 spin_lock_bh(&enic->devcmd_lock); 2101 err = enic_set_nic_cfg(enic, 2102 rss_default_cpu, rss_hash_type, 2103 rss_hash_bits, rss_base_cpu, 2104 rss_enable, tso_ipid_split_en, 2105 ig_vlan_strip_en); 2106 spin_unlock_bh(&enic->devcmd_lock); 2107 2108 return err; 2109 } 2110 2111 static int enic_set_rss_nic_cfg(struct enic *enic) 2112 { 2113 struct device *dev = enic_get_dev(enic); 2114 const u8 rss_default_cpu = 0; 2115 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | 2116 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | 2117 NIC_CFG_RSS_HASH_TYPE_IPV6 | 2118 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; 2119 const u8 rss_hash_bits = 7; 2120 const u8 rss_base_cpu = 0; 2121 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); 2122 2123 if (rss_enable) { 2124 if (!enic_set_rsskey(enic)) { 2125 if (enic_set_rsscpu(enic, rss_hash_bits)) { 2126 rss_enable = 0; 2127 dev_warn(dev, "RSS disabled, " 2128 "Failed to set RSS cpu indirection table."); 2129 } 2130 } else { 2131 rss_enable = 0; 2132 dev_warn(dev, "RSS disabled, Failed to set RSS key.\n"); 2133 } 2134 } 2135 2136 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, 2137 rss_hash_bits, rss_base_cpu, rss_enable); 2138 } 2139 2140 static void enic_reset(struct work_struct *work) 2141 { 2142 struct enic *enic = container_of(work, struct enic, reset); 2143 2144 if (!netif_running(enic->netdev)) 2145 return; 2146 2147 rtnl_lock(); 2148 2149 spin_lock(&enic->enic_api_lock); 2150 enic_stop(enic->netdev); 2151 enic_dev_soft_reset(enic); 2152 enic_reset_addr_lists(enic); 2153 enic_init_vnic_resources(enic); 2154 enic_set_rss_nic_cfg(enic); 2155 enic_dev_set_ig_vlan_rewrite_mode(enic); 2156 enic_open(enic->netdev); 2157 spin_unlock(&enic->enic_api_lock); 2158 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); 2159 2160 rtnl_unlock(); 2161 } 2162 2163 static void enic_tx_hang_reset(struct work_struct *work) 2164 { 2165 struct enic *enic = container_of(work, struct enic, tx_hang_reset); 2166 2167 rtnl_lock(); 2168 2169 spin_lock(&enic->enic_api_lock); 2170 enic_dev_hang_notify(enic); 2171 enic_stop(enic->netdev); 2172 enic_dev_hang_reset(enic); 2173 enic_reset_addr_lists(enic); 2174 enic_init_vnic_resources(enic); 2175 enic_set_rss_nic_cfg(enic); 2176 enic_dev_set_ig_vlan_rewrite_mode(enic); 2177 enic_open(enic->netdev); 2178 spin_unlock(&enic->enic_api_lock); 2179 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); 2180 2181 rtnl_unlock(); 2182 } 2183 2184 static int enic_set_intr_mode(struct enic *enic) 2185 { 2186 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); 2187 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); 2188 unsigned int i; 2189 2190 /* Set interrupt mode (INTx, MSI, MSI-X) depending 2191 * on system capabilities. 
2192 * 2193 * Try MSI-X first 2194 * 2195 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs 2196 * (the second to last INTR is used for WQ/RQ errors) 2197 * (the last INTR is used for notifications) 2198 */ 2199 2200 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); 2201 for (i = 0; i < n + m + 2; i++) 2202 enic->msix_entry[i].entry = i; 2203 2204 /* Use multiple RQs if RSS is enabled 2205 */ 2206 2207 if (ENIC_SETTING(enic, RSS) && 2208 enic->config.intr_mode < 1 && 2209 enic->rq_count >= n && 2210 enic->wq_count >= m && 2211 enic->cq_count >= n + m && 2212 enic->intr_count >= n + m + 2) { 2213 2214 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, 2215 n + m + 2, n + m + 2) > 0) { 2216 2217 enic->rq_count = n; 2218 enic->wq_count = m; 2219 enic->cq_count = n + m; 2220 enic->intr_count = n + m + 2; 2221 2222 vnic_dev_set_intr_mode(enic->vdev, 2223 VNIC_DEV_INTR_MODE_MSIX); 2224 2225 return 0; 2226 } 2227 } 2228 2229 if (enic->config.intr_mode < 1 && 2230 enic->rq_count >= 1 && 2231 enic->wq_count >= m && 2232 enic->cq_count >= 1 + m && 2233 enic->intr_count >= 1 + m + 2) { 2234 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, 2235 1 + m + 2, 1 + m + 2) > 0) { 2236 2237 enic->rq_count = 1; 2238 enic->wq_count = m; 2239 enic->cq_count = 1 + m; 2240 enic->intr_count = 1 + m + 2; 2241 2242 vnic_dev_set_intr_mode(enic->vdev, 2243 VNIC_DEV_INTR_MODE_MSIX); 2244 2245 return 0; 2246 } 2247 } 2248 2249 /* Next try MSI 2250 * 2251 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR 2252 */ 2253 2254 if (enic->config.intr_mode < 2 && 2255 enic->rq_count >= 1 && 2256 enic->wq_count >= 1 && 2257 enic->cq_count >= 2 && 2258 enic->intr_count >= 1 && 2259 !pci_enable_msi(enic->pdev)) { 2260 2261 enic->rq_count = 1; 2262 enic->wq_count = 1; 2263 enic->cq_count = 2; 2264 enic->intr_count = 1; 2265 2266 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); 2267 2268 return 0; 2269 } 2270 2271 /* Next try INTx 2272 * 2273 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs 2274 * (the first INTR is used for WQ/RQ) 2275 * (the second INTR is used for WQ/RQ errors) 2276 * (the last INTR is used for notifications) 2277 */ 2278 2279 if (enic->config.intr_mode < 3 && 2280 enic->rq_count >= 1 && 2281 enic->wq_count >= 1 && 2282 enic->cq_count >= 2 && 2283 enic->intr_count >= 3) { 2284 2285 enic->rq_count = 1; 2286 enic->wq_count = 1; 2287 enic->cq_count = 2; 2288 enic->intr_count = 3; 2289 2290 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); 2291 2292 return 0; 2293 } 2294 2295 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 2296 2297 return -EINVAL; 2298 } 2299 2300 static void enic_clear_intr_mode(struct enic *enic) 2301 { 2302 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2303 case VNIC_DEV_INTR_MODE_MSIX: 2304 pci_disable_msix(enic->pdev); 2305 break; 2306 case VNIC_DEV_INTR_MODE_MSI: 2307 pci_disable_msi(enic->pdev); 2308 break; 2309 default: 2310 break; 2311 } 2312 2313 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 2314 } 2315 2316 static const struct net_device_ops enic_netdev_dynamic_ops = { 2317 .ndo_open = enic_open, 2318 .ndo_stop = enic_stop, 2319 .ndo_start_xmit = enic_hard_start_xmit, 2320 .ndo_get_stats64 = enic_get_stats, 2321 .ndo_validate_addr = eth_validate_addr, 2322 .ndo_set_rx_mode = enic_set_rx_mode, 2323 .ndo_set_mac_address = enic_set_mac_address_dynamic, 2324 .ndo_change_mtu = enic_change_mtu, 2325 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 2326 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, 2327 .ndo_tx_timeout = enic_tx_timeout, 2328 
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
	enic_free_affinity_hint(enic);
}
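
/* A kdump capture kernel runs with very little memory, so when
 * is_kdump_kernel() is true the configuration is trimmed to one
 * RQ/WQ pair, minimum-sized descriptor rings, and at most a
 * 1500-byte MTU.
 */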
static void enic_kdump_kernel_config(struct enic *enic)
{
	if (is_kdump_kernel()) {
		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
	}
}

static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Modify resource counts if we are in a kdump kernel
	 */
	enic_kdump_kernel_config(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		napi_hash_add(&enic->napi[0]);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
			napi_hash_add(&enic->napi[i]);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_affinity_hint(enic);
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
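
/* enic_probe() brings the adapter up in stages: enable the PCI
 * device and map its BARs, register the vNIC and optionally enable
 * SR-IOV, open the device, set up vNIC resources via enic_dev_init(),
 * and finally register the net device.  Each failure path unwinds
 * only the stages already completed, in reverse order.
 */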
2509 */ 2510 2511 netdev = alloc_etherdev_mqs(sizeof(struct enic), 2512 ENIC_RQ_MAX, ENIC_WQ_MAX); 2513 if (!netdev) 2514 return -ENOMEM; 2515 2516 pci_set_drvdata(pdev, netdev); 2517 2518 SET_NETDEV_DEV(netdev, &pdev->dev); 2519 2520 enic = netdev_priv(netdev); 2521 enic->netdev = netdev; 2522 enic->pdev = pdev; 2523 2524 /* Setup PCI resources 2525 */ 2526 2527 err = pci_enable_device_mem(pdev); 2528 if (err) { 2529 dev_err(dev, "Cannot enable PCI device, aborting\n"); 2530 goto err_out_free_netdev; 2531 } 2532 2533 err = pci_request_regions(pdev, DRV_NAME); 2534 if (err) { 2535 dev_err(dev, "Cannot request PCI regions, aborting\n"); 2536 goto err_out_disable_device; 2537 } 2538 2539 pci_set_master(pdev); 2540 2541 /* Query PCI controller on system for DMA addressing 2542 * limitation for the device. Try 64-bit first, and 2543 * fail to 32-bit. 2544 */ 2545 2546 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2547 if (err) { 2548 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2549 if (err) { 2550 dev_err(dev, "No usable DMA configuration, aborting\n"); 2551 goto err_out_release_regions; 2552 } 2553 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2554 if (err) { 2555 dev_err(dev, "Unable to obtain %u-bit DMA " 2556 "for consistent allocations, aborting\n", 32); 2557 goto err_out_release_regions; 2558 } 2559 } else { 2560 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2561 if (err) { 2562 dev_err(dev, "Unable to obtain %u-bit DMA " 2563 "for consistent allocations, aborting\n", 64); 2564 goto err_out_release_regions; 2565 } 2566 using_dac = 1; 2567 } 2568 2569 /* Map vNIC resources from BAR0-5 2570 */ 2571 2572 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { 2573 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) 2574 continue; 2575 enic->bar[i].len = pci_resource_len(pdev, i); 2576 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); 2577 if (!enic->bar[i].vaddr) { 2578 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i); 2579 err = -ENODEV; 2580 goto err_out_iounmap; 2581 } 2582 enic->bar[i].bus_addr = pci_resource_start(pdev, i); 2583 } 2584 2585 /* Register vNIC device 2586 */ 2587 2588 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, 2589 ARRAY_SIZE(enic->bar)); 2590 if (!enic->vdev) { 2591 dev_err(dev, "vNIC registration failed, aborting\n"); 2592 err = -ENODEV; 2593 goto err_out_iounmap; 2594 } 2595 2596 err = vnic_devcmd_init(enic->vdev); 2597 2598 if (err) 2599 goto err_out_vnic_unregister; 2600 2601 #ifdef CONFIG_PCI_IOV 2602 /* Get number of subvnics */ 2603 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 2604 if (pos) { 2605 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, 2606 &enic->num_vfs); 2607 if (enic->num_vfs) { 2608 err = pci_enable_sriov(pdev, enic->num_vfs); 2609 if (err) { 2610 dev_err(dev, "SRIOV enable failed, aborting." 
2611 " pci_enable_sriov() returned %d\n", 2612 err); 2613 goto err_out_vnic_unregister; 2614 } 2615 enic->priv_flags |= ENIC_SRIOV_ENABLED; 2616 num_pps = enic->num_vfs; 2617 } 2618 } 2619 #endif 2620 2621 /* Allocate structure for port profiles */ 2622 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); 2623 if (!enic->pp) { 2624 err = -ENOMEM; 2625 goto err_out_disable_sriov_pp; 2626 } 2627 2628 /* Issue device open to get device in known state 2629 */ 2630 2631 err = enic_dev_open(enic); 2632 if (err) { 2633 dev_err(dev, "vNIC dev open failed, aborting\n"); 2634 goto err_out_disable_sriov; 2635 } 2636 2637 /* Setup devcmd lock 2638 */ 2639 2640 spin_lock_init(&enic->devcmd_lock); 2641 spin_lock_init(&enic->enic_api_lock); 2642 2643 /* 2644 * Set ingress vlan rewrite mode before vnic initialization 2645 */ 2646 2647 err = enic_dev_set_ig_vlan_rewrite_mode(enic); 2648 if (err) { 2649 dev_err(dev, 2650 "Failed to set ingress vlan rewrite mode, aborting.\n"); 2651 goto err_out_dev_close; 2652 } 2653 2654 /* Issue device init to initialize the vnic-to-switch link. 2655 * We'll start with carrier off and wait for link UP 2656 * notification later to turn on carrier. We don't need 2657 * to wait here for the vnic-to-switch link initialization 2658 * to complete; link UP notification is the indication that 2659 * the process is complete. 2660 */ 2661 2662 netif_carrier_off(netdev); 2663 2664 /* Do not call dev_init for a dynamic vnic. 2665 * For a dynamic vnic, init_prov_info will be 2666 * called later by an upper layer. 2667 */ 2668 2669 if (!enic_is_dynamic(enic)) { 2670 err = vnic_dev_init(enic->vdev, 0); 2671 if (err) { 2672 dev_err(dev, "vNIC dev init failed, aborting\n"); 2673 goto err_out_dev_close; 2674 } 2675 } 2676 2677 err = enic_dev_init(enic); 2678 if (err) { 2679 dev_err(dev, "Device initialization failed, aborting\n"); 2680 goto err_out_dev_close; 2681 } 2682 2683 netif_set_real_num_tx_queues(netdev, enic->wq_count); 2684 netif_set_real_num_rx_queues(netdev, enic->rq_count); 2685 2686 /* Setup notification timer, HW reset task, and wq locks 2687 */ 2688 2689 init_timer(&enic->notify_timer); 2690 enic->notify_timer.function = enic_notify_timer; 2691 enic->notify_timer.data = (unsigned long)enic; 2692 2693 enic_set_rx_coal_setting(enic); 2694 INIT_WORK(&enic->reset, enic_reset); 2695 INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); 2696 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); 2697 2698 for (i = 0; i < enic->wq_count; i++) 2699 spin_lock_init(&enic->wq_lock[i]); 2700 2701 /* Register net device 2702 */ 2703 2704 enic->port_mtu = enic->config.mtu; 2705 (void)enic_change_mtu(netdev, enic->port_mtu); 2706 2707 err = enic_set_mac_addr(netdev, enic->mac_addr); 2708 if (err) { 2709 dev_err(dev, "Invalid MAC address, aborting\n"); 2710 goto err_out_dev_deinit; 2711 } 2712 2713 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 2714 /* rx coalesce time already got initialized. 

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
#endif
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}

static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);