1 /* 2 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 4 * 5 * This program is free software; you may redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; version 2 of the License. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 16 * SOFTWARE. 17 * 18 */ 19 20 #include <linux/module.h> 21 #include <linux/kernel.h> 22 #include <linux/string.h> 23 #include <linux/errno.h> 24 #include <linux/types.h> 25 #include <linux/init.h> 26 #include <linux/interrupt.h> 27 #include <linux/workqueue.h> 28 #include <linux/pci.h> 29 #include <linux/netdevice.h> 30 #include <linux/etherdevice.h> 31 #include <linux/if.h> 32 #include <linux/if_ether.h> 33 #include <linux/if_vlan.h> 34 #include <linux/in.h> 35 #include <linux/ip.h> 36 #include <linux/ipv6.h> 37 #include <linux/tcp.h> 38 #include <linux/rtnetlink.h> 39 #include <linux/prefetch.h> 40 #include <net/ip6_checksum.h> 41 42 #include "cq_enet_desc.h" 43 #include "vnic_dev.h" 44 #include "vnic_intr.h" 45 #include "vnic_stats.h" 46 #include "vnic_vic.h" 47 #include "enic_res.h" 48 #include "enic.h" 49 #include "enic_dev.h" 50 #include "enic_pp.h" 51 52 #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 53 #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 54 #define MAX_TSO (1 << 16) 55 #define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) 56 57 #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ 58 #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ 59 #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ 60 61 /* Supported devices */ 62 static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = { 63 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 64 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) }, 65 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, 66 { 0, } /* end of table */ 67 }; 68 69 MODULE_DESCRIPTION(DRV_DESCRIPTION); 70 MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>"); 71 MODULE_LICENSE("GPL"); 72 MODULE_VERSION(DRV_VERSION); 73 MODULE_DEVICE_TABLE(pci, enic_id_table); 74 75 int enic_is_dynamic(struct enic *enic) 76 { 77 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; 78 } 79 80 int enic_sriov_enabled(struct enic *enic) 81 { 82 return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; 83 } 84 85 static int enic_is_sriov_vf(struct enic *enic) 86 { 87 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; 88 } 89 90 int enic_is_valid_vf(struct enic *enic, int vf) 91 { 92 #ifdef CONFIG_PCI_IOV 93 return vf >= 0 && vf < enic->num_vfs; 94 #else 95 return 0; 96 #endif 97 } 98 99 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) 100 { 101 struct enic *enic = vnic_dev_priv(wq->vdev); 102 103 if (buf->sop) 104 pci_unmap_single(enic->pdev, buf->dma_addr, 105 buf->len, PCI_DMA_TODEVICE); 106 else 107 pci_unmap_page(enic->pdev, buf->dma_addr, 108 buf->len, PCI_DMA_TODEVICE); 109 110 if (buf->os_buf) 111 dev_kfree_skb_any(buf->os_buf); 112 } 113 114 static void enic_wq_free_buf(struct vnic_wq *wq, 115 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) 116 { 117 enic_free_wq_buf(wq, buf); 118 } 119 120 static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, 121 u8 type, u16 q_number, u16 completed_index, void *opaque) 122 { 123 struct enic *enic = vnic_dev_priv(vdev); 124 125 spin_lock(&enic->wq_lock[q_number]); 126 127 vnic_wq_service(&enic->wq[q_number], cq_desc, 128 completed_index, enic_wq_free_buf, 129 opaque); 130 131 if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && 132 vnic_wq_desc_avail(&enic->wq[q_number]) >= 133 (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) 134 netif_wake_subqueue(enic->netdev, q_number); 135 136 spin_unlock(&enic->wq_lock[q_number]); 137 138 return 0; 139 } 140 141 static void enic_log_q_error(struct enic *enic) 142 { 143 unsigned int i; 144 u32 error_status; 145 146 for (i = 0; i < enic->wq_count; i++) { 147 error_status = vnic_wq_error_status(&enic->wq[i]); 148 if (error_status) 149 netdev_err(enic->netdev, "WQ[%d] error_status %d\n", 150 i, error_status); 151 } 152 153 for (i = 0; i < enic->rq_count; i++) { 154 error_status = vnic_rq_error_status(&enic->rq[i]); 155 if (error_status) 156 netdev_err(enic->netdev, "RQ[%d] error_status %d\n", 157 i, error_status); 158 } 159 } 160 161 static void enic_msglvl_check(struct enic *enic) 162 { 163 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); 164 165 if (msg_enable != enic->msg_enable) { 166 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", 167 enic->msg_enable, msg_enable); 168 enic->msg_enable = msg_enable; 169 } 170 } 171 172 static void enic_mtu_check(struct enic *enic) 173 { 174 u32 mtu = vnic_dev_mtu(enic->vdev); 175 struct net_device *netdev = enic->netdev; 176 177 if (mtu && mtu != enic->port_mtu) { 178 enic->port_mtu = mtu; 179 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { 180 mtu = max_t(int, ENIC_MIN_MTU, 181 min_t(int, ENIC_MAX_MTU, mtu)); 182 if (mtu != netdev->mtu) 183 schedule_work(&enic->change_mtu_work); 184 } else { 185 if (mtu < netdev->mtu) 186 netdev_warn(netdev, 187 "interface MTU (%d) set higher " 188 "than switch port MTU (%d)\n", 189 netdev->mtu, mtu); 190 } 191 } 192 } 193 194 static void enic_link_check(struct enic *enic) 195 { 196 int link_status = vnic_dev_link_status(enic->vdev); 197 int carrier_ok = netif_carrier_ok(enic->netdev); 198 199 if (link_status && !carrier_ok) { 200 netdev_info(enic->netdev, "Link UP\n"); 201 netif_carrier_on(enic->netdev); 202 } else if (!link_status && carrier_ok) { 203 netdev_info(enic->netdev, "Link DOWN\n"); 204 netif_carrier_off(enic->netdev); 205 } 206 } 207 208 static void enic_notify_check(struct enic *enic) 209 { 210 enic_msglvl_check(enic); 211 enic_mtu_check(enic); 212 enic_link_check(enic); 213 } 214 215 #define ENIC_TEST_INTR(pba, i) (pba & (1 << i)) 216 217 static irqreturn_t enic_isr_legacy(int irq, void *data) 218 { 219 struct net_device *netdev = data; 220 struct enic *enic = netdev_priv(netdev); 221 unsigned int io_intr = enic_legacy_io_intr(); 222 unsigned int err_intr = enic_legacy_err_intr(); 223 unsigned int notify_intr = enic_legacy_notify_intr(); 224 u32 pba; 225 226 vnic_intr_mask(&enic->intr[io_intr]); 227 228 pba = vnic_intr_legacy_pba(enic->legacy_pba); 229 if (!pba) { 230 vnic_intr_unmask(&enic->intr[io_intr]); 231 return IRQ_NONE; /* not our interrupt */ 232 } 233 234 if (ENIC_TEST_INTR(pba, notify_intr)) { 235 vnic_intr_return_all_credits(&enic->intr[notify_intr]); 236 enic_notify_check(enic); 237 } 238 239 if (ENIC_TEST_INTR(pba, err_intr)) { 240 vnic_intr_return_all_credits(&enic->intr[err_intr]); 241 enic_log_q_error(enic); 242 /* schedule recovery from WQ/RQ error */ 243 schedule_work(&enic->reset); 244 return IRQ_HANDLED; 245 } 246 247 if (ENIC_TEST_INTR(pba, io_intr)) { 248 if (napi_schedule_prep(&enic->napi[0])) 249 __napi_schedule(&enic->napi[0]); 250 } else { 251 vnic_intr_unmask(&enic->intr[io_intr]); 252 } 253 254 return IRQ_HANDLED; 255 } 256 257 static irqreturn_t enic_isr_msi(int irq, void *data) 258 { 259 struct enic *enic = data; 260 261 /* With MSI, there is no sharing of interrupts, so this is 262 * our interrupt and there is no need to ack it. The device 263 * is not providing per-vector masking, so the OS will not 264 * write to PCI config space to mask/unmask the interrupt. 265 * We're using mask_on_assertion for MSI, so the device 266 * automatically masks the interrupt when the interrupt is 267 * generated. Later, when exiting polling, the interrupt 268 * will be unmasked (see enic_poll). 269 * 270 * Also, the device uses the same PCIe Traffic Class (TC) 271 * for Memory Write data and MSI, so there are no ordering 272 * issues; the MSI will always arrive at the Root Complex 273 * _after_ corresponding Memory Writes (i.e. descriptor 274 * writes). 275 */ 276 277 napi_schedule(&enic->napi[0]); 278 279 return IRQ_HANDLED; 280 } 281 282 static irqreturn_t enic_isr_msix_rq(int irq, void *data) 283 { 284 struct napi_struct *napi = data; 285 286 /* schedule NAPI polling for RQ cleanup */ 287 napi_schedule(napi); 288 289 return IRQ_HANDLED; 290 } 291 292 static irqreturn_t enic_isr_msix_wq(int irq, void *data) 293 { 294 struct enic *enic = data; 295 unsigned int cq; 296 unsigned int intr; 297 unsigned int wq_work_to_do = -1; /* no limit */ 298 unsigned int wq_work_done; 299 unsigned int wq_irq; 300 301 wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector; 302 cq = enic_cq_wq(enic, wq_irq); 303 intr = enic_msix_wq_intr(enic, wq_irq); 304 305 wq_work_done = vnic_cq_service(&enic->cq[cq], 306 wq_work_to_do, enic_wq_service, NULL); 307 308 vnic_intr_return_credits(&enic->intr[intr], 309 wq_work_done, 310 1 /* unmask intr */, 311 1 /* reset intr timer */); 312 313 return IRQ_HANDLED; 314 } 315 316 static irqreturn_t enic_isr_msix_err(int irq, void *data) 317 { 318 struct enic *enic = data; 319 unsigned int intr = enic_msix_err_intr(enic); 320 321 vnic_intr_return_all_credits(&enic->intr[intr]); 322 323 enic_log_q_error(enic); 324 325 /* schedule recovery from WQ/RQ error */ 326 schedule_work(&enic->reset); 327 328 return IRQ_HANDLED; 329 } 330 331 static irqreturn_t enic_isr_msix_notify(int irq, void *data) 332 { 333 struct enic *enic = data; 334 unsigned int intr = enic_msix_notify_intr(enic); 335 336 vnic_intr_return_all_credits(&enic->intr[intr]); 337 enic_notify_check(enic); 338 339 return IRQ_HANDLED; 340 } 341 342 static inline void enic_queue_wq_skb_cont(struct enic *enic, 343 struct vnic_wq *wq, struct sk_buff *skb, 344 unsigned int len_left, int loopback) 345 { 346 const skb_frag_t *frag; 347 348 /* Queue additional data fragments */ 349 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 350 len_left -= skb_frag_size(frag); 351 enic_queue_wq_desc_cont(wq, skb, 352 skb_frag_dma_map(&enic->pdev->dev, 353 frag, 0, skb_frag_size(frag), 354 DMA_TO_DEVICE), 355 skb_frag_size(frag), 356 (len_left == 0), /* EOP? */ 357 loopback); 358 } 359 } 360 361 static inline void enic_queue_wq_skb_vlan(struct enic *enic, 362 struct vnic_wq *wq, struct sk_buff *skb, 363 int vlan_tag_insert, unsigned int vlan_tag, int loopback) 364 { 365 unsigned int head_len = skb_headlen(skb); 366 unsigned int len_left = skb->len - head_len; 367 int eop = (len_left == 0); 368 369 /* Queue the main skb fragment. The fragments are no larger 370 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less 371 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor 372 * per fragment is queued. 373 */ 374 enic_queue_wq_desc(wq, skb, 375 pci_map_single(enic->pdev, skb->data, 376 head_len, PCI_DMA_TODEVICE), 377 head_len, 378 vlan_tag_insert, vlan_tag, 379 eop, loopback); 380 381 if (!eop) 382 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); 383 } 384 385 static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, 386 struct vnic_wq *wq, struct sk_buff *skb, 387 int vlan_tag_insert, unsigned int vlan_tag, int loopback) 388 { 389 unsigned int head_len = skb_headlen(skb); 390 unsigned int len_left = skb->len - head_len; 391 unsigned int hdr_len = skb_checksum_start_offset(skb); 392 unsigned int csum_offset = hdr_len + skb->csum_offset; 393 int eop = (len_left == 0); 394 395 /* Queue the main skb fragment. The fragments are no larger 396 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less 397 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor 398 * per fragment is queued. 399 */ 400 enic_queue_wq_desc_csum_l4(wq, skb, 401 pci_map_single(enic->pdev, skb->data, 402 head_len, PCI_DMA_TODEVICE), 403 head_len, 404 csum_offset, 405 hdr_len, 406 vlan_tag_insert, vlan_tag, 407 eop, loopback); 408 409 if (!eop) 410 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); 411 } 412 413 static inline void enic_queue_wq_skb_tso(struct enic *enic, 414 struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, 415 int vlan_tag_insert, unsigned int vlan_tag, int loopback) 416 { 417 unsigned int frag_len_left = skb_headlen(skb); 418 unsigned int len_left = skb->len - frag_len_left; 419 unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 420 int eop = (len_left == 0); 421 unsigned int len; 422 dma_addr_t dma_addr; 423 unsigned int offset = 0; 424 skb_frag_t *frag; 425 426 /* Preload TCP csum field with IP pseudo hdr calculated 427 * with IP length set to zero. HW will later add in length 428 * to each TCP segment resulting from the TSO. 429 */ 430 431 if (skb->protocol == cpu_to_be16(ETH_P_IP)) { 432 ip_hdr(skb)->check = 0; 433 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 434 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 435 } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { 436 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 437 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 438 } 439 440 /* Queue WQ_ENET_MAX_DESC_LEN length descriptors 441 * for the main skb fragment 442 */ 443 while (frag_len_left) { 444 len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); 445 dma_addr = pci_map_single(enic->pdev, skb->data + offset, 446 len, PCI_DMA_TODEVICE); 447 enic_queue_wq_desc_tso(wq, skb, 448 dma_addr, 449 len, 450 mss, hdr_len, 451 vlan_tag_insert, vlan_tag, 452 eop && (len == frag_len_left), loopback); 453 frag_len_left -= len; 454 offset += len; 455 } 456 457 if (eop) 458 return; 459 460 /* Queue WQ_ENET_MAX_DESC_LEN length descriptors 461 * for additional data fragments 462 */ 463 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 464 len_left -= skb_frag_size(frag); 465 frag_len_left = skb_frag_size(frag); 466 offset = 0; 467 468 while (frag_len_left) { 469 len = min(frag_len_left, 470 (unsigned int)WQ_ENET_MAX_DESC_LEN); 471 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 472 offset, len, 473 DMA_TO_DEVICE); 474 enic_queue_wq_desc_cont(wq, skb, 475 dma_addr, 476 len, 477 (len_left == 0) && 478 (len == frag_len_left), /* EOP? */ 479 loopback); 480 frag_len_left -= len; 481 offset += len; 482 } 483 } 484 } 485 486 static inline void enic_queue_wq_skb(struct enic *enic, 487 struct vnic_wq *wq, struct sk_buff *skb) 488 { 489 unsigned int mss = skb_shinfo(skb)->gso_size; 490 unsigned int vlan_tag = 0; 491 int vlan_tag_insert = 0; 492 int loopback = 0; 493 494 if (vlan_tx_tag_present(skb)) { 495 /* VLAN tag from trunking driver */ 496 vlan_tag_insert = 1; 497 vlan_tag = vlan_tx_tag_get(skb); 498 } else if (enic->loop_enable) { 499 vlan_tag = enic->loop_tag; 500 loopback = 1; 501 } 502 503 if (mss) 504 enic_queue_wq_skb_tso(enic, wq, skb, mss, 505 vlan_tag_insert, vlan_tag, loopback); 506 else if (skb->ip_summed == CHECKSUM_PARTIAL) 507 enic_queue_wq_skb_csum_l4(enic, wq, skb, 508 vlan_tag_insert, vlan_tag, loopback); 509 else 510 enic_queue_wq_skb_vlan(enic, wq, skb, 511 vlan_tag_insert, vlan_tag, loopback); 512 } 513 514 /* netif_tx_lock held, process context with BHs disabled, or BH */ 515 static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, 516 struct net_device *netdev) 517 { 518 struct enic *enic = netdev_priv(netdev); 519 struct vnic_wq *wq; 520 unsigned long flags; 521 unsigned int txq_map; 522 523 if (skb->len <= 0) { 524 dev_kfree_skb(skb); 525 return NETDEV_TX_OK; 526 } 527 528 txq_map = skb_get_queue_mapping(skb) % enic->wq_count; 529 wq = &enic->wq[txq_map]; 530 531 /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, 532 * which is very likely. In the off chance it's going to take 533 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb. 534 */ 535 536 if (skb_shinfo(skb)->gso_size == 0 && 537 skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && 538 skb_linearize(skb)) { 539 dev_kfree_skb(skb); 540 return NETDEV_TX_OK; 541 } 542 543 spin_lock_irqsave(&enic->wq_lock[txq_map], flags); 544 545 if (vnic_wq_desc_avail(wq) < 546 skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { 547 netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); 548 /* This is a hard error, log it */ 549 netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); 550 spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); 551 return NETDEV_TX_BUSY; 552 } 553 554 enic_queue_wq_skb(enic, wq, skb); 555 556 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) 557 netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); 558 559 spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); 560 561 return NETDEV_TX_OK; 562 } 563 564 /* dev_base_lock rwlock held, nominally process context */ 565 static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, 566 struct rtnl_link_stats64 *net_stats) 567 { 568 struct enic *enic = netdev_priv(netdev); 569 struct vnic_stats *stats; 570 571 enic_dev_stats_dump(enic, &stats); 572 573 net_stats->tx_packets = stats->tx.tx_frames_ok; 574 net_stats->tx_bytes = stats->tx.tx_bytes_ok; 575 net_stats->tx_errors = stats->tx.tx_errors; 576 net_stats->tx_dropped = stats->tx.tx_drops; 577 578 net_stats->rx_packets = stats->rx.rx_frames_ok; 579 net_stats->rx_bytes = stats->rx.rx_bytes_ok; 580 net_stats->rx_errors = stats->rx.rx_errors; 581 net_stats->multicast = stats->rx.rx_multicast_frames_ok; 582 net_stats->rx_over_errors = enic->rq_truncated_pkts; 583 net_stats->rx_crc_errors = enic->rq_bad_fcs; 584 net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; 585 586 return net_stats; 587 } 588 589 void enic_reset_addr_lists(struct enic *enic) 590 { 591 enic->mc_count = 0; 592 enic->uc_count = 0; 593 enic->flags = 0; 594 } 595 596 static int enic_set_mac_addr(struct net_device *netdev, char *addr) 597 { 598 struct enic *enic = netdev_priv(netdev); 599 600 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { 601 if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr)) 602 return -EADDRNOTAVAIL; 603 } else { 604 if (!is_valid_ether_addr(addr)) 605 return -EADDRNOTAVAIL; 606 } 607 608 memcpy(netdev->dev_addr, addr, netdev->addr_len); 609 610 return 0; 611 } 612 613 static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) 614 { 615 struct enic *enic = netdev_priv(netdev); 616 struct sockaddr *saddr = p; 617 char *addr = saddr->sa_data; 618 int err; 619 620 if (netif_running(enic->netdev)) { 621 err = enic_dev_del_station_addr(enic); 622 if (err) 623 return err; 624 } 625 626 err = enic_set_mac_addr(netdev, addr); 627 if (err) 628 return err; 629 630 if (netif_running(enic->netdev)) { 631 err = enic_dev_add_station_addr(enic); 632 if (err) 633 return err; 634 } 635 636 return err; 637 } 638 639 static int enic_set_mac_address(struct net_device *netdev, void *p) 640 { 641 struct sockaddr *saddr = p; 642 char *addr = saddr->sa_data; 643 struct enic *enic = netdev_priv(netdev); 644 int err; 645 646 err = enic_dev_del_station_addr(enic); 647 if (err) 648 return err; 649 650 err = enic_set_mac_addr(netdev, addr); 651 if (err) 652 return err; 653 654 return enic_dev_add_station_addr(enic); 655 } 656 657 static void enic_update_multicast_addr_list(struct enic *enic) 658 { 659 struct net_device *netdev = enic->netdev; 660 struct netdev_hw_addr *ha; 661 unsigned int mc_count = netdev_mc_count(netdev); 662 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 663 unsigned int i, j; 664 665 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) { 666 netdev_warn(netdev, "Registering only %d out of %d " 667 "multicast addresses\n", 668 ENIC_MULTICAST_PERFECT_FILTERS, mc_count); 669 mc_count = ENIC_MULTICAST_PERFECT_FILTERS; 670 } 671 672 /* Is there an easier way? Trying to minimize to 673 * calls to add/del multicast addrs. We keep the 674 * addrs from the last call in enic->mc_addr and 675 * look for changes to add/del. 676 */ 677 678 i = 0; 679 netdev_for_each_mc_addr(ha, netdev) { 680 if (i == mc_count) 681 break; 682 memcpy(mc_addr[i++], ha->addr, ETH_ALEN); 683 } 684 685 for (i = 0; i < enic->mc_count; i++) { 686 for (j = 0; j < mc_count; j++) 687 if (ether_addr_equal(enic->mc_addr[i], mc_addr[j])) 688 break; 689 if (j == mc_count) 690 enic_dev_del_addr(enic, enic->mc_addr[i]); 691 } 692 693 for (i = 0; i < mc_count; i++) { 694 for (j = 0; j < enic->mc_count; j++) 695 if (ether_addr_equal(mc_addr[i], enic->mc_addr[j])) 696 break; 697 if (j == enic->mc_count) 698 enic_dev_add_addr(enic, mc_addr[i]); 699 } 700 701 /* Save the list to compare against next time 702 */ 703 704 for (i = 0; i < mc_count; i++) 705 memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN); 706 707 enic->mc_count = mc_count; 708 } 709 710 static void enic_update_unicast_addr_list(struct enic *enic) 711 { 712 struct net_device *netdev = enic->netdev; 713 struct netdev_hw_addr *ha; 714 unsigned int uc_count = netdev_uc_count(netdev); 715 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN]; 716 unsigned int i, j; 717 718 if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) { 719 netdev_warn(netdev, "Registering only %d out of %d " 720 "unicast addresses\n", 721 ENIC_UNICAST_PERFECT_FILTERS, uc_count); 722 uc_count = ENIC_UNICAST_PERFECT_FILTERS; 723 } 724 725 /* Is there an easier way? Trying to minimize to 726 * calls to add/del unicast addrs. We keep the 727 * addrs from the last call in enic->uc_addr and 728 * look for changes to add/del. 729 */ 730 731 i = 0; 732 netdev_for_each_uc_addr(ha, netdev) { 733 if (i == uc_count) 734 break; 735 memcpy(uc_addr[i++], ha->addr, ETH_ALEN); 736 } 737 738 for (i = 0; i < enic->uc_count; i++) { 739 for (j = 0; j < uc_count; j++) 740 if (ether_addr_equal(enic->uc_addr[i], uc_addr[j])) 741 break; 742 if (j == uc_count) 743 enic_dev_del_addr(enic, enic->uc_addr[i]); 744 } 745 746 for (i = 0; i < uc_count; i++) { 747 for (j = 0; j < enic->uc_count; j++) 748 if (ether_addr_equal(uc_addr[i], enic->uc_addr[j])) 749 break; 750 if (j == enic->uc_count) 751 enic_dev_add_addr(enic, uc_addr[i]); 752 } 753 754 /* Save the list to compare against next time 755 */ 756 757 for (i = 0; i < uc_count; i++) 758 memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN); 759 760 enic->uc_count = uc_count; 761 } 762 763 /* netif_tx_lock held, BHs disabled */ 764 static void enic_set_rx_mode(struct net_device *netdev) 765 { 766 struct enic *enic = netdev_priv(netdev); 767 int directed = 1; 768 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; 769 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; 770 int promisc = (netdev->flags & IFF_PROMISC) || 771 netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS; 772 int allmulti = (netdev->flags & IFF_ALLMULTI) || 773 netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS; 774 unsigned int flags = netdev->flags | 775 (allmulti ? IFF_ALLMULTI : 0) | 776 (promisc ? IFF_PROMISC : 0); 777 778 if (enic->flags != flags) { 779 enic->flags = flags; 780 enic_dev_packet_filter(enic, directed, 781 multicast, broadcast, promisc, allmulti); 782 } 783 784 if (!promisc) { 785 enic_update_unicast_addr_list(enic); 786 if (!allmulti) 787 enic_update_multicast_addr_list(enic); 788 } 789 } 790 791 /* netif_tx_lock held, BHs disabled */ 792 static void enic_tx_timeout(struct net_device *netdev) 793 { 794 struct enic *enic = netdev_priv(netdev); 795 schedule_work(&enic->reset); 796 } 797 798 static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 799 { 800 struct enic *enic = netdev_priv(netdev); 801 struct enic_port_profile *pp; 802 int err; 803 804 ENIC_PP_BY_INDEX(enic, vf, pp, &err); 805 if (err) 806 return err; 807 808 if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) { 809 if (vf == PORT_SELF_VF) { 810 memcpy(pp->vf_mac, mac, ETH_ALEN); 811 return 0; 812 } else { 813 /* 814 * For sriov vf's set the mac in hw 815 */ 816 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, 817 vnic_dev_set_mac_addr, mac); 818 return enic_dev_status_to_errno(err); 819 } 820 } else 821 return -EINVAL; 822 } 823 824 static int enic_set_vf_port(struct net_device *netdev, int vf, 825 struct nlattr *port[]) 826 { 827 struct enic *enic = netdev_priv(netdev); 828 struct enic_port_profile prev_pp; 829 struct enic_port_profile *pp; 830 int err = 0, restore_pp = 1; 831 832 ENIC_PP_BY_INDEX(enic, vf, pp, &err); 833 if (err) 834 return err; 835 836 if (!port[IFLA_PORT_REQUEST]) 837 return -EOPNOTSUPP; 838 839 memcpy(&prev_pp, pp, sizeof(*enic->pp)); 840 memset(pp, 0, sizeof(*enic->pp)); 841 842 pp->set |= ENIC_SET_REQUEST; 843 pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); 844 845 if (port[IFLA_PORT_PROFILE]) { 846 pp->set |= ENIC_SET_NAME; 847 memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), 848 PORT_PROFILE_MAX); 849 } 850 851 if (port[IFLA_PORT_INSTANCE_UUID]) { 852 pp->set |= ENIC_SET_INSTANCE; 853 memcpy(pp->instance_uuid, 854 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); 855 } 856 857 if (port[IFLA_PORT_HOST_UUID]) { 858 pp->set |= ENIC_SET_HOST; 859 memcpy(pp->host_uuid, 860 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); 861 } 862 863 if (vf == PORT_SELF_VF) { 864 /* Special case handling: mac came from IFLA_VF_MAC */ 865 if (!is_zero_ether_addr(prev_pp.vf_mac)) 866 memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN); 867 868 if (is_zero_ether_addr(netdev->dev_addr)) 869 eth_hw_addr_random(netdev); 870 } else { 871 /* SR-IOV VF: get mac from adapter */ 872 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, 873 vnic_dev_get_mac_addr, pp->mac_addr); 874 if (err) { 875 netdev_err(netdev, "Error getting mac for vf %d\n", vf); 876 memcpy(pp, &prev_pp, sizeof(*pp)); 877 return enic_dev_status_to_errno(err); 878 } 879 } 880 881 err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); 882 if (err) { 883 if (restore_pp) { 884 /* Things are still the way they were: Implicit 885 * DISASSOCIATE failed 886 */ 887 memcpy(pp, &prev_pp, sizeof(*pp)); 888 } else { 889 memset(pp, 0, sizeof(*pp)); 890 if (vf == PORT_SELF_VF) 891 memset(netdev->dev_addr, 0, ETH_ALEN); 892 } 893 } else { 894 /* Set flag to indicate that the port assoc/disassoc 895 * request has been sent out to fw 896 */ 897 pp->set |= ENIC_PORT_REQUEST_APPLIED; 898 899 /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ 900 if (pp->request == PORT_REQUEST_DISASSOCIATE) { 901 memset(pp->mac_addr, 0, ETH_ALEN); 902 if (vf == PORT_SELF_VF) 903 memset(netdev->dev_addr, 0, ETH_ALEN); 904 } 905 } 906 907 if (vf == PORT_SELF_VF) 908 memset(pp->vf_mac, 0, ETH_ALEN); 909 910 return err; 911 } 912 913 static int enic_get_vf_port(struct net_device *netdev, int vf, 914 struct sk_buff *skb) 915 { 916 struct enic *enic = netdev_priv(netdev); 917 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 918 struct enic_port_profile *pp; 919 int err; 920 921 ENIC_PP_BY_INDEX(enic, vf, pp, &err); 922 if (err) 923 return err; 924 925 if (!(pp->set & ENIC_PORT_REQUEST_APPLIED)) 926 return -ENODATA; 927 928 err = enic_process_get_pp_request(enic, vf, pp->request, &response); 929 if (err) 930 return err; 931 932 if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) || 933 nla_put_u16(skb, IFLA_PORT_RESPONSE, response) || 934 ((pp->set & ENIC_SET_NAME) && 935 nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) || 936 ((pp->set & ENIC_SET_INSTANCE) && 937 nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 938 pp->instance_uuid)) || 939 ((pp->set & ENIC_SET_HOST) && 940 nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid))) 941 goto nla_put_failure; 942 return 0; 943 944 nla_put_failure: 945 return -EMSGSIZE; 946 } 947 948 static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) 949 { 950 struct enic *enic = vnic_dev_priv(rq->vdev); 951 952 if (!buf->os_buf) 953 return; 954 955 pci_unmap_single(enic->pdev, buf->dma_addr, 956 buf->len, PCI_DMA_FROMDEVICE); 957 dev_kfree_skb_any(buf->os_buf); 958 } 959 960 static int enic_rq_alloc_buf(struct vnic_rq *rq) 961 { 962 struct enic *enic = vnic_dev_priv(rq->vdev); 963 struct net_device *netdev = enic->netdev; 964 struct sk_buff *skb; 965 unsigned int len = netdev->mtu + VLAN_ETH_HLEN; 966 unsigned int os_buf_index = 0; 967 dma_addr_t dma_addr; 968 969 skb = netdev_alloc_skb_ip_align(netdev, len); 970 if (!skb) 971 return -ENOMEM; 972 973 dma_addr = pci_map_single(enic->pdev, skb->data, 974 len, PCI_DMA_FROMDEVICE); 975 976 enic_queue_rq_desc(rq, skb, os_buf_index, 977 dma_addr, len); 978 979 return 0; 980 } 981 982 static void enic_rq_indicate_buf(struct vnic_rq *rq, 983 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 984 int skipped, void *opaque) 985 { 986 struct enic *enic = vnic_dev_priv(rq->vdev); 987 struct net_device *netdev = enic->netdev; 988 struct sk_buff *skb; 989 990 u8 type, color, eop, sop, ingress_port, vlan_stripped; 991 u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; 992 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; 993 u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; 994 u8 packet_error; 995 u16 q_number, completed_index, bytes_written, vlan_tci, checksum; 996 u32 rss_hash; 997 998 if (skipped) 999 return; 1000 1001 skb = buf->os_buf; 1002 prefetch(skb->data - NET_IP_ALIGN); 1003 pci_unmap_single(enic->pdev, buf->dma_addr, 1004 buf->len, PCI_DMA_FROMDEVICE); 1005 1006 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, 1007 &type, &color, &q_number, &completed_index, 1008 &ingress_port, &fcoe, &eop, &sop, &rss_type, 1009 &csum_not_calc, &rss_hash, &bytes_written, 1010 &packet_error, &vlan_stripped, &vlan_tci, &checksum, 1011 &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, 1012 &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, 1013 &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, 1014 &fcs_ok); 1015 1016 if (packet_error) { 1017 1018 if (!fcs_ok) { 1019 if (bytes_written > 0) 1020 enic->rq_bad_fcs++; 1021 else if (bytes_written == 0) 1022 enic->rq_truncated_pkts++; 1023 } 1024 1025 dev_kfree_skb_any(skb); 1026 1027 return; 1028 } 1029 1030 if (eop && bytes_written > 0) { 1031 1032 /* Good receive 1033 */ 1034 1035 skb_put(skb, bytes_written); 1036 skb->protocol = eth_type_trans(skb, netdev); 1037 skb_record_rx_queue(skb, q_number); 1038 if (netdev->features & NETIF_F_RXHASH) { 1039 skb->rxhash = rss_hash; 1040 if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX | 1041 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 | 1042 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) 1043 skb->l4_rxhash = true; 1044 } 1045 1046 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { 1047 skb->csum = htons(checksum); 1048 skb->ip_summed = CHECKSUM_COMPLETE; 1049 } 1050 1051 if (vlan_stripped) 1052 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 1053 1054 if (netdev->features & NETIF_F_GRO) 1055 napi_gro_receive(&enic->napi[q_number], skb); 1056 else 1057 netif_receive_skb(skb); 1058 } else { 1059 1060 /* Buffer overflow 1061 */ 1062 1063 dev_kfree_skb_any(skb); 1064 } 1065 } 1066 1067 static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, 1068 u8 type, u16 q_number, u16 completed_index, void *opaque) 1069 { 1070 struct enic *enic = vnic_dev_priv(vdev); 1071 1072 vnic_rq_service(&enic->rq[q_number], cq_desc, 1073 completed_index, VNIC_RQ_RETURN_DESC, 1074 enic_rq_indicate_buf, opaque); 1075 1076 return 0; 1077 } 1078 1079 static int enic_poll(struct napi_struct *napi, int budget) 1080 { 1081 struct net_device *netdev = napi->dev; 1082 struct enic *enic = netdev_priv(netdev); 1083 unsigned int cq_rq = enic_cq_rq(enic, 0); 1084 unsigned int cq_wq = enic_cq_wq(enic, 0); 1085 unsigned int intr = enic_legacy_io_intr(); 1086 unsigned int rq_work_to_do = budget; 1087 unsigned int wq_work_to_do = -1; /* no limit */ 1088 unsigned int work_done, rq_work_done, wq_work_done; 1089 int err; 1090 1091 /* Service RQ (first) and WQ 1092 */ 1093 1094 rq_work_done = vnic_cq_service(&enic->cq[cq_rq], 1095 rq_work_to_do, enic_rq_service, NULL); 1096 1097 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], 1098 wq_work_to_do, enic_wq_service, NULL); 1099 1100 /* Accumulate intr event credits for this polling 1101 * cycle. An intr event is the completion of a 1102 * a WQ or RQ packet. 1103 */ 1104 1105 work_done = rq_work_done + wq_work_done; 1106 1107 if (work_done > 0) 1108 vnic_intr_return_credits(&enic->intr[intr], 1109 work_done, 1110 0 /* don't unmask intr */, 1111 0 /* don't reset intr timer */); 1112 1113 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1114 1115 /* Buffer allocation failed. Stay in polling 1116 * mode so we can try to fill the ring again. 1117 */ 1118 1119 if (err) 1120 rq_work_done = rq_work_to_do; 1121 1122 if (rq_work_done < rq_work_to_do) { 1123 1124 /* Some work done, but not enough to stay in polling, 1125 * exit polling 1126 */ 1127 1128 napi_complete(napi); 1129 vnic_intr_unmask(&enic->intr[intr]); 1130 } 1131 1132 return rq_work_done; 1133 } 1134 1135 static int enic_poll_msix(struct napi_struct *napi, int budget) 1136 { 1137 struct net_device *netdev = napi->dev; 1138 struct enic *enic = netdev_priv(netdev); 1139 unsigned int rq = (napi - &enic->napi[0]); 1140 unsigned int cq = enic_cq_rq(enic, rq); 1141 unsigned int intr = enic_msix_rq_intr(enic, rq); 1142 unsigned int work_to_do = budget; 1143 unsigned int work_done; 1144 int err; 1145 1146 /* Service RQ 1147 */ 1148 1149 work_done = vnic_cq_service(&enic->cq[cq], 1150 work_to_do, enic_rq_service, NULL); 1151 1152 /* Return intr event credits for this polling 1153 * cycle. An intr event is the completion of a 1154 * RQ packet. 1155 */ 1156 1157 if (work_done > 0) 1158 vnic_intr_return_credits(&enic->intr[intr], 1159 work_done, 1160 0 /* don't unmask intr */, 1161 0 /* don't reset intr timer */); 1162 1163 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); 1164 1165 /* Buffer allocation failed. Stay in polling mode 1166 * so we can try to fill the ring again. 1167 */ 1168 1169 if (err) 1170 work_done = work_to_do; 1171 1172 if (work_done < work_to_do) { 1173 1174 /* Some work done, but not enough to stay in polling, 1175 * exit polling 1176 */ 1177 1178 napi_complete(napi); 1179 vnic_intr_unmask(&enic->intr[intr]); 1180 } 1181 1182 return work_done; 1183 } 1184 1185 static void enic_notify_timer(unsigned long data) 1186 { 1187 struct enic *enic = (struct enic *)data; 1188 1189 enic_notify_check(enic); 1190 1191 mod_timer(&enic->notify_timer, 1192 round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD)); 1193 } 1194 1195 static void enic_free_intr(struct enic *enic) 1196 { 1197 struct net_device *netdev = enic->netdev; 1198 unsigned int i; 1199 1200 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1201 case VNIC_DEV_INTR_MODE_INTX: 1202 free_irq(enic->pdev->irq, netdev); 1203 break; 1204 case VNIC_DEV_INTR_MODE_MSI: 1205 free_irq(enic->pdev->irq, enic); 1206 break; 1207 case VNIC_DEV_INTR_MODE_MSIX: 1208 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) 1209 if (enic->msix[i].requested) 1210 free_irq(enic->msix_entry[i].vector, 1211 enic->msix[i].devid); 1212 break; 1213 default: 1214 break; 1215 } 1216 } 1217 1218 static int enic_request_intr(struct enic *enic) 1219 { 1220 struct net_device *netdev = enic->netdev; 1221 unsigned int i, intr; 1222 int err = 0; 1223 1224 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1225 1226 case VNIC_DEV_INTR_MODE_INTX: 1227 1228 err = request_irq(enic->pdev->irq, enic_isr_legacy, 1229 IRQF_SHARED, netdev->name, netdev); 1230 break; 1231 1232 case VNIC_DEV_INTR_MODE_MSI: 1233 1234 err = request_irq(enic->pdev->irq, enic_isr_msi, 1235 0, netdev->name, enic); 1236 break; 1237 1238 case VNIC_DEV_INTR_MODE_MSIX: 1239 1240 for (i = 0; i < enic->rq_count; i++) { 1241 intr = enic_msix_rq_intr(enic, i); 1242 snprintf(enic->msix[intr].devname, 1243 sizeof(enic->msix[intr].devname), 1244 "%.11s-rx-%d", netdev->name, i); 1245 enic->msix[intr].isr = enic_isr_msix_rq; 1246 enic->msix[intr].devid = &enic->napi[i]; 1247 } 1248 1249 for (i = 0; i < enic->wq_count; i++) { 1250 intr = enic_msix_wq_intr(enic, i); 1251 snprintf(enic->msix[intr].devname, 1252 sizeof(enic->msix[intr].devname), 1253 "%.11s-tx-%d", netdev->name, i); 1254 enic->msix[intr].isr = enic_isr_msix_wq; 1255 enic->msix[intr].devid = enic; 1256 } 1257 1258 intr = enic_msix_err_intr(enic); 1259 snprintf(enic->msix[intr].devname, 1260 sizeof(enic->msix[intr].devname), 1261 "%.11s-err", netdev->name); 1262 enic->msix[intr].isr = enic_isr_msix_err; 1263 enic->msix[intr].devid = enic; 1264 1265 intr = enic_msix_notify_intr(enic); 1266 snprintf(enic->msix[intr].devname, 1267 sizeof(enic->msix[intr].devname), 1268 "%.11s-notify", netdev->name); 1269 enic->msix[intr].isr = enic_isr_msix_notify; 1270 enic->msix[intr].devid = enic; 1271 1272 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) 1273 enic->msix[i].requested = 0; 1274 1275 for (i = 0; i < enic->intr_count; i++) { 1276 err = request_irq(enic->msix_entry[i].vector, 1277 enic->msix[i].isr, 0, 1278 enic->msix[i].devname, 1279 enic->msix[i].devid); 1280 if (err) { 1281 enic_free_intr(enic); 1282 break; 1283 } 1284 enic->msix[i].requested = 1; 1285 } 1286 1287 break; 1288 1289 default: 1290 break; 1291 } 1292 1293 return err; 1294 } 1295 1296 static void enic_synchronize_irqs(struct enic *enic) 1297 { 1298 unsigned int i; 1299 1300 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1301 case VNIC_DEV_INTR_MODE_INTX: 1302 case VNIC_DEV_INTR_MODE_MSI: 1303 synchronize_irq(enic->pdev->irq); 1304 break; 1305 case VNIC_DEV_INTR_MODE_MSIX: 1306 for (i = 0; i < enic->intr_count; i++) 1307 synchronize_irq(enic->msix_entry[i].vector); 1308 break; 1309 default: 1310 break; 1311 } 1312 } 1313 1314 static int enic_dev_notify_set(struct enic *enic) 1315 { 1316 int err; 1317 1318 spin_lock(&enic->devcmd_lock); 1319 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1320 case VNIC_DEV_INTR_MODE_INTX: 1321 err = vnic_dev_notify_set(enic->vdev, 1322 enic_legacy_notify_intr()); 1323 break; 1324 case VNIC_DEV_INTR_MODE_MSIX: 1325 err = vnic_dev_notify_set(enic->vdev, 1326 enic_msix_notify_intr(enic)); 1327 break; 1328 default: 1329 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); 1330 break; 1331 } 1332 spin_unlock(&enic->devcmd_lock); 1333 1334 return err; 1335 } 1336 1337 static void enic_notify_timer_start(struct enic *enic) 1338 { 1339 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1340 case VNIC_DEV_INTR_MODE_MSI: 1341 mod_timer(&enic->notify_timer, jiffies); 1342 break; 1343 default: 1344 /* Using intr for notification for INTx/MSI-X */ 1345 break; 1346 } 1347 } 1348 1349 /* rtnl lock is held, process context */ 1350 static int enic_open(struct net_device *netdev) 1351 { 1352 struct enic *enic = netdev_priv(netdev); 1353 unsigned int i; 1354 int err; 1355 1356 err = enic_request_intr(enic); 1357 if (err) { 1358 netdev_err(netdev, "Unable to request irq.\n"); 1359 return err; 1360 } 1361 1362 err = enic_dev_notify_set(enic); 1363 if (err) { 1364 netdev_err(netdev, 1365 "Failed to alloc notify buffer, aborting.\n"); 1366 goto err_out_free_intr; 1367 } 1368 1369 for (i = 0; i < enic->rq_count; i++) { 1370 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); 1371 /* Need at least one buffer on ring to get going */ 1372 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1373 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1374 err = -ENOMEM; 1375 goto err_out_notify_unset; 1376 } 1377 } 1378 1379 for (i = 0; i < enic->wq_count; i++) 1380 vnic_wq_enable(&enic->wq[i]); 1381 for (i = 0; i < enic->rq_count; i++) 1382 vnic_rq_enable(&enic->rq[i]); 1383 1384 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) 1385 enic_dev_add_station_addr(enic); 1386 1387 enic_set_rx_mode(netdev); 1388 1389 netif_tx_wake_all_queues(netdev); 1390 1391 for (i = 0; i < enic->rq_count; i++) 1392 napi_enable(&enic->napi[i]); 1393 1394 enic_dev_enable(enic); 1395 1396 for (i = 0; i < enic->intr_count; i++) 1397 vnic_intr_unmask(&enic->intr[i]); 1398 1399 enic_notify_timer_start(enic); 1400 1401 return 0; 1402 1403 err_out_notify_unset: 1404 enic_dev_notify_unset(enic); 1405 err_out_free_intr: 1406 enic_free_intr(enic); 1407 1408 return err; 1409 } 1410 1411 /* rtnl lock is held, process context */ 1412 static int enic_stop(struct net_device *netdev) 1413 { 1414 struct enic *enic = netdev_priv(netdev); 1415 unsigned int i; 1416 int err; 1417 1418 for (i = 0; i < enic->intr_count; i++) { 1419 vnic_intr_mask(&enic->intr[i]); 1420 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ 1421 } 1422 1423 enic_synchronize_irqs(enic); 1424 1425 del_timer_sync(&enic->notify_timer); 1426 1427 enic_dev_disable(enic); 1428 1429 for (i = 0; i < enic->rq_count; i++) 1430 napi_disable(&enic->napi[i]); 1431 1432 netif_carrier_off(netdev); 1433 netif_tx_disable(netdev); 1434 1435 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) 1436 enic_dev_del_station_addr(enic); 1437 1438 for (i = 0; i < enic->wq_count; i++) { 1439 err = vnic_wq_disable(&enic->wq[i]); 1440 if (err) 1441 return err; 1442 } 1443 for (i = 0; i < enic->rq_count; i++) { 1444 err = vnic_rq_disable(&enic->rq[i]); 1445 if (err) 1446 return err; 1447 } 1448 1449 enic_dev_notify_unset(enic); 1450 enic_free_intr(enic); 1451 1452 for (i = 0; i < enic->wq_count; i++) 1453 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); 1454 for (i = 0; i < enic->rq_count; i++) 1455 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 1456 for (i = 0; i < enic->cq_count; i++) 1457 vnic_cq_clean(&enic->cq[i]); 1458 for (i = 0; i < enic->intr_count; i++) 1459 vnic_intr_clean(&enic->intr[i]); 1460 1461 return 0; 1462 } 1463 1464 static int enic_change_mtu(struct net_device *netdev, int new_mtu) 1465 { 1466 struct enic *enic = netdev_priv(netdev); 1467 int running = netif_running(netdev); 1468 1469 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) 1470 return -EINVAL; 1471 1472 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 1473 return -EOPNOTSUPP; 1474 1475 if (running) 1476 enic_stop(netdev); 1477 1478 netdev->mtu = new_mtu; 1479 1480 if (netdev->mtu > enic->port_mtu) 1481 netdev_warn(netdev, 1482 "interface MTU (%d) set higher than port MTU (%d)\n", 1483 netdev->mtu, enic->port_mtu); 1484 1485 if (running) 1486 enic_open(netdev); 1487 1488 return 0; 1489 } 1490 1491 static void enic_change_mtu_work(struct work_struct *work) 1492 { 1493 struct enic *enic = container_of(work, struct enic, change_mtu_work); 1494 struct net_device *netdev = enic->netdev; 1495 int new_mtu = vnic_dev_mtu(enic->vdev); 1496 int err; 1497 unsigned int i; 1498 1499 new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); 1500 1501 rtnl_lock(); 1502 1503 /* Stop RQ */ 1504 del_timer_sync(&enic->notify_timer); 1505 1506 for (i = 0; i < enic->rq_count; i++) 1507 napi_disable(&enic->napi[i]); 1508 1509 vnic_intr_mask(&enic->intr[0]); 1510 enic_synchronize_irqs(enic); 1511 err = vnic_rq_disable(&enic->rq[0]); 1512 if (err) { 1513 rtnl_unlock(); 1514 netdev_err(netdev, "Unable to disable RQ.\n"); 1515 return; 1516 } 1517 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); 1518 vnic_cq_clean(&enic->cq[0]); 1519 vnic_intr_clean(&enic->intr[0]); 1520 1521 /* Fill RQ with new_mtu-sized buffers */ 1522 netdev->mtu = new_mtu; 1523 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1524 /* Need at least one buffer on ring to get going */ 1525 if (vnic_rq_desc_used(&enic->rq[0]) == 0) { 1526 rtnl_unlock(); 1527 netdev_err(netdev, "Unable to alloc receive buffers.\n"); 1528 return; 1529 } 1530 1531 /* Start RQ */ 1532 vnic_rq_enable(&enic->rq[0]); 1533 napi_enable(&enic->napi[0]); 1534 vnic_intr_unmask(&enic->intr[0]); 1535 enic_notify_timer_start(enic); 1536 1537 rtnl_unlock(); 1538 1539 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); 1540 } 1541 1542 #ifdef CONFIG_NET_POLL_CONTROLLER 1543 static void enic_poll_controller(struct net_device *netdev) 1544 { 1545 struct enic *enic = netdev_priv(netdev); 1546 struct vnic_dev *vdev = enic->vdev; 1547 unsigned int i, intr; 1548 1549 switch (vnic_dev_get_intr_mode(vdev)) { 1550 case VNIC_DEV_INTR_MODE_MSIX: 1551 for (i = 0; i < enic->rq_count; i++) { 1552 intr = enic_msix_rq_intr(enic, i); 1553 enic_isr_msix_rq(enic->msix_entry[intr].vector, 1554 &enic->napi[i]); 1555 } 1556 1557 for (i = 0; i < enic->wq_count; i++) { 1558 intr = enic_msix_wq_intr(enic, i); 1559 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic); 1560 } 1561 1562 break; 1563 case VNIC_DEV_INTR_MODE_MSI: 1564 enic_isr_msi(enic->pdev->irq, enic); 1565 break; 1566 case VNIC_DEV_INTR_MODE_INTX: 1567 enic_isr_legacy(enic->pdev->irq, netdev); 1568 break; 1569 default: 1570 break; 1571 } 1572 } 1573 #endif 1574 1575 static int enic_dev_wait(struct vnic_dev *vdev, 1576 int (*start)(struct vnic_dev *, int), 1577 int (*finished)(struct vnic_dev *, int *), 1578 int arg) 1579 { 1580 unsigned long time; 1581 int done; 1582 int err; 1583 1584 BUG_ON(in_interrupt()); 1585 1586 err = start(vdev, arg); 1587 if (err) 1588 return err; 1589 1590 /* Wait for func to complete...2 seconds max 1591 */ 1592 1593 time = jiffies + (HZ * 2); 1594 do { 1595 1596 err = finished(vdev, &done); 1597 if (err) 1598 return err; 1599 1600 if (done) 1601 return 0; 1602 1603 schedule_timeout_uninterruptible(HZ / 10); 1604 1605 } while (time_after(time, jiffies)); 1606 1607 return -ETIMEDOUT; 1608 } 1609 1610 static int enic_dev_open(struct enic *enic) 1611 { 1612 int err; 1613 1614 err = enic_dev_wait(enic->vdev, vnic_dev_open, 1615 vnic_dev_open_done, 0); 1616 if (err) 1617 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", 1618 err); 1619 1620 return err; 1621 } 1622 1623 static int enic_dev_hang_reset(struct enic *enic) 1624 { 1625 int err; 1626 1627 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, 1628 vnic_dev_hang_reset_done, 0); 1629 if (err) 1630 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", 1631 err); 1632 1633 return err; 1634 } 1635 1636 static int enic_set_rsskey(struct enic *enic) 1637 { 1638 dma_addr_t rss_key_buf_pa; 1639 union vnic_rss_key *rss_key_buf_va = NULL; 1640 union vnic_rss_key rss_key = { 1641 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, 1642 .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}, 1643 .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}, 1644 .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}, 1645 }; 1646 int err; 1647 1648 rss_key_buf_va = pci_alloc_consistent(enic->pdev, 1649 sizeof(union vnic_rss_key), &rss_key_buf_pa); 1650 if (!rss_key_buf_va) 1651 return -ENOMEM; 1652 1653 memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); 1654 1655 spin_lock(&enic->devcmd_lock); 1656 err = enic_set_rss_key(enic, 1657 rss_key_buf_pa, 1658 sizeof(union vnic_rss_key)); 1659 spin_unlock(&enic->devcmd_lock); 1660 1661 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), 1662 rss_key_buf_va, rss_key_buf_pa); 1663 1664 return err; 1665 } 1666 1667 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) 1668 { 1669 dma_addr_t rss_cpu_buf_pa; 1670 union vnic_rss_cpu *rss_cpu_buf_va = NULL; 1671 unsigned int i; 1672 int err; 1673 1674 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev, 1675 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa); 1676 if (!rss_cpu_buf_va) 1677 return -ENOMEM; 1678 1679 for (i = 0; i < (1 << rss_hash_bits); i++) 1680 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; 1681 1682 spin_lock(&enic->devcmd_lock); 1683 err = enic_set_rss_cpu(enic, 1684 rss_cpu_buf_pa, 1685 sizeof(union vnic_rss_cpu)); 1686 spin_unlock(&enic->devcmd_lock); 1687 1688 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), 1689 rss_cpu_buf_va, rss_cpu_buf_pa); 1690 1691 return err; 1692 } 1693 1694 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, 1695 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable) 1696 { 1697 const u8 tso_ipid_split_en = 0; 1698 const u8 ig_vlan_strip_en = 1; 1699 int err; 1700 1701 /* Enable VLAN tag stripping. 1702 */ 1703 1704 spin_lock(&enic->devcmd_lock); 1705 err = enic_set_nic_cfg(enic, 1706 rss_default_cpu, rss_hash_type, 1707 rss_hash_bits, rss_base_cpu, 1708 rss_enable, tso_ipid_split_en, 1709 ig_vlan_strip_en); 1710 spin_unlock(&enic->devcmd_lock); 1711 1712 return err; 1713 } 1714 1715 static int enic_set_rss_nic_cfg(struct enic *enic) 1716 { 1717 struct device *dev = enic_get_dev(enic); 1718 const u8 rss_default_cpu = 0; 1719 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | 1720 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | 1721 NIC_CFG_RSS_HASH_TYPE_IPV6 | 1722 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; 1723 const u8 rss_hash_bits = 7; 1724 const u8 rss_base_cpu = 0; 1725 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); 1726 1727 if (rss_enable) { 1728 if (!enic_set_rsskey(enic)) { 1729 if (enic_set_rsscpu(enic, rss_hash_bits)) { 1730 rss_enable = 0; 1731 dev_warn(dev, "RSS disabled, " 1732 "Failed to set RSS cpu indirection table."); 1733 } 1734 } else { 1735 rss_enable = 0; 1736 dev_warn(dev, "RSS disabled, Failed to set RSS key.\n"); 1737 } 1738 } 1739 1740 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, 1741 rss_hash_bits, rss_base_cpu, rss_enable); 1742 } 1743 1744 static void enic_reset(struct work_struct *work) 1745 { 1746 struct enic *enic = container_of(work, struct enic, reset); 1747 1748 if (!netif_running(enic->netdev)) 1749 return; 1750 1751 rtnl_lock(); 1752 1753 spin_lock(&enic->enic_api_lock); 1754 enic_dev_hang_notify(enic); 1755 enic_stop(enic->netdev); 1756 enic_dev_hang_reset(enic); 1757 enic_reset_addr_lists(enic); 1758 enic_init_vnic_resources(enic); 1759 enic_set_rss_nic_cfg(enic); 1760 enic_dev_set_ig_vlan_rewrite_mode(enic); 1761 enic_open(enic->netdev); 1762 spin_unlock(&enic->enic_api_lock); 1763 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); 1764 1765 rtnl_unlock(); 1766 } 1767 1768 static int enic_set_intr_mode(struct enic *enic) 1769 { 1770 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); 1771 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); 1772 unsigned int i; 1773 1774 /* Set interrupt mode (INTx, MSI, MSI-X) depending 1775 * on system capabilities. 1776 * 1777 * Try MSI-X first 1778 * 1779 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs 1780 * (the second to last INTR is used for WQ/RQ errors) 1781 * (the last INTR is used for notifications) 1782 */ 1783 1784 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); 1785 for (i = 0; i < n + m + 2; i++) 1786 enic->msix_entry[i].entry = i; 1787 1788 /* Use multiple RQs if RSS is enabled 1789 */ 1790 1791 if (ENIC_SETTING(enic, RSS) && 1792 enic->config.intr_mode < 1 && 1793 enic->rq_count >= n && 1794 enic->wq_count >= m && 1795 enic->cq_count >= n + m && 1796 enic->intr_count >= n + m + 2) { 1797 1798 if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { 1799 1800 enic->rq_count = n; 1801 enic->wq_count = m; 1802 enic->cq_count = n + m; 1803 enic->intr_count = n + m + 2; 1804 1805 vnic_dev_set_intr_mode(enic->vdev, 1806 VNIC_DEV_INTR_MODE_MSIX); 1807 1808 return 0; 1809 } 1810 } 1811 1812 if (enic->config.intr_mode < 1 && 1813 enic->rq_count >= 1 && 1814 enic->wq_count >= m && 1815 enic->cq_count >= 1 + m && 1816 enic->intr_count >= 1 + m + 2) { 1817 if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) { 1818 1819 enic->rq_count = 1; 1820 enic->wq_count = m; 1821 enic->cq_count = 1 + m; 1822 enic->intr_count = 1 + m + 2; 1823 1824 vnic_dev_set_intr_mode(enic->vdev, 1825 VNIC_DEV_INTR_MODE_MSIX); 1826 1827 return 0; 1828 } 1829 } 1830 1831 /* Next try MSI 1832 * 1833 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR 1834 */ 1835 1836 if (enic->config.intr_mode < 2 && 1837 enic->rq_count >= 1 && 1838 enic->wq_count >= 1 && 1839 enic->cq_count >= 2 && 1840 enic->intr_count >= 1 && 1841 !pci_enable_msi(enic->pdev)) { 1842 1843 enic->rq_count = 1; 1844 enic->wq_count = 1; 1845 enic->cq_count = 2; 1846 enic->intr_count = 1; 1847 1848 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); 1849 1850 return 0; 1851 } 1852 1853 /* Next try INTx 1854 * 1855 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs 1856 * (the first INTR is used for WQ/RQ) 1857 * (the second INTR is used for WQ/RQ errors) 1858 * (the last INTR is used for notifications) 1859 */ 1860 1861 if (enic->config.intr_mode < 3 && 1862 enic->rq_count >= 1 && 1863 enic->wq_count >= 1 && 1864 enic->cq_count >= 2 && 1865 enic->intr_count >= 3) { 1866 1867 enic->rq_count = 1; 1868 enic->wq_count = 1; 1869 enic->cq_count = 2; 1870 enic->intr_count = 3; 1871 1872 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); 1873 1874 return 0; 1875 } 1876 1877 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 1878 1879 return -EINVAL; 1880 } 1881 1882 static void enic_clear_intr_mode(struct enic *enic) 1883 { 1884 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1885 case VNIC_DEV_INTR_MODE_MSIX: 1886 pci_disable_msix(enic->pdev); 1887 break; 1888 case VNIC_DEV_INTR_MODE_MSI: 1889 pci_disable_msi(enic->pdev); 1890 break; 1891 default: 1892 break; 1893 } 1894 1895 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 1896 } 1897 1898 static const struct net_device_ops enic_netdev_dynamic_ops = { 1899 .ndo_open = enic_open, 1900 .ndo_stop = enic_stop, 1901 .ndo_start_xmit = enic_hard_start_xmit, 1902 .ndo_get_stats64 = enic_get_stats, 1903 .ndo_validate_addr = eth_validate_addr, 1904 .ndo_set_rx_mode = enic_set_rx_mode, 1905 .ndo_set_mac_address = enic_set_mac_address_dynamic, 1906 .ndo_change_mtu = enic_change_mtu, 1907 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 1908 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, 1909 .ndo_tx_timeout = enic_tx_timeout, 1910 .ndo_set_vf_port = enic_set_vf_port, 1911 .ndo_get_vf_port = enic_get_vf_port, 1912 .ndo_set_vf_mac = enic_set_vf_mac, 1913 #ifdef CONFIG_NET_POLL_CONTROLLER 1914 .ndo_poll_controller = enic_poll_controller, 1915 #endif 1916 }; 1917 1918 static const struct net_device_ops enic_netdev_ops = { 1919 .ndo_open = enic_open, 1920 .ndo_stop = enic_stop, 1921 .ndo_start_xmit = enic_hard_start_xmit, 1922 .ndo_get_stats64 = enic_get_stats, 1923 .ndo_validate_addr = eth_validate_addr, 1924 .ndo_set_mac_address = enic_set_mac_address, 1925 .ndo_set_rx_mode = enic_set_rx_mode, 1926 .ndo_change_mtu = enic_change_mtu, 1927 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 1928 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, 1929 .ndo_tx_timeout = enic_tx_timeout, 1930 .ndo_set_vf_port = enic_set_vf_port, 1931 .ndo_get_vf_port = enic_get_vf_port, 1932 .ndo_set_vf_mac = enic_set_vf_mac, 1933 #ifdef CONFIG_NET_POLL_CONTROLLER 1934 .ndo_poll_controller = enic_poll_controller, 1935 #endif 1936 }; 1937 1938 static void enic_dev_deinit(struct enic *enic) 1939 { 1940 unsigned int i; 1941 1942 for (i = 0; i < enic->rq_count; i++) 1943 netif_napi_del(&enic->napi[i]); 1944 1945 enic_free_vnic_resources(enic); 1946 enic_clear_intr_mode(enic); 1947 } 1948 1949 static int enic_dev_init(struct enic *enic) 1950 { 1951 struct device *dev = enic_get_dev(enic); 1952 struct net_device *netdev = enic->netdev; 1953 unsigned int i; 1954 int err; 1955 1956 /* Get interrupt coalesce timer info */ 1957 err = enic_dev_intr_coal_timer_info(enic); 1958 if (err) { 1959 dev_warn(dev, "Using default conversion factor for " 1960 "interrupt coalesce timer\n"); 1961 vnic_dev_intr_coal_timer_info_default(enic->vdev); 1962 } 1963 1964 /* Get vNIC configuration 1965 */ 1966 1967 err = enic_get_vnic_config(enic); 1968 if (err) { 1969 dev_err(dev, "Get vNIC configuration failed, aborting\n"); 1970 return err; 1971 } 1972 1973 /* Get available resource counts 1974 */ 1975 1976 enic_get_res_counts(enic); 1977 1978 /* Set interrupt mode based on resource counts and system 1979 * capabilities 1980 */ 1981 1982 err = enic_set_intr_mode(enic); 1983 if (err) { 1984 dev_err(dev, "Failed to set intr mode based on resource " 1985 "counts and system capabilities, aborting\n"); 1986 return err; 1987 } 1988 1989 /* Allocate and configure vNIC resources 1990 */ 1991 1992 err = enic_alloc_vnic_resources(enic); 1993 if (err) { 1994 dev_err(dev, "Failed to alloc vNIC resources, aborting\n"); 1995 goto err_out_free_vnic_resources; 1996 } 1997 1998 enic_init_vnic_resources(enic); 1999 2000 err = enic_set_rss_nic_cfg(enic); 2001 if (err) { 2002 dev_err(dev, "Failed to config nic, aborting\n"); 2003 goto err_out_free_vnic_resources; 2004 } 2005 2006 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2007 default: 2008 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); 2009 break; 2010 case VNIC_DEV_INTR_MODE_MSIX: 2011 for (i = 0; i < enic->rq_count; i++) 2012 netif_napi_add(netdev, &enic->napi[i], 2013 enic_poll_msix, 64); 2014 break; 2015 } 2016 2017 return 0; 2018 2019 err_out_free_vnic_resources: 2020 enic_clear_intr_mode(enic); 2021 enic_free_vnic_resources(enic); 2022 2023 return err; 2024 } 2025 2026 static void enic_iounmap(struct enic *enic) 2027 { 2028 unsigned int i; 2029 2030 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) 2031 if (enic->bar[i].vaddr) 2032 iounmap(enic->bar[i].vaddr); 2033 } 2034 2035 static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2036 { 2037 struct device *dev = &pdev->dev; 2038 struct net_device *netdev; 2039 struct enic *enic; 2040 int using_dac = 0; 2041 unsigned int i; 2042 int err; 2043 #ifdef CONFIG_PCI_IOV 2044 int pos = 0; 2045 #endif 2046 int num_pps = 1; 2047 2048 /* Allocate net device structure and initialize. Private 2049 * instance data is initialized to zero. 2050 */ 2051 2052 netdev = alloc_etherdev_mqs(sizeof(struct enic), 2053 ENIC_RQ_MAX, ENIC_WQ_MAX); 2054 if (!netdev) 2055 return -ENOMEM; 2056 2057 pci_set_drvdata(pdev, netdev); 2058 2059 SET_NETDEV_DEV(netdev, &pdev->dev); 2060 2061 enic = netdev_priv(netdev); 2062 enic->netdev = netdev; 2063 enic->pdev = pdev; 2064 2065 /* Setup PCI resources 2066 */ 2067 2068 err = pci_enable_device_mem(pdev); 2069 if (err) { 2070 dev_err(dev, "Cannot enable PCI device, aborting\n"); 2071 goto err_out_free_netdev; 2072 } 2073 2074 err = pci_request_regions(pdev, DRV_NAME); 2075 if (err) { 2076 dev_err(dev, "Cannot request PCI regions, aborting\n"); 2077 goto err_out_disable_device; 2078 } 2079 2080 pci_set_master(pdev); 2081 2082 /* Query PCI controller on system for DMA addressing 2083 * limitation for the device. Try 64-bit first, and 2084 * fail to 32-bit. 2085 */ 2086 2087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2088 if (err) { 2089 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2090 if (err) { 2091 dev_err(dev, "No usable DMA configuration, aborting\n"); 2092 goto err_out_release_regions; 2093 } 2094 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2095 if (err) { 2096 dev_err(dev, "Unable to obtain %u-bit DMA " 2097 "for consistent allocations, aborting\n", 32); 2098 goto err_out_release_regions; 2099 } 2100 } else { 2101 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2102 if (err) { 2103 dev_err(dev, "Unable to obtain %u-bit DMA " 2104 "for consistent allocations, aborting\n", 64); 2105 goto err_out_release_regions; 2106 } 2107 using_dac = 1; 2108 } 2109 2110 /* Map vNIC resources from BAR0-5 2111 */ 2112 2113 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { 2114 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) 2115 continue; 2116 enic->bar[i].len = pci_resource_len(pdev, i); 2117 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); 2118 if (!enic->bar[i].vaddr) { 2119 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i); 2120 err = -ENODEV; 2121 goto err_out_iounmap; 2122 } 2123 enic->bar[i].bus_addr = pci_resource_start(pdev, i); 2124 } 2125 2126 /* Register vNIC device 2127 */ 2128 2129 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, 2130 ARRAY_SIZE(enic->bar)); 2131 if (!enic->vdev) { 2132 dev_err(dev, "vNIC registration failed, aborting\n"); 2133 err = -ENODEV; 2134 goto err_out_iounmap; 2135 } 2136 2137 #ifdef CONFIG_PCI_IOV 2138 /* Get number of subvnics */ 2139 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 2140 if (pos) { 2141 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, 2142 &enic->num_vfs); 2143 if (enic->num_vfs) { 2144 err = pci_enable_sriov(pdev, enic->num_vfs); 2145 if (err) { 2146 dev_err(dev, "SRIOV enable failed, aborting." 2147 " pci_enable_sriov() returned %d\n", 2148 err); 2149 goto err_out_vnic_unregister; 2150 } 2151 enic->priv_flags |= ENIC_SRIOV_ENABLED; 2152 num_pps = enic->num_vfs; 2153 } 2154 } 2155 #endif 2156 2157 /* Allocate structure for port profiles */ 2158 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); 2159 if (!enic->pp) { 2160 err = -ENOMEM; 2161 goto err_out_disable_sriov_pp; 2162 } 2163 2164 /* Issue device open to get device in known state 2165 */ 2166 2167 err = enic_dev_open(enic); 2168 if (err) { 2169 dev_err(dev, "vNIC dev open failed, aborting\n"); 2170 goto err_out_disable_sriov; 2171 } 2172 2173 /* Setup devcmd lock 2174 */ 2175 2176 spin_lock_init(&enic->devcmd_lock); 2177 spin_lock_init(&enic->enic_api_lock); 2178 2179 /* 2180 * Set ingress vlan rewrite mode before vnic initialization 2181 */ 2182 2183 err = enic_dev_set_ig_vlan_rewrite_mode(enic); 2184 if (err) { 2185 dev_err(dev, 2186 "Failed to set ingress vlan rewrite mode, aborting.\n"); 2187 goto err_out_dev_close; 2188 } 2189 2190 /* Issue device init to initialize the vnic-to-switch link. 2191 * We'll start with carrier off and wait for link UP 2192 * notification later to turn on carrier. We don't need 2193 * to wait here for the vnic-to-switch link initialization 2194 * to complete; link UP notification is the indication that 2195 * the process is complete. 2196 */ 2197 2198 netif_carrier_off(netdev); 2199 2200 /* Do not call dev_init for a dynamic vnic. 2201 * For a dynamic vnic, init_prov_info will be 2202 * called later by an upper layer. 2203 */ 2204 2205 if (!enic_is_dynamic(enic)) { 2206 err = vnic_dev_init(enic->vdev, 0); 2207 if (err) { 2208 dev_err(dev, "vNIC dev init failed, aborting\n"); 2209 goto err_out_dev_close; 2210 } 2211 } 2212 2213 err = enic_dev_init(enic); 2214 if (err) { 2215 dev_err(dev, "Device initialization failed, aborting\n"); 2216 goto err_out_dev_close; 2217 } 2218 2219 netif_set_real_num_tx_queues(netdev, enic->wq_count); 2220 netif_set_real_num_rx_queues(netdev, enic->rq_count); 2221 2222 /* Setup notification timer, HW reset task, and wq locks 2223 */ 2224 2225 init_timer(&enic->notify_timer); 2226 enic->notify_timer.function = enic_notify_timer; 2227 enic->notify_timer.data = (unsigned long)enic; 2228 2229 INIT_WORK(&enic->reset, enic_reset); 2230 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); 2231 2232 for (i = 0; i < enic->wq_count; i++) 2233 spin_lock_init(&enic->wq_lock[i]); 2234 2235 /* Register net device 2236 */ 2237 2238 enic->port_mtu = enic->config.mtu; 2239 (void)enic_change_mtu(netdev, enic->port_mtu); 2240 2241 err = enic_set_mac_addr(netdev, enic->mac_addr); 2242 if (err) { 2243 dev_err(dev, "Invalid MAC address, aborting\n"); 2244 goto err_out_dev_deinit; 2245 } 2246 2247 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 2248 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; 2249 2250 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2251 netdev->netdev_ops = &enic_netdev_dynamic_ops; 2252 else 2253 netdev->netdev_ops = &enic_netdev_ops; 2254 2255 netdev->watchdog_timeo = 2 * HZ; 2256 enic_set_ethtool_ops(netdev); 2257 2258 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 2259 if (ENIC_SETTING(enic, LOOP)) { 2260 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX; 2261 enic->loop_enable = 1; 2262 enic->loop_tag = enic->config.loop_tag; 2263 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); 2264 } 2265 if (ENIC_SETTING(enic, TXCSUM)) 2266 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2267 if (ENIC_SETTING(enic, TSO)) 2268 netdev->hw_features |= NETIF_F_TSO | 2269 NETIF_F_TSO6 | NETIF_F_TSO_ECN; 2270 if (ENIC_SETTING(enic, RSS)) 2271 netdev->hw_features |= NETIF_F_RXHASH; 2272 if (ENIC_SETTING(enic, RXCSUM)) 2273 netdev->hw_features |= NETIF_F_RXCSUM; 2274 2275 netdev->features |= netdev->hw_features; 2276 2277 if (using_dac) 2278 netdev->features |= NETIF_F_HIGHDMA; 2279 2280 netdev->priv_flags |= IFF_UNICAST_FLT; 2281 2282 err = register_netdev(netdev); 2283 if (err) { 2284 dev_err(dev, "Cannot register net device, aborting\n"); 2285 goto err_out_dev_deinit; 2286 } 2287 2288 return 0; 2289 2290 err_out_dev_deinit: 2291 enic_dev_deinit(enic); 2292 err_out_dev_close: 2293 vnic_dev_close(enic->vdev); 2294 err_out_disable_sriov: 2295 kfree(enic->pp); 2296 err_out_disable_sriov_pp: 2297 #ifdef CONFIG_PCI_IOV 2298 if (enic_sriov_enabled(enic)) { 2299 pci_disable_sriov(pdev); 2300 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; 2301 } 2302 err_out_vnic_unregister: 2303 #endif 2304 vnic_dev_unregister(enic->vdev); 2305 err_out_iounmap: 2306 enic_iounmap(enic); 2307 err_out_release_regions: 2308 pci_release_regions(pdev); 2309 err_out_disable_device: 2310 pci_disable_device(pdev); 2311 err_out_free_netdev: 2312 free_netdev(netdev); 2313 2314 return err; 2315 } 2316 2317 static void enic_remove(struct pci_dev *pdev) 2318 { 2319 struct net_device *netdev = pci_get_drvdata(pdev); 2320 2321 if (netdev) { 2322 struct enic *enic = netdev_priv(netdev); 2323 2324 cancel_work_sync(&enic->reset); 2325 cancel_work_sync(&enic->change_mtu_work); 2326 unregister_netdev(netdev); 2327 enic_dev_deinit(enic); 2328 vnic_dev_close(enic->vdev); 2329 #ifdef CONFIG_PCI_IOV 2330 if (enic_sriov_enabled(enic)) { 2331 pci_disable_sriov(pdev); 2332 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; 2333 } 2334 #endif 2335 kfree(enic->pp); 2336 vnic_dev_unregister(enic->vdev); 2337 enic_iounmap(enic); 2338 pci_release_regions(pdev); 2339 pci_disable_device(pdev); 2340 free_netdev(netdev); 2341 } 2342 } 2343 2344 static struct pci_driver enic_driver = { 2345 .name = DRV_NAME, 2346 .id_table = enic_id_table, 2347 .probe = enic_probe, 2348 .remove = enic_remove, 2349 }; 2350 2351 static int __init enic_init_module(void) 2352 { 2353 pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); 2354 2355 return pci_register_driver(&enic_driver); 2356 } 2357 2358 static void __exit enic_cleanup_module(void) 2359 { 2360 pci_unregister_driver(&enic_driver); 2361 } 2362 2363 module_init(enic_init_module); 2364 module_exit(enic_cleanup_module); 2365