1 /******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 29 /****************************************************************************** 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 31 ******************************************************************************/ 32 33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35 #include <linux/types.h> 36 #include <linux/bitops.h> 37 #include <linux/module.h> 38 #include <linux/pci.h> 39 #include <linux/netdevice.h> 40 #include <linux/vmalloc.h> 41 #include <linux/string.h> 42 #include <linux/in.h> 43 #include <linux/ip.h> 44 #include <linux/tcp.h> 45 #include <linux/sctp.h> 46 #include <linux/ipv6.h> 47 #include <linux/slab.h> 48 #include <net/checksum.h> 49 #include <net/ip6_checksum.h> 50 #include <linux/ethtool.h> 51 #include <linux/if.h> 52 #include <linux/if_vlan.h> 53 #include <linux/prefetch.h> 54 55 #include "ixgbevf.h" 56 57 const char ixgbevf_driver_name[] = "ixgbevf"; 58 static const char ixgbevf_driver_string[] = 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 60 61 #define DRV_VERSION "2.11.3-k" 62 const char ixgbevf_driver_version[] = DRV_VERSION; 63 static char ixgbevf_copyright[] = 64 "Copyright (c) 2009 - 2012 Intel Corporation."; 65 66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 67 [board_82599_vf] = &ixgbevf_82599_vf_info, 68 [board_X540_vf] = &ixgbevf_X540_vf_info, 69 }; 70 71 /* ixgbevf_pci_tbl - PCI Device ID Table 72 * 73 * Wildcard entries (PCI_ANY_ID) should come last 74 * Last entry must be all 0s 75 * 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 77 * Class, Class Mask, private data (not used) } 78 */ 79 static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, 82 /* required last entry */ 83 {0, } 84 }; 85 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 86 87 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 88 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 89 MODULE_LICENSE("GPL"); 90 MODULE_VERSION(DRV_VERSION); 91 92 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 93 static int debug = -1; 94 module_param(debug, int, 0); 95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 96 97 /* forward decls */ 98 static void ixgbevf_set_itr(struct ixgbevf_q_vector 
*q_vector); 99 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 100 101 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 102 struct ixgbevf_ring *rx_ring, 103 u32 val) 104 { 105 /* 106 * Force memory writes to complete before letting h/w 107 * know there are new descriptors to fetch. (Only 108 * applicable for weak-ordered memory model archs, 109 * such as IA-64). 110 */ 111 wmb(); 112 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 113 } 114 115 /** 116 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 117 * @adapter: pointer to adapter struct 118 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 119 * @queue: queue to map the corresponding interrupt to 120 * @msix_vector: the vector to map to the corresponding queue 121 */ 122 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 123 u8 queue, u8 msix_vector) 124 { 125 u32 ivar, index; 126 struct ixgbe_hw *hw = &adapter->hw; 127 if (direction == -1) { 128 /* other causes */ 129 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 130 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 131 ivar &= ~0xFF; 132 ivar |= msix_vector; 133 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 134 } else { 135 /* tx or rx causes */ 136 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 137 index = ((16 * (queue & 1)) + (8 * direction)); 138 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 139 ivar &= ~(0xFF << index); 140 ivar |= (msix_vector << index); 141 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 142 } 143 } 144 145 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 146 struct ixgbevf_tx_buffer 147 *tx_buffer_info) 148 { 149 if (tx_buffer_info->dma) { 150 if (tx_buffer_info->mapped_as_page) 151 dma_unmap_page(tx_ring->dev, 152 tx_buffer_info->dma, 153 tx_buffer_info->length, 154 DMA_TO_DEVICE); 155 else 156 dma_unmap_single(tx_ring->dev, 157 tx_buffer_info->dma, 158 tx_buffer_info->length, 159 DMA_TO_DEVICE); 160 tx_buffer_info->dma = 0; 161 } 162 if (tx_buffer_info->skb) { 163 dev_kfree_skb_any(tx_buffer_info->skb); 164 tx_buffer_info->skb = NULL; 165 } 166 tx_buffer_info->time_stamp = 0; 167 /* tx_buffer_info must be completely set up in the transmit path */ 168 } 169 170 #define IXGBE_MAX_TXD_PWR 14 171 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 172 173 /* Tx Descriptors needed, worst case */ 174 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 175 #define DESC_NEEDED (MAX_SKB_FRAGS + 4) 176 177 static void ixgbevf_tx_timeout(struct net_device *netdev); 178 179 /** 180 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 181 * @q_vector: board private structure 182 * @tx_ring: tx ring to clean 183 **/ 184 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 185 struct ixgbevf_ring *tx_ring) 186 { 187 struct ixgbevf_adapter *adapter = q_vector->adapter; 188 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 189 struct ixgbevf_tx_buffer *tx_buffer_info; 190 unsigned int i, count = 0; 191 unsigned int total_bytes = 0, total_packets = 0; 192 193 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 194 return true; 195 196 i = tx_ring->next_to_clean; 197 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 198 eop_desc = tx_buffer_info->next_to_watch; 199 200 do { 201 bool cleaned = false; 202 203 /* if next_to_watch is not set then there is no work pending */ 204 if (!eop_desc) 205 break; 206 207 /* prevent any other reads prior to eop_desc */ 208 read_barrier_depends(); 209 210 /* if DD is not set pending 
work has not been completed */ 211 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 212 break; 213 214 /* clear next_to_watch to prevent false hangs */ 215 tx_buffer_info->next_to_watch = NULL; 216 217 for ( ; !cleaned; count++) { 218 struct sk_buff *skb; 219 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 220 cleaned = (tx_desc == eop_desc); 221 skb = tx_buffer_info->skb; 222 223 if (cleaned && skb) { 224 unsigned int segs, bytecount; 225 226 /* gso_segs is currently only valid for tcp */ 227 segs = skb_shinfo(skb)->gso_segs ?: 1; 228 /* multiply data chunks by size of headers */ 229 bytecount = ((segs - 1) * skb_headlen(skb)) + 230 skb->len; 231 total_packets += segs; 232 total_bytes += bytecount; 233 } 234 235 ixgbevf_unmap_and_free_tx_resource(tx_ring, 236 tx_buffer_info); 237 238 tx_desc->wb.status = 0; 239 240 i++; 241 if (i == tx_ring->count) 242 i = 0; 243 244 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 245 } 246 247 eop_desc = tx_buffer_info->next_to_watch; 248 } while (count < tx_ring->count); 249 250 tx_ring->next_to_clean = i; 251 252 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 253 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && 254 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 255 /* Make sure that anybody stopping the queue after this 256 * sees the new next_to_clean. 257 */ 258 smp_mb(); 259 if (__netif_subqueue_stopped(tx_ring->netdev, 260 tx_ring->queue_index) && 261 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 262 netif_wake_subqueue(tx_ring->netdev, 263 tx_ring->queue_index); 264 ++adapter->restart_queue; 265 } 266 } 267 268 u64_stats_update_begin(&tx_ring->syncp); 269 tx_ring->total_bytes += total_bytes; 270 tx_ring->total_packets += total_packets; 271 u64_stats_update_end(&tx_ring->syncp); 272 q_vector->tx.total_bytes += total_bytes; 273 q_vector->tx.total_packets += total_packets; 274 275 return count < tx_ring->count; 276 } 277 278 /** 279 * ixgbevf_receive_skb - Send a completed packet up the stack 280 * @q_vector: structure containing interrupt and ring information 281 * @skb: packet to send up 282 * @status: hardware indication of status of receive 283 * @rx_desc: rx descriptor 284 **/ 285 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 286 struct sk_buff *skb, u8 status, 287 union ixgbe_adv_rx_desc *rx_desc) 288 { 289 struct ixgbevf_adapter *adapter = q_vector->adapter; 290 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 291 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 292 293 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 294 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 295 296 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 297 napi_gro_receive(&q_vector->napi, skb); 298 else 299 netif_rx(skb); 300 } 301 302 /** 303 * ixgbevf_rx_skb - Helper function to determine proper Rx method 304 * @q_vector: structure containing interrupt and ring information 305 * @skb: packet to send up 306 * @status: hardware indication of status of receive 307 * @rx_desc: rx descriptor 308 **/ 309 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, 310 struct sk_buff *skb, u8 status, 311 union ixgbe_adv_rx_desc *rx_desc) 312 { 313 #ifdef CONFIG_NET_RX_BUSY_POLL 314 skb_mark_napi_id(skb, &q_vector->napi); 315 316 if (ixgbevf_qv_busy_polling(q_vector)) { 317 netif_receive_skb(skb); 318 /* exit early if we busy polled */ 319 return; 320 } 321 #endif /* CONFIG_NET_RX_BUSY_POLL */ 322 323 ixgbevf_receive_skb(q_vector, skb, status, rx_desc); 324 } 325 326 /** 327 * ixgbevf_rx_checksum - indicate in skb if hw 
indicated a good cksum 328 * @ring: pointer to Rx descriptor ring structure 329 * @status_err: hardware indication of status of receive 330 * @skb: skb currently being received and modified 331 **/ 332 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 333 u32 status_err, struct sk_buff *skb) 334 { 335 skb_checksum_none_assert(skb); 336 337 /* Rx csum disabled */ 338 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 339 return; 340 341 /* if IP and error */ 342 if ((status_err & IXGBE_RXD_STAT_IPCS) && 343 (status_err & IXGBE_RXDADV_ERR_IPE)) { 344 ring->hw_csum_rx_error++; 345 return; 346 } 347 348 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 349 return; 350 351 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 352 ring->hw_csum_rx_error++; 353 return; 354 } 355 356 /* It must be a TCP or UDP packet with a valid checksum */ 357 skb->ip_summed = CHECKSUM_UNNECESSARY; 358 ring->hw_csum_rx_good++; 359 } 360 361 /** 362 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 363 * @adapter: address of board private structure 364 **/ 365 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 366 struct ixgbevf_ring *rx_ring, 367 int cleaned_count) 368 { 369 struct pci_dev *pdev = adapter->pdev; 370 union ixgbe_adv_rx_desc *rx_desc; 371 struct ixgbevf_rx_buffer *bi; 372 unsigned int i = rx_ring->next_to_use; 373 374 bi = &rx_ring->rx_buffer_info[i]; 375 376 while (cleaned_count--) { 377 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 378 379 if (!bi->skb) { 380 struct sk_buff *skb; 381 382 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 383 rx_ring->rx_buf_len); 384 if (!skb) { 385 adapter->alloc_rx_buff_failed++; 386 goto no_buffers; 387 } 388 bi->skb = skb; 389 390 bi->dma = dma_map_single(&pdev->dev, skb->data, 391 rx_ring->rx_buf_len, 392 DMA_FROM_DEVICE); 393 if (dma_mapping_error(&pdev->dev, bi->dma)) { 394 dev_kfree_skb(skb); 395 bi->skb = NULL; 396 dev_err(&pdev->dev, "RX DMA map failed\n"); 397 break; 398 } 399 } 400 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 401 402 i++; 403 if (i == rx_ring->count) 404 i = 0; 405 bi = &rx_ring->rx_buffer_info[i]; 406 } 407 408 no_buffers: 409 if (rx_ring->next_to_use != i) { 410 rx_ring->next_to_use = i; 411 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 412 } 413 } 414 415 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 416 u32 qmask) 417 { 418 struct ixgbe_hw *hw = &adapter->hw; 419 420 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 421 } 422 423 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 424 struct ixgbevf_ring *rx_ring, 425 int budget) 426 { 427 struct ixgbevf_adapter *adapter = q_vector->adapter; 428 struct pci_dev *pdev = adapter->pdev; 429 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 430 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 431 struct sk_buff *skb; 432 unsigned int i; 433 u32 len, staterr; 434 int cleaned_count = 0; 435 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 436 437 i = rx_ring->next_to_clean; 438 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 439 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 440 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 441 442 while (staterr & IXGBE_RXD_STAT_DD) { 443 if (!budget) 444 break; 445 budget--; 446 447 rmb(); /* read descriptor and rx_buffer_info after status DD */ 448 len = le16_to_cpu(rx_desc->wb.upper.length); 449 skb = rx_buffer_info->skb; 450 prefetch(skb->data - NET_IP_ALIGN); 451 rx_buffer_info->skb = NULL; 452 453 if (rx_buffer_info->dma) { 454 dma_unmap_single(&pdev->dev, 
					 rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers; if we did, drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(adapter->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
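 *
 * The Rx budget is split evenly across this vector's Rx rings (see
 * per_ring_budget below).  Returning the full budget tells the NAPI core
 * to keep polling; once every ring reports clean, the routine calls
 * napi_complete(), optionally recomputes the adaptive ITR value, and
 * re-arms this vector's queue interrupt through VTEIMS.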
554 **/ 555 static int ixgbevf_poll(struct napi_struct *napi, int budget) 556 { 557 struct ixgbevf_q_vector *q_vector = 558 container_of(napi, struct ixgbevf_q_vector, napi); 559 struct ixgbevf_adapter *adapter = q_vector->adapter; 560 struct ixgbevf_ring *ring; 561 int per_ring_budget; 562 bool clean_complete = true; 563 564 ixgbevf_for_each_ring(ring, q_vector->tx) 565 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); 566 567 #ifdef CONFIG_NET_RX_BUSY_POLL 568 if (!ixgbevf_qv_lock_napi(q_vector)) 569 return budget; 570 #endif 571 572 /* attempt to distribute budget to each queue fairly, but don't allow 573 * the budget to go below 1 because we'll exit polling */ 574 if (q_vector->rx.count > 1) 575 per_ring_budget = max(budget/q_vector->rx.count, 1); 576 else 577 per_ring_budget = budget; 578 579 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 580 ixgbevf_for_each_ring(ring, q_vector->rx) 581 clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, 582 per_ring_budget) 583 < per_ring_budget); 584 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 585 586 #ifdef CONFIG_NET_RX_BUSY_POLL 587 ixgbevf_qv_unlock_napi(q_vector); 588 #endif 589 590 /* If all work not completed, return budget and keep polling */ 591 if (!clean_complete) 592 return budget; 593 /* all work done, exit the polling mode */ 594 napi_complete(napi); 595 if (adapter->rx_itr_setting & 1) 596 ixgbevf_set_itr(q_vector); 597 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 598 ixgbevf_irq_enable_queues(adapter, 599 1 << q_vector->v_idx); 600 601 return 0; 602 } 603 604 /** 605 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 606 * @q_vector: structure containing interrupt and ring information 607 */ 608 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 609 { 610 struct ixgbevf_adapter *adapter = q_vector->adapter; 611 struct ixgbe_hw *hw = &adapter->hw; 612 int v_idx = q_vector->v_idx; 613 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 614 615 /* 616 * set the WDIS bit to not clear the timer bits and cause an 617 * immediate assertion of the interrupt 618 */ 619 itr_reg |= IXGBE_EITR_CNT_WDIS; 620 621 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 622 } 623 624 #ifdef CONFIG_NET_RX_BUSY_POLL 625 /* must be called with local_bh_disable()d */ 626 static int ixgbevf_busy_poll_recv(struct napi_struct *napi) 627 { 628 struct ixgbevf_q_vector *q_vector = 629 container_of(napi, struct ixgbevf_q_vector, napi); 630 struct ixgbevf_adapter *adapter = q_vector->adapter; 631 struct ixgbevf_ring *ring; 632 int found = 0; 633 634 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 635 return LL_FLUSH_FAILED; 636 637 if (!ixgbevf_qv_lock_poll(q_vector)) 638 return LL_FLUSH_BUSY; 639 640 ixgbevf_for_each_ring(ring, q_vector->rx) { 641 found = ixgbevf_clean_rx_irq(q_vector, ring, 4); 642 #ifdef BP_EXTENDED_STATS 643 if (found) 644 ring->bp_cleaned += found; 645 else 646 ring->bp_misses++; 647 #endif 648 if (found) 649 break; 650 } 651 652 ixgbevf_qv_unlock_poll(q_vector); 653 654 return found; 655 } 656 #endif /* CONFIG_NET_RX_BUSY_POLL */ 657 658 /** 659 * ixgbevf_configure_msix - Configure MSI-X hardware 660 * @adapter: board private structure 661 * 662 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 663 * interrupts. 
664 **/ 665 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 666 { 667 struct ixgbevf_q_vector *q_vector; 668 int q_vectors, v_idx; 669 670 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 671 adapter->eims_enable_mask = 0; 672 673 /* 674 * Populate the IVAR table and set the ITR values to the 675 * corresponding register. 676 */ 677 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 678 struct ixgbevf_ring *ring; 679 q_vector = adapter->q_vector[v_idx]; 680 681 ixgbevf_for_each_ring(ring, q_vector->rx) 682 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 683 684 ixgbevf_for_each_ring(ring, q_vector->tx) 685 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 686 687 if (q_vector->tx.ring && !q_vector->rx.ring) { 688 /* tx only vector */ 689 if (adapter->tx_itr_setting == 1) 690 q_vector->itr = IXGBE_10K_ITR; 691 else 692 q_vector->itr = adapter->tx_itr_setting; 693 } else { 694 /* rx or rx/tx vector */ 695 if (adapter->rx_itr_setting == 1) 696 q_vector->itr = IXGBE_20K_ITR; 697 else 698 q_vector->itr = adapter->rx_itr_setting; 699 } 700 701 /* add q_vector eims value to global eims_enable_mask */ 702 adapter->eims_enable_mask |= 1 << v_idx; 703 704 ixgbevf_write_eitr(q_vector); 705 } 706 707 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 708 /* setup eims_other and add value to global eims_enable_mask */ 709 adapter->eims_other = 1 << v_idx; 710 adapter->eims_enable_mask |= adapter->eims_other; 711 } 712 713 enum latency_range { 714 lowest_latency = 0, 715 low_latency = 1, 716 bulk_latency = 2, 717 latency_invalid = 255 718 }; 719 720 /** 721 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 722 * @q_vector: structure containing interrupt and ring information 723 * @ring_container: structure containing ring performance data 724 * 725 * Stores a new ITR value based on packets and byte 726 * counts during the last interrupt. The advantage of per interrupt 727 * computation is faster updates and more accurate ITR for the current 728 * traffic pattern. Constants in this function were computed 729 * based on theoretical maximum wire speed and thresholds were set based 730 * on testing data as well as attempting to minimize response time 731 * while increasing bulk throughput. 732 **/ 733 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 734 struct ixgbevf_ring_container *ring_container) 735 { 736 int bytes = ring_container->total_bytes; 737 int packets = ring_container->total_packets; 738 u32 timepassed_us; 739 u64 bytes_perint; 740 u8 itr_setting = ring_container->itr; 741 742 if (packets == 0) 743 return; 744 745 /* simple throttlerate management 746 * 0-20MB/s lowest (100000 ints/s) 747 * 20-100MB/s low (20000 ints/s) 748 * 100-1249MB/s bulk (8000 ints/s) 749 */ 750 /* what was last interrupt timeslice? 
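 * q_vector->itr holds the EITR encoding, which here works out to four
 * times the interval in microseconds, so the shift by two below yields
 * usecs (e.g. the 20K ints/s setting corresponds to a 50 usec timeslice;
 * 3000 bytes in that window gives 60 bytes/usec, above the bulk
 * threshold of 20 used in the switch below)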
*/ 751 timepassed_us = q_vector->itr >> 2; 752 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 753 754 switch (itr_setting) { 755 case lowest_latency: 756 if (bytes_perint > 10) 757 itr_setting = low_latency; 758 break; 759 case low_latency: 760 if (bytes_perint > 20) 761 itr_setting = bulk_latency; 762 else if (bytes_perint <= 10) 763 itr_setting = lowest_latency; 764 break; 765 case bulk_latency: 766 if (bytes_perint <= 20) 767 itr_setting = low_latency; 768 break; 769 } 770 771 /* clear work counters since we have the values we need */ 772 ring_container->total_bytes = 0; 773 ring_container->total_packets = 0; 774 775 /* write updated itr to ring container */ 776 ring_container->itr = itr_setting; 777 } 778 779 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 780 { 781 u32 new_itr = q_vector->itr; 782 u8 current_itr; 783 784 ixgbevf_update_itr(q_vector, &q_vector->tx); 785 ixgbevf_update_itr(q_vector, &q_vector->rx); 786 787 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 788 789 switch (current_itr) { 790 /* counts and packets in update_itr are dependent on these numbers */ 791 case lowest_latency: 792 new_itr = IXGBE_100K_ITR; 793 break; 794 case low_latency: 795 new_itr = IXGBE_20K_ITR; 796 break; 797 case bulk_latency: 798 default: 799 new_itr = IXGBE_8K_ITR; 800 break; 801 } 802 803 if (new_itr != q_vector->itr) { 804 /* do an exponential smoothing */ 805 new_itr = (10 * new_itr * q_vector->itr) / 806 ((9 * new_itr) + q_vector->itr); 807 808 /* save the algorithm value here */ 809 q_vector->itr = new_itr; 810 811 ixgbevf_write_eitr(q_vector); 812 } 813 } 814 815 static irqreturn_t ixgbevf_msix_other(int irq, void *data) 816 { 817 struct ixgbevf_adapter *adapter = data; 818 struct ixgbe_hw *hw = &adapter->hw; 819 820 hw->mac.get_link_status = 1; 821 822 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 823 mod_timer(&adapter->watchdog_timer, jiffies); 824 825 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 826 827 return IRQ_HANDLED; 828 } 829 830 /** 831 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 832 * @irq: unused 833 * @data: pointer to our q_vector struct for this interrupt vector 834 **/ 835 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 836 { 837 struct ixgbevf_q_vector *q_vector = data; 838 839 /* EIAM disabled interrupts (on this vector) for us */ 840 if (q_vector->rx.ring || q_vector->tx.ring) 841 napi_schedule(&q_vector->napi); 842 843 return IRQ_HANDLED; 844 } 845 846 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 847 int r_idx) 848 { 849 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 850 851 a->rx_ring[r_idx].next = q_vector->rx.ring; 852 q_vector->rx.ring = &a->rx_ring[r_idx]; 853 q_vector->rx.count++; 854 } 855 856 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 857 int t_idx) 858 { 859 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 860 861 a->tx_ring[t_idx].next = q_vector->tx.ring; 862 q_vector->tx.ring = &a->tx_ring[t_idx]; 863 q_vector->tx.count++; 864 } 865 866 /** 867 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 868 * @adapter: board private structure to initialize 869 * 870 * This function maps descriptor rings to the queue-specific vectors 871 * we were allotted through the MSI-X enabling code. Ideally, we'd have 872 * one vector per ring/queue, but on a constrained vector budget, we 873 * group the rings as "efficiently" as possible. 
You would add new 874 * mapping configurations in here. 875 **/ 876 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 877 { 878 int q_vectors; 879 int v_start = 0; 880 int rxr_idx = 0, txr_idx = 0; 881 int rxr_remaining = adapter->num_rx_queues; 882 int txr_remaining = adapter->num_tx_queues; 883 int i, j; 884 int rqpv, tqpv; 885 int err = 0; 886 887 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 888 889 /* 890 * The ideal configuration... 891 * We have enough vectors to map one per queue. 892 */ 893 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 894 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 895 map_vector_to_rxq(adapter, v_start, rxr_idx); 896 897 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 898 map_vector_to_txq(adapter, v_start, txr_idx); 899 goto out; 900 } 901 902 /* 903 * If we don't have enough vectors for a 1-to-1 904 * mapping, we'll have to group them so there are 905 * multiple queues per vector. 906 */ 907 /* Re-adjusting *qpv takes care of the remainder. */ 908 for (i = v_start; i < q_vectors; i++) { 909 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 910 for (j = 0; j < rqpv; j++) { 911 map_vector_to_rxq(adapter, i, rxr_idx); 912 rxr_idx++; 913 rxr_remaining--; 914 } 915 } 916 for (i = v_start; i < q_vectors; i++) { 917 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 918 for (j = 0; j < tqpv; j++) { 919 map_vector_to_txq(adapter, i, txr_idx); 920 txr_idx++; 921 txr_remaining--; 922 } 923 } 924 925 out: 926 return err; 927 } 928 929 /** 930 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 931 * @adapter: board private structure 932 * 933 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 934 * interrupts from the kernel. 935 **/ 936 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 937 { 938 struct net_device *netdev = adapter->netdev; 939 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 940 int vector, err; 941 int ri = 0, ti = 0; 942 943 for (vector = 0; vector < q_vectors; vector++) { 944 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 945 struct msix_entry *entry = &adapter->msix_entries[vector]; 946 947 if (q_vector->tx.ring && q_vector->rx.ring) { 948 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 949 "%s-%s-%d", netdev->name, "TxRx", ri++); 950 ti++; 951 } else if (q_vector->rx.ring) { 952 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 953 "%s-%s-%d", netdev->name, "rx", ri++); 954 } else if (q_vector->tx.ring) { 955 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 956 "%s-%s-%d", netdev->name, "tx", ti++); 957 } else { 958 /* skip this unused q_vector */ 959 continue; 960 } 961 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 962 q_vector->name, q_vector); 963 if (err) { 964 hw_dbg(&adapter->hw, 965 "request_irq failed for MSIX interrupt " 966 "Error: %d\n", err); 967 goto free_queue_irqs; 968 } 969 } 970 971 err = request_irq(adapter->msix_entries[vector].vector, 972 &ixgbevf_msix_other, 0, netdev->name, adapter); 973 if (err) { 974 hw_dbg(&adapter->hw, 975 "request_irq for msix_other failed: %d\n", err); 976 goto free_queue_irqs; 977 } 978 979 return 0; 980 981 free_queue_irqs: 982 while (vector) { 983 vector--; 984 free_irq(adapter->msix_entries[vector].vector, 985 adapter->q_vector[vector]); 986 } 987 /* This failure is non-recoverable - it indicates the system is 988 * out of MSIX vector resources and the VF driver cannot run 989 * without them. 
Set the number of msix vectors to zero 990 * indicating that not enough can be allocated. The error 991 * will be returned to the user indicating device open failed. 992 * Any further attempts to force the driver to open will also 993 * fail. The only way to recover is to unload the driver and 994 * reload it again. If the system has recovered some MSIX 995 * vectors then it may succeed. 996 */ 997 adapter->num_msix_vectors = 0; 998 return err; 999 } 1000 1001 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 1002 { 1003 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1004 1005 for (i = 0; i < q_vectors; i++) { 1006 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 1007 q_vector->rx.ring = NULL; 1008 q_vector->tx.ring = NULL; 1009 q_vector->rx.count = 0; 1010 q_vector->tx.count = 0; 1011 } 1012 } 1013 1014 /** 1015 * ixgbevf_request_irq - initialize interrupts 1016 * @adapter: board private structure 1017 * 1018 * Attempts to configure interrupts using the best available 1019 * capabilities of the hardware and kernel. 1020 **/ 1021 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 1022 { 1023 int err = 0; 1024 1025 err = ixgbevf_request_msix_irqs(adapter); 1026 1027 if (err) 1028 hw_dbg(&adapter->hw, 1029 "request_irq failed, Error %d\n", err); 1030 1031 return err; 1032 } 1033 1034 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 1035 { 1036 int i, q_vectors; 1037 1038 q_vectors = adapter->num_msix_vectors; 1039 i = q_vectors - 1; 1040 1041 free_irq(adapter->msix_entries[i].vector, adapter); 1042 i--; 1043 1044 for (; i >= 0; i--) { 1045 /* free only the irqs that were actually requested */ 1046 if (!adapter->q_vector[i]->rx.ring && 1047 !adapter->q_vector[i]->tx.ring) 1048 continue; 1049 1050 free_irq(adapter->msix_entries[i].vector, 1051 adapter->q_vector[i]); 1052 } 1053 1054 ixgbevf_reset_q_vectors(adapter); 1055 } 1056 1057 /** 1058 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 1059 * @adapter: board private structure 1060 **/ 1061 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 1062 { 1063 struct ixgbe_hw *hw = &adapter->hw; 1064 int i; 1065 1066 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 1067 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1068 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 1069 1070 IXGBE_WRITE_FLUSH(hw); 1071 1072 for (i = 0; i < adapter->num_msix_vectors; i++) 1073 synchronize_irq(adapter->msix_entries[i].vector); 1074 } 1075 1076 /** 1077 * ixgbevf_irq_enable - Enable default interrupt generation settings 1078 * @adapter: board private structure 1079 **/ 1080 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1081 { 1082 struct ixgbe_hw *hw = &adapter->hw; 1083 1084 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1085 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1086 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1087 } 1088 1089 /** 1090 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1091 * @adapter: board private structure 1092 * 1093 * Configure the Tx unit of the MAC after a reset. 
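 *
 * Each ring's 64-bit descriptor base is split across VFTDBAL/VFTDBAH,
 * VFTDLEN is programmed in bytes of descriptors, and head/tail start at
 * zero so the ring is empty until the transmit path advances the tail.
 * Head write-back relaxed ordering is disabled to keep completion
 * bookkeeping in order.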
1094 **/ 1095 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1096 { 1097 u64 tdba; 1098 struct ixgbe_hw *hw = &adapter->hw; 1099 u32 i, j, tdlen, txctrl; 1100 1101 /* Setup the HW Tx Head and Tail descriptor pointers */ 1102 for (i = 0; i < adapter->num_tx_queues; i++) { 1103 struct ixgbevf_ring *ring = &adapter->tx_ring[i]; 1104 j = ring->reg_idx; 1105 tdba = ring->dma; 1106 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1107 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1108 (tdba & DMA_BIT_MASK(32))); 1109 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1110 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); 1111 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); 1112 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); 1113 adapter->tx_ring[i].head = IXGBE_VFTDH(j); 1114 adapter->tx_ring[i].tail = IXGBE_VFTDT(j); 1115 /* Disable Tx Head Writeback RO bit, since this hoses 1116 * bookkeeping if things aren't delivered in order. 1117 */ 1118 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1119 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1120 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1121 } 1122 } 1123 1124 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1125 1126 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1127 { 1128 struct ixgbevf_ring *rx_ring; 1129 struct ixgbe_hw *hw = &adapter->hw; 1130 u32 srrctl; 1131 1132 rx_ring = &adapter->rx_ring[index]; 1133 1134 srrctl = IXGBE_SRRCTL_DROP_EN; 1135 1136 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1137 1138 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> 1139 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1140 1141 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1142 } 1143 1144 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) 1145 { 1146 struct ixgbe_hw *hw = &adapter->hw; 1147 1148 /* PSRTYPE must be initialized in 82599 */ 1149 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 1150 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | 1151 IXGBE_PSRTYPE_L2HDR; 1152 1153 if (adapter->num_rx_queues > 1) 1154 psrtype |= 1 << 29; 1155 1156 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1157 } 1158 1159 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) 1160 { 1161 struct ixgbe_hw *hw = &adapter->hw; 1162 struct net_device *netdev = adapter->netdev; 1163 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1164 int i; 1165 u16 rx_buf_len; 1166 1167 /* notify the PF of our intent to use this size of frame */ 1168 ixgbevf_rlpml_set_vf(hw, max_frame); 1169 1170 /* PF will allow an extra 4 bytes past for vlan tagged frames */ 1171 max_frame += VLAN_HLEN; 1172 1173 /* 1174 * Allocate buffer sizes that fit well into 32K and 1175 * take into account max frame size of 9.5K 1176 */ 1177 if ((hw->mac.type == ixgbe_mac_X540_vf) && 1178 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) 1179 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1180 else if (max_frame <= IXGBEVF_RXBUFFER_2K) 1181 rx_buf_len = IXGBEVF_RXBUFFER_2K; 1182 else if (max_frame <= IXGBEVF_RXBUFFER_4K) 1183 rx_buf_len = IXGBEVF_RXBUFFER_4K; 1184 else if (max_frame <= IXGBEVF_RXBUFFER_8K) 1185 rx_buf_len = IXGBEVF_RXBUFFER_8K; 1186 else 1187 rx_buf_len = IXGBEVF_RXBUFFER_10K; 1188 1189 for (i = 0; i < adapter->num_rx_queues; i++) 1190 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1191 } 1192 1193 /** 1194 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1195 * @adapter: board private structure 1196 * 1197 * Configure the Rx unit of the MAC after a reset. 
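 *
 * The Rx buffer length must be settled (ixgbevf_set_rx_buffer_len) before
 * the rings are programmed, since SRRCTL advertises the packet buffer
 * size to hardware in 1 KB units -- hence rx_buf_len is rounded up via
 * ALIGN before the BSIZEPKT shift in ixgbevf_configure_srrctl().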
1198 **/ 1199 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1200 { 1201 u64 rdba; 1202 struct ixgbe_hw *hw = &adapter->hw; 1203 int i, j; 1204 u32 rdlen; 1205 1206 ixgbevf_setup_psrtype(adapter); 1207 1208 /* set_rx_buffer_len must be called before ring initialization */ 1209 ixgbevf_set_rx_buffer_len(adapter); 1210 1211 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1212 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1213 * the Base and Length of the Rx Descriptor Ring */ 1214 for (i = 0; i < adapter->num_rx_queues; i++) { 1215 rdba = adapter->rx_ring[i].dma; 1216 j = adapter->rx_ring[i].reg_idx; 1217 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1218 (rdba & DMA_BIT_MASK(32))); 1219 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1220 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); 1221 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); 1222 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1223 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1224 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1225 1226 ixgbevf_configure_srrctl(adapter, j); 1227 } 1228 } 1229 1230 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, 1231 __be16 proto, u16 vid) 1232 { 1233 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1234 struct ixgbe_hw *hw = &adapter->hw; 1235 int err; 1236 1237 spin_lock_bh(&adapter->mbx_lock); 1238 1239 /* add VID to filter table */ 1240 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1241 1242 spin_unlock_bh(&adapter->mbx_lock); 1243 1244 /* translate error return types so error makes sense */ 1245 if (err == IXGBE_ERR_MBX) 1246 return -EIO; 1247 1248 if (err == IXGBE_ERR_INVALID_ARGUMENT) 1249 return -EACCES; 1250 1251 set_bit(vid, adapter->active_vlans); 1252 1253 return err; 1254 } 1255 1256 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, 1257 __be16 proto, u16 vid) 1258 { 1259 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1260 struct ixgbe_hw *hw = &adapter->hw; 1261 int err = -EOPNOTSUPP; 1262 1263 spin_lock_bh(&adapter->mbx_lock); 1264 1265 /* remove VID from filter table */ 1266 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1267 1268 spin_unlock_bh(&adapter->mbx_lock); 1269 1270 clear_bit(vid, adapter->active_vlans); 1271 1272 return err; 1273 } 1274 1275 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1276 { 1277 u16 vid; 1278 1279 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1280 ixgbevf_vlan_rx_add_vid(adapter->netdev, 1281 htons(ETH_P_8021Q), vid); 1282 } 1283 1284 static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1285 { 1286 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1287 struct ixgbe_hw *hw = &adapter->hw; 1288 int count = 0; 1289 1290 if ((netdev_uc_count(netdev)) > 10) { 1291 pr_err("Too many unicast filters - No Space\n"); 1292 return -ENOSPC; 1293 } 1294 1295 if (!netdev_uc_empty(netdev)) { 1296 struct netdev_hw_addr *ha; 1297 netdev_for_each_uc_addr(ha, netdev) { 1298 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1299 udelay(200); 1300 } 1301 } else { 1302 /* 1303 * If the list is empty then send message to PF driver to 1304 * clear all macvlans on this VF. 1305 */ 1306 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1307 } 1308 1309 return count; 1310 } 1311 1312 /** 1313 * ixgbevf_set_rx_mode - Multicast and unicast set 1314 * @netdev: network interface device structure 1315 * 1316 * The set_rx_method entry point is called whenever the multicast address 1317 * list, unicast address list or the network interface flags are updated. 
1318 * This routine is responsible for configuring the hardware for proper 1319 * multicast mode and configuring requested unicast filters. 1320 **/ 1321 static void ixgbevf_set_rx_mode(struct net_device *netdev) 1322 { 1323 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1324 struct ixgbe_hw *hw = &adapter->hw; 1325 1326 spin_lock_bh(&adapter->mbx_lock); 1327 1328 /* reprogram multicast list */ 1329 hw->mac.ops.update_mc_addr_list(hw, netdev); 1330 1331 ixgbevf_write_uc_addr_list(netdev); 1332 1333 spin_unlock_bh(&adapter->mbx_lock); 1334 } 1335 1336 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1337 { 1338 int q_idx; 1339 struct ixgbevf_q_vector *q_vector; 1340 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1341 1342 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1343 q_vector = adapter->q_vector[q_idx]; 1344 #ifdef CONFIG_NET_RX_BUSY_POLL 1345 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); 1346 #endif 1347 napi_enable(&q_vector->napi); 1348 } 1349 } 1350 1351 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1352 { 1353 int q_idx; 1354 struct ixgbevf_q_vector *q_vector; 1355 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1356 1357 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1358 q_vector = adapter->q_vector[q_idx]; 1359 napi_disable(&q_vector->napi); 1360 #ifdef CONFIG_NET_RX_BUSY_POLL 1361 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { 1362 pr_info("QV %d locked\n", q_idx); 1363 usleep_range(1000, 20000); 1364 } 1365 #endif /* CONFIG_NET_RX_BUSY_POLL */ 1366 } 1367 } 1368 1369 static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1370 { 1371 struct net_device *netdev = adapter->netdev; 1372 int i; 1373 1374 ixgbevf_set_rx_mode(netdev); 1375 1376 ixgbevf_restore_vlan(adapter); 1377 1378 ixgbevf_configure_tx(adapter); 1379 ixgbevf_configure_rx(adapter); 1380 for (i = 0; i < adapter->num_rx_queues; i++) { 1381 struct ixgbevf_ring *ring = &adapter->rx_ring[i]; 1382 ixgbevf_alloc_rx_buffers(adapter, ring, 1383 ixgbevf_desc_unused(ring)); 1384 } 1385 } 1386 1387 #define IXGBEVF_MAX_RX_DESC_POLL 10 1388 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1389 int rxr) 1390 { 1391 struct ixgbe_hw *hw = &adapter->hw; 1392 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1393 u32 rxdctl; 1394 int j = adapter->rx_ring[rxr].reg_idx; 1395 1396 do { 1397 usleep_range(1000, 2000); 1398 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1399 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); 1400 1401 if (!wait_loop) 1402 hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n", 1403 rxr); 1404 1405 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], 1406 (adapter->rx_ring[rxr].count - 1)); 1407 } 1408 1409 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, 1410 struct ixgbevf_ring *ring) 1411 { 1412 struct ixgbe_hw *hw = &adapter->hw; 1413 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1414 u32 rxdctl; 1415 u8 reg_idx = ring->reg_idx; 1416 1417 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1418 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1419 1420 /* write value back with RXDCTL.ENABLE bit cleared */ 1421 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1422 1423 /* the hardware may take up to 100us to really disable the rx queue */ 1424 do { 1425 udelay(10); 1426 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1427 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); 1428 1429 if (!wait_loop) 1430 hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while 
polling\n", 1431 reg_idx); 1432 } 1433 1434 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1435 { 1436 /* Only save pre-reset stats if there are some */ 1437 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1438 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1439 adapter->stats.base_vfgprc; 1440 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1441 adapter->stats.base_vfgptc; 1442 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1443 adapter->stats.base_vfgorc; 1444 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1445 adapter->stats.base_vfgotc; 1446 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1447 adapter->stats.base_vfmprc; 1448 } 1449 } 1450 1451 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 1452 { 1453 struct ixgbe_hw *hw = &adapter->hw; 1454 1455 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1456 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1457 adapter->stats.last_vfgorc |= 1458 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1459 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1460 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1461 adapter->stats.last_vfgotc |= 1462 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1463 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1464 1465 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1466 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1467 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1468 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1469 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1470 } 1471 1472 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1473 { 1474 struct ixgbe_hw *hw = &adapter->hw; 1475 int api[] = { ixgbe_mbox_api_11, 1476 ixgbe_mbox_api_10, 1477 ixgbe_mbox_api_unknown }; 1478 int err = 0, idx = 0; 1479 1480 spin_lock_bh(&adapter->mbx_lock); 1481 1482 while (api[idx] != ixgbe_mbox_api_unknown) { 1483 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1484 if (!err) 1485 break; 1486 idx++; 1487 } 1488 1489 spin_unlock_bh(&adapter->mbx_lock); 1490 } 1491 1492 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1493 { 1494 struct net_device *netdev = adapter->netdev; 1495 struct ixgbe_hw *hw = &adapter->hw; 1496 int i, j = 0; 1497 int num_rx_rings = adapter->num_rx_queues; 1498 u32 txdctl, rxdctl; 1499 1500 for (i = 0; i < adapter->num_tx_queues; i++) { 1501 j = adapter->tx_ring[i].reg_idx; 1502 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1503 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 1504 txdctl |= (8 << 16); 1505 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1506 } 1507 1508 for (i = 0; i < adapter->num_tx_queues; i++) { 1509 j = adapter->tx_ring[i].reg_idx; 1510 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1511 txdctl |= IXGBE_TXDCTL_ENABLE; 1512 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1513 } 1514 1515 for (i = 0; i < num_rx_rings; i++) { 1516 j = adapter->rx_ring[i].reg_idx; 1517 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1518 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1519 if (hw->mac.type == ixgbe_mac_X540_vf) { 1520 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1521 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | 1522 IXGBE_RXDCTL_RLPML_EN); 1523 } 1524 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1525 ixgbevf_rx_desc_queue_enable(adapter, i); 1526 } 1527 1528 
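	/* Tx/Rx queues are enabled; hook them up to their MSI-X vectors,
	 * then program the station MAC address through the PF mailbox
	 * before opening the data path.
	 */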
ixgbevf_configure_msix(adapter); 1529 1530 spin_lock_bh(&adapter->mbx_lock); 1531 1532 if (is_valid_ether_addr(hw->mac.addr)) 1533 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1534 else 1535 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1536 1537 spin_unlock_bh(&adapter->mbx_lock); 1538 1539 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1540 ixgbevf_napi_enable_all(adapter); 1541 1542 /* enable transmits */ 1543 netif_tx_start_all_queues(netdev); 1544 1545 ixgbevf_save_reset_stats(adapter); 1546 ixgbevf_init_last_counter_stats(adapter); 1547 1548 hw->mac.get_link_status = 1; 1549 mod_timer(&adapter->watchdog_timer, jiffies); 1550 } 1551 1552 static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter) 1553 { 1554 struct ixgbe_hw *hw = &adapter->hw; 1555 struct ixgbevf_ring *rx_ring; 1556 unsigned int def_q = 0; 1557 unsigned int num_tcs = 0; 1558 unsigned int num_rx_queues = 1; 1559 int err, i; 1560 1561 spin_lock_bh(&adapter->mbx_lock); 1562 1563 /* fetch queue configuration from the PF */ 1564 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1565 1566 spin_unlock_bh(&adapter->mbx_lock); 1567 1568 if (err) 1569 return err; 1570 1571 if (num_tcs > 1) { 1572 /* update default Tx ring register index */ 1573 adapter->tx_ring[0].reg_idx = def_q; 1574 1575 /* we need as many queues as traffic classes */ 1576 num_rx_queues = num_tcs; 1577 } 1578 1579 /* nothing to do if we have the correct number of queues */ 1580 if (adapter->num_rx_queues == num_rx_queues) 1581 return 0; 1582 1583 /* allocate new rings */ 1584 rx_ring = kcalloc(num_rx_queues, 1585 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1586 if (!rx_ring) 1587 return -ENOMEM; 1588 1589 /* setup ring fields */ 1590 for (i = 0; i < num_rx_queues; i++) { 1591 rx_ring[i].count = adapter->rx_ring_count; 1592 rx_ring[i].queue_index = i; 1593 rx_ring[i].reg_idx = i; 1594 rx_ring[i].dev = &adapter->pdev->dev; 1595 rx_ring[i].netdev = adapter->netdev; 1596 1597 /* allocate resources on the ring */ 1598 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 1599 if (err) { 1600 while (i) { 1601 i--; 1602 ixgbevf_free_rx_resources(adapter, &rx_ring[i]); 1603 } 1604 kfree(rx_ring); 1605 return err; 1606 } 1607 } 1608 1609 /* free the existing rings and queues */ 1610 ixgbevf_free_all_rx_resources(adapter); 1611 adapter->num_rx_queues = 0; 1612 kfree(adapter->rx_ring); 1613 1614 /* move new rings into position on the adapter struct */ 1615 adapter->rx_ring = rx_ring; 1616 adapter->num_rx_queues = num_rx_queues; 1617 1618 /* reset ring to vector mapping */ 1619 ixgbevf_reset_q_vectors(adapter); 1620 ixgbevf_map_rings_to_vectors(adapter); 1621 1622 return 0; 1623 } 1624 1625 void ixgbevf_up(struct ixgbevf_adapter *adapter) 1626 { 1627 struct ixgbe_hw *hw = &adapter->hw; 1628 1629 ixgbevf_reset_queues(adapter); 1630 1631 ixgbevf_configure(adapter); 1632 1633 ixgbevf_up_complete(adapter); 1634 1635 /* clear any pending interrupts, may auto mask */ 1636 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1637 1638 ixgbevf_irq_enable(adapter); 1639 } 1640 1641 /** 1642 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1643 * @adapter: board private structure 1644 * @rx_ring: ring to free buffers from 1645 **/ 1646 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1647 struct ixgbevf_ring *rx_ring) 1648 { 1649 struct pci_dev *pdev = adapter->pdev; 1650 unsigned long size; 1651 unsigned int i; 1652 1653 if (!rx_ring->rx_buffer_info) 1654 return; 1655 1656 /* Free all the Rx ring sk_buffs */ 1657 for (i = 0; i < rx_ring->count; i++) { 1658 struct 
ixgbevf_rx_buffer *rx_buffer_info; 1659 1660 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1661 if (rx_buffer_info->dma) { 1662 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1663 rx_ring->rx_buf_len, 1664 DMA_FROM_DEVICE); 1665 rx_buffer_info->dma = 0; 1666 } 1667 if (rx_buffer_info->skb) { 1668 struct sk_buff *skb = rx_buffer_info->skb; 1669 rx_buffer_info->skb = NULL; 1670 do { 1671 struct sk_buff *this = skb; 1672 skb = IXGBE_CB(skb)->prev; 1673 dev_kfree_skb(this); 1674 } while (skb); 1675 } 1676 } 1677 1678 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1679 memset(rx_ring->rx_buffer_info, 0, size); 1680 1681 /* Zero out the descriptor ring */ 1682 memset(rx_ring->desc, 0, rx_ring->size); 1683 1684 rx_ring->next_to_clean = 0; 1685 rx_ring->next_to_use = 0; 1686 1687 if (rx_ring->head) 1688 writel(0, adapter->hw.hw_addr + rx_ring->head); 1689 if (rx_ring->tail) 1690 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1691 } 1692 1693 /** 1694 * ixgbevf_clean_tx_ring - Free Tx Buffers 1695 * @adapter: board private structure 1696 * @tx_ring: ring to be cleaned 1697 **/ 1698 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, 1699 struct ixgbevf_ring *tx_ring) 1700 { 1701 struct ixgbevf_tx_buffer *tx_buffer_info; 1702 unsigned long size; 1703 unsigned int i; 1704 1705 if (!tx_ring->tx_buffer_info) 1706 return; 1707 1708 /* Free all the Tx ring sk_buffs */ 1709 for (i = 0; i < tx_ring->count; i++) { 1710 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1711 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1712 } 1713 1714 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1715 memset(tx_ring->tx_buffer_info, 0, size); 1716 1717 memset(tx_ring->desc, 0, tx_ring->size); 1718 1719 tx_ring->next_to_use = 0; 1720 tx_ring->next_to_clean = 0; 1721 1722 if (tx_ring->head) 1723 writel(0, adapter->hw.hw_addr + tx_ring->head); 1724 if (tx_ring->tail) 1725 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1726 } 1727 1728 /** 1729 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1730 * @adapter: board private structure 1731 **/ 1732 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1733 { 1734 int i; 1735 1736 for (i = 0; i < adapter->num_rx_queues; i++) 1737 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1738 } 1739 1740 /** 1741 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1742 * @adapter: board private structure 1743 **/ 1744 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1745 { 1746 int i; 1747 1748 for (i = 0; i < adapter->num_tx_queues; i++) 1749 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1750 } 1751 1752 void ixgbevf_down(struct ixgbevf_adapter *adapter) 1753 { 1754 struct net_device *netdev = adapter->netdev; 1755 struct ixgbe_hw *hw = &adapter->hw; 1756 u32 txdctl; 1757 int i, j; 1758 1759 /* signal that we are down to the interrupt handler */ 1760 set_bit(__IXGBEVF_DOWN, &adapter->state); 1761 1762 /* disable all enabled rx queues */ 1763 for (i = 0; i < adapter->num_rx_queues; i++) 1764 ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]); 1765 1766 netif_tx_disable(netdev); 1767 1768 msleep(10); 1769 1770 netif_tx_stop_all_queues(netdev); 1771 1772 ixgbevf_irq_disable(adapter); 1773 1774 ixgbevf_napi_disable_all(adapter); 1775 1776 del_timer_sync(&adapter->watchdog_timer); 1777 /* can't call flush scheduled work here because it can deadlock 1778 * if linkwatch_event tries to acquire the rtnl_lock which we are 1779 * holding */ 1780 while (adapter->flags 
& IXGBE_FLAG_IN_WATCHDOG_TASK) 1781 msleep(1); 1782 1783 /* disable transmits in the hardware now that interrupts are off */ 1784 for (i = 0; i < adapter->num_tx_queues; i++) { 1785 j = adapter->tx_ring[i].reg_idx; 1786 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1787 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1788 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1789 } 1790 1791 netif_carrier_off(netdev); 1792 1793 if (!pci_channel_offline(adapter->pdev)) 1794 ixgbevf_reset(adapter); 1795 1796 ixgbevf_clean_all_tx_rings(adapter); 1797 ixgbevf_clean_all_rx_rings(adapter); 1798 } 1799 1800 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1801 { 1802 WARN_ON(in_interrupt()); 1803 1804 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1805 msleep(1); 1806 1807 ixgbevf_down(adapter); 1808 ixgbevf_up(adapter); 1809 1810 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1811 } 1812 1813 void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1814 { 1815 struct ixgbe_hw *hw = &adapter->hw; 1816 struct net_device *netdev = adapter->netdev; 1817 1818 if (hw->mac.ops.reset_hw(hw)) { 1819 hw_dbg(hw, "PF still resetting\n"); 1820 } else { 1821 hw->mac.ops.init_hw(hw); 1822 ixgbevf_negotiate_api(adapter); 1823 } 1824 1825 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1826 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1827 netdev->addr_len); 1828 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1829 netdev->addr_len); 1830 } 1831 } 1832 1833 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1834 int vectors) 1835 { 1836 int err = 0; 1837 int vector_threshold; 1838 1839 /* We'll want at least 2 (vector_threshold): 1840 * 1) TxQ[0] + RxQ[0] handler 1841 * 2) Other (Link Status Change, etc.) 1842 */ 1843 vector_threshold = MIN_MSIX_COUNT; 1844 1845 /* The more we get, the more we will assign to Tx/Rx Cleanup 1846 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1847 * Right now, we simply care about how many we'll get; we'll 1848 * set them up later while requesting irq's. 1849 */ 1850 while (vectors >= vector_threshold) { 1851 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1852 vectors); 1853 if (!err || err < 0) /* Success or a nasty failure. */ 1854 break; 1855 else /* err == number of vectors we should try again with */ 1856 vectors = err; 1857 } 1858 1859 if (vectors < vector_threshold) 1860 err = -ENOMEM; 1861 1862 if (err) { 1863 dev_err(&adapter->pdev->dev, 1864 "Unable to allocate MSI-X interrupts\n"); 1865 kfree(adapter->msix_entries); 1866 adapter->msix_entries = NULL; 1867 } else { 1868 /* 1869 * Adjust for only the vectors we'll use, which is minimum 1870 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1871 * vectors we were allocated. 1872 */ 1873 adapter->num_msix_vectors = vectors; 1874 } 1875 1876 return err; 1877 } 1878 1879 /** 1880 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 1881 * @adapter: board private structure to initialize 1882 * 1883 * This is the top level queue allocation routine. The order here is very 1884 * important, starting with the "most" number of features turned on at once, 1885 * and ending with the smallest set of features. This way large combinations 1886 * can be allocated if they're turned on, and smaller combinations are the 1887 * fallthrough conditions. 
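 *
 * The VF currently starts from a single Tx and a single Rx queue; if the
 * PF later reports multiple traffic classes, ixgbevf_reset_queues() grows
 * the Rx queue count to match.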
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
*/ 1970 adapter->msix_entries = kcalloc(v_budget, 1971 sizeof(struct msix_entry), GFP_KERNEL); 1972 if (!adapter->msix_entries) { 1973 err = -ENOMEM; 1974 goto out; 1975 } 1976 1977 for (vector = 0; vector < v_budget; vector++) 1978 adapter->msix_entries[vector].entry = vector; 1979 1980 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 1981 if (err) 1982 goto out; 1983 1984 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1985 if (err) 1986 goto out; 1987 1988 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 1989 1990 out: 1991 return err; 1992 } 1993 1994 /** 1995 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 1996 * @adapter: board private structure to initialize 1997 * 1998 * We allocate one q_vector per queue interrupt. If allocation fails we 1999 * return -ENOMEM. 2000 **/ 2001 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2002 { 2003 int q_idx, num_q_vectors; 2004 struct ixgbevf_q_vector *q_vector; 2005 2006 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2007 2008 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2009 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 2010 if (!q_vector) 2011 goto err_out; 2012 q_vector->adapter = adapter; 2013 q_vector->v_idx = q_idx; 2014 netif_napi_add(adapter->netdev, &q_vector->napi, 2015 ixgbevf_poll, 64); 2016 #ifdef CONFIG_NET_RX_BUSY_POLL 2017 napi_hash_add(&q_vector->napi); 2018 #endif 2019 adapter->q_vector[q_idx] = q_vector; 2020 } 2021 2022 return 0; 2023 2024 err_out: 2025 while (q_idx) { 2026 q_idx--; 2027 q_vector = adapter->q_vector[q_idx]; 2028 #ifdef CONFIG_NET_RX_BUSY_POLL 2029 napi_hash_del(&q_vector->napi); 2030 #endif 2031 netif_napi_del(&q_vector->napi); 2032 kfree(q_vector); 2033 adapter->q_vector[q_idx] = NULL; 2034 } 2035 return -ENOMEM; 2036 } 2037 2038 /** 2039 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2040 * @adapter: board private structure to initialize 2041 * 2042 * This function frees the memory allocated to the q_vectors. In addition if 2043 * NAPI is enabled it will delete any references to the NAPI struct prior 2044 * to freeing the q_vector. 
2045 **/ 2046 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2047 { 2048 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2049 2050 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2051 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 2052 2053 adapter->q_vector[q_idx] = NULL; 2054 #ifdef CONFIG_NET_RX_BUSY_POLL 2055 napi_hash_del(&q_vector->napi); 2056 #endif 2057 netif_napi_del(&q_vector->napi); 2058 kfree(q_vector); 2059 } 2060 } 2061 2062 /** 2063 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2064 * @adapter: board private structure 2065 * 2066 **/ 2067 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2068 { 2069 pci_disable_msix(adapter->pdev); 2070 kfree(adapter->msix_entries); 2071 adapter->msix_entries = NULL; 2072 } 2073 2074 /** 2075 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2076 * @adapter: board private structure to initialize 2077 * 2078 **/ 2079 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2080 { 2081 int err; 2082 2083 /* Number of supported queues */ 2084 ixgbevf_set_num_queues(adapter); 2085 2086 err = ixgbevf_set_interrupt_capability(adapter); 2087 if (err) { 2088 hw_dbg(&adapter->hw, 2089 "Unable to setup interrupt capabilities\n"); 2090 goto err_set_interrupt; 2091 } 2092 2093 err = ixgbevf_alloc_q_vectors(adapter); 2094 if (err) { 2095 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 2096 "vectors\n"); 2097 goto err_alloc_q_vectors; 2098 } 2099 2100 err = ixgbevf_alloc_queues(adapter); 2101 if (err) { 2102 pr_err("Unable to allocate memory for queues\n"); 2103 goto err_alloc_queues; 2104 } 2105 2106 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 2107 "Tx Queue count = %u\n", 2108 (adapter->num_rx_queues > 1) ? "Enabled" : 2109 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2110 2111 set_bit(__IXGBEVF_DOWN, &adapter->state); 2112 2113 return 0; 2114 err_alloc_queues: 2115 ixgbevf_free_q_vectors(adapter); 2116 err_alloc_q_vectors: 2117 ixgbevf_reset_interrupt_capability(adapter); 2118 err_set_interrupt: 2119 return err; 2120 } 2121 2122 /** 2123 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2124 * @adapter: board private structure to clear interrupt scheme on 2125 * 2126 * We go through and clear interrupt specific resources and reset the structure 2127 * to pre-load conditions 2128 **/ 2129 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2130 { 2131 adapter->num_tx_queues = 0; 2132 adapter->num_rx_queues = 0; 2133 2134 ixgbevf_free_q_vectors(adapter); 2135 ixgbevf_reset_interrupt_capability(adapter); 2136 } 2137 2138 /** 2139 * ixgbevf_sw_init - Initialize general software structures 2140 * (struct ixgbevf_adapter) 2141 * @adapter: board private structure to initialize 2142 * 2143 * ixgbevf_sw_init initializes the Adapter private data structure. 2144 * Fields are initialized based on PCI device information and 2145 * OS network device settings (MTU size). 
2146 **/ 2147 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2148 { 2149 struct ixgbe_hw *hw = &adapter->hw; 2150 struct pci_dev *pdev = adapter->pdev; 2151 struct net_device *netdev = adapter->netdev; 2152 int err; 2153 2154 /* PCI config space info */ 2155 2156 hw->vendor_id = pdev->vendor; 2157 hw->device_id = pdev->device; 2158 hw->revision_id = pdev->revision; 2159 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2160 hw->subsystem_device_id = pdev->subsystem_device; 2161 2162 hw->mbx.ops.init_params(hw); 2163 2164 /* assume legacy case in which PF would only give VF 2 queues */ 2165 hw->mac.max_tx_queues = 2; 2166 hw->mac.max_rx_queues = 2; 2167 2168 /* lock to protect mailbox accesses */ 2169 spin_lock_init(&adapter->mbx_lock); 2170 2171 err = hw->mac.ops.reset_hw(hw); 2172 if (err) { 2173 dev_info(&pdev->dev, 2174 "PF still in reset state. Is the PF interface up?\n"); 2175 } else { 2176 err = hw->mac.ops.init_hw(hw); 2177 if (err) { 2178 pr_err("init_shared_code failed: %d\n", err); 2179 goto out; 2180 } 2181 ixgbevf_negotiate_api(adapter); 2182 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 2183 if (err) 2184 dev_info(&pdev->dev, "Error reading MAC address\n"); 2185 else if (is_zero_ether_addr(adapter->hw.mac.addr)) 2186 dev_info(&pdev->dev, 2187 "MAC address not assigned by administrator.\n"); 2188 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 2189 } 2190 2191 if (!is_valid_ether_addr(netdev->dev_addr)) { 2192 dev_info(&pdev->dev, "Assigning random MAC address\n"); 2193 eth_hw_addr_random(netdev); 2194 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); 2195 } 2196 2197 /* Enable dynamic interrupt throttling rates */ 2198 adapter->rx_itr_setting = 1; 2199 adapter->tx_itr_setting = 1; 2200 2201 /* set default ring sizes */ 2202 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2203 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2204 2205 set_bit(__IXGBEVF_DOWN, &adapter->state); 2206 return 0; 2207 2208 out: 2209 return err; 2210 } 2211 2212 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2213 { \ 2214 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2215 if (current_counter < last_counter) \ 2216 counter += 0x100000000LL; \ 2217 last_counter = current_counter; \ 2218 counter &= 0xFFFFFFFF00000000LL; \ 2219 counter |= current_counter; \ 2220 } 2221 2222 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2223 { \ 2224 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2225 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2226 u64 current_counter = (current_counter_msb << 32) | \ 2227 current_counter_lsb; \ 2228 if (current_counter < last_counter) \ 2229 counter += 0x1000000000LL; \ 2230 last_counter = current_counter; \ 2231 counter &= 0xFFFFFFF000000000LL; \ 2232 counter |= current_counter; \ 2233 } 2234 /** 2235 * ixgbevf_update_stats - Update the board statistics counters. 
2236 * @adapter: board private structure 2237 **/ 2238 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2239 { 2240 struct ixgbe_hw *hw = &adapter->hw; 2241 int i; 2242 2243 if (!adapter->link_up) 2244 return; 2245 2246 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2247 adapter->stats.vfgprc); 2248 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2249 adapter->stats.vfgptc); 2250 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2251 adapter->stats.last_vfgorc, 2252 adapter->stats.vfgorc); 2253 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2254 adapter->stats.last_vfgotc, 2255 adapter->stats.vfgotc); 2256 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2257 adapter->stats.vfmprc); 2258 2259 for (i = 0; i < adapter->num_rx_queues; i++) { 2260 adapter->hw_csum_rx_error += 2261 adapter->rx_ring[i].hw_csum_rx_error; 2262 adapter->hw_csum_rx_good += 2263 adapter->rx_ring[i].hw_csum_rx_good; 2264 adapter->rx_ring[i].hw_csum_rx_error = 0; 2265 adapter->rx_ring[i].hw_csum_rx_good = 0; 2266 } 2267 } 2268 2269 /** 2270 * ixgbevf_watchdog - Timer Call-back 2271 * @data: pointer to adapter cast into an unsigned long 2272 **/ 2273 static void ixgbevf_watchdog(unsigned long data) 2274 { 2275 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2276 struct ixgbe_hw *hw = &adapter->hw; 2277 u32 eics = 0; 2278 int i; 2279 2280 /* 2281 * Do the watchdog outside of interrupt context due to the lovely 2282 * delays that some of the newer hardware requires 2283 */ 2284 2285 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2286 goto watchdog_short_circuit; 2287 2288 /* get one bit for every active tx/rx interrupt vector */ 2289 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2290 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2291 if (qv->rx.ring || qv->tx.ring) 2292 eics |= 1 << i; 2293 } 2294 2295 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2296 2297 watchdog_short_circuit: 2298 schedule_work(&adapter->watchdog_task); 2299 } 2300 2301 /** 2302 * ixgbevf_tx_timeout - Respond to a Tx Hang 2303 * @netdev: network interface device structure 2304 **/ 2305 static void ixgbevf_tx_timeout(struct net_device *netdev) 2306 { 2307 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2308 2309 /* Do the reset outside of interrupt context */ 2310 schedule_work(&adapter->reset_task); 2311 } 2312 2313 static void ixgbevf_reset_task(struct work_struct *work) 2314 { 2315 struct ixgbevf_adapter *adapter; 2316 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2317 2318 /* If we're already down or resetting, just bail */ 2319 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2320 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2321 return; 2322 2323 adapter->tx_timeout_count++; 2324 2325 ixgbevf_reinit_locked(adapter); 2326 } 2327 2328 /** 2329 * ixgbevf_watchdog_task - worker thread to bring link up 2330 * @work: pointer to work_struct containing our data 2331 **/ 2332 static void ixgbevf_watchdog_task(struct work_struct *work) 2333 { 2334 struct ixgbevf_adapter *adapter = container_of(work, 2335 struct ixgbevf_adapter, 2336 watchdog_task); 2337 struct net_device *netdev = adapter->netdev; 2338 struct ixgbe_hw *hw = &adapter->hw; 2339 u32 link_speed = adapter->link_speed; 2340 bool link_up = adapter->link_up; 2341 s32 need_reset; 2342 2343 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2344 2345 /* 2346 * Always check the link on the watchdog because we have 2347 * no LSC interrupt 2348 */ 2349 
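	/* The link query goes through the VF mac ops and may touch the PF
	 * mailbox, so it is serialized against other mailbox users with
	 * mbx_lock.  A non-zero return from check_link is taken to mean
	 * the PF has reset us: drop the carrier and schedule a full reset.
	 */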
spin_lock_bh(&adapter->mbx_lock); 2350 2351 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2352 2353 spin_unlock_bh(&adapter->mbx_lock); 2354 2355 if (need_reset) { 2356 adapter->link_up = link_up; 2357 adapter->link_speed = link_speed; 2358 netif_carrier_off(netdev); 2359 netif_tx_stop_all_queues(netdev); 2360 schedule_work(&adapter->reset_task); 2361 goto pf_has_reset; 2362 } 2363 adapter->link_up = link_up; 2364 adapter->link_speed = link_speed; 2365 2366 if (link_up) { 2367 if (!netif_carrier_ok(netdev)) { 2368 char *link_speed_string; 2369 switch (link_speed) { 2370 case IXGBE_LINK_SPEED_10GB_FULL: 2371 link_speed_string = "10 Gbps"; 2372 break; 2373 case IXGBE_LINK_SPEED_1GB_FULL: 2374 link_speed_string = "1 Gbps"; 2375 break; 2376 case IXGBE_LINK_SPEED_100_FULL: 2377 link_speed_string = "100 Mbps"; 2378 break; 2379 default: 2380 link_speed_string = "unknown speed"; 2381 break; 2382 } 2383 dev_info(&adapter->pdev->dev, 2384 "NIC Link is Up, %s\n", link_speed_string); 2385 netif_carrier_on(netdev); 2386 netif_tx_wake_all_queues(netdev); 2387 } 2388 } else { 2389 adapter->link_up = false; 2390 adapter->link_speed = 0; 2391 if (netif_carrier_ok(netdev)) { 2392 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2393 netif_carrier_off(netdev); 2394 netif_tx_stop_all_queues(netdev); 2395 } 2396 } 2397 2398 ixgbevf_update_stats(adapter); 2399 2400 pf_has_reset: 2401 /* Reset the timer */ 2402 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2403 mod_timer(&adapter->watchdog_timer, 2404 round_jiffies(jiffies + (2 * HZ))); 2405 2406 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2407 } 2408 2409 /** 2410 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2411 * @adapter: board private structure 2412 * @tx_ring: Tx descriptor ring for a specific queue 2413 * 2414 * Free all transmit software resources 2415 **/ 2416 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2417 struct ixgbevf_ring *tx_ring) 2418 { 2419 struct pci_dev *pdev = adapter->pdev; 2420 2421 ixgbevf_clean_tx_ring(adapter, tx_ring); 2422 2423 vfree(tx_ring->tx_buffer_info); 2424 tx_ring->tx_buffer_info = NULL; 2425 2426 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2427 tx_ring->dma); 2428 2429 tx_ring->desc = NULL; 2430 } 2431 2432 /** 2433 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2434 * @adapter: board private structure 2435 * 2436 * Free all transmit software resources 2437 **/ 2438 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2439 { 2440 int i; 2441 2442 for (i = 0; i < adapter->num_tx_queues; i++) 2443 if (adapter->tx_ring[i].desc) 2444 ixgbevf_free_tx_resources(adapter, 2445 &adapter->tx_ring[i]); 2446 2447 } 2448 2449 /** 2450 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2451 * @adapter: board private structure 2452 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2453 * 2454 * Return 0 on success, negative on failure 2455 **/ 2456 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2457 struct ixgbevf_ring *tx_ring) 2458 { 2459 struct pci_dev *pdev = adapter->pdev; 2460 int size; 2461 2462 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2463 tx_ring->tx_buffer_info = vzalloc(size); 2464 if (!tx_ring->tx_buffer_info) 2465 goto err; 2466 2467 /* round up to nearest 4K */ 2468 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2469 tx_ring->size = ALIGN(tx_ring->size, 4096); 2470 2471 tx_ring->desc = dma_alloc_coherent(&pdev->dev, 
tx_ring->size, 2472 &tx_ring->dma, GFP_KERNEL); 2473 if (!tx_ring->desc) 2474 goto err; 2475 2476 tx_ring->next_to_use = 0; 2477 tx_ring->next_to_clean = 0; 2478 return 0; 2479 2480 err: 2481 vfree(tx_ring->tx_buffer_info); 2482 tx_ring->tx_buffer_info = NULL; 2483 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2484 "descriptor ring\n"); 2485 return -ENOMEM; 2486 } 2487 2488 /** 2489 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2490 * @adapter: board private structure 2491 * 2492 * If this function returns with an error, then it's possible one or 2493 * more of the rings is populated (while the rest are not). It is the 2494 * callers duty to clean those orphaned rings. 2495 * 2496 * Return 0 on success, negative on failure 2497 **/ 2498 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2499 { 2500 int i, err = 0; 2501 2502 for (i = 0; i < adapter->num_tx_queues; i++) { 2503 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2504 if (!err) 2505 continue; 2506 hw_dbg(&adapter->hw, 2507 "Allocation for Tx Queue %u failed\n", i); 2508 break; 2509 } 2510 2511 return err; 2512 } 2513 2514 /** 2515 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2516 * @adapter: board private structure 2517 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2518 * 2519 * Returns 0 on success, negative on failure 2520 **/ 2521 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2522 struct ixgbevf_ring *rx_ring) 2523 { 2524 struct pci_dev *pdev = adapter->pdev; 2525 int size; 2526 2527 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2528 rx_ring->rx_buffer_info = vzalloc(size); 2529 if (!rx_ring->rx_buffer_info) 2530 goto alloc_failed; 2531 2532 /* Round up to nearest 4K */ 2533 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2534 rx_ring->size = ALIGN(rx_ring->size, 4096); 2535 2536 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2537 &rx_ring->dma, GFP_KERNEL); 2538 2539 if (!rx_ring->desc) { 2540 vfree(rx_ring->rx_buffer_info); 2541 rx_ring->rx_buffer_info = NULL; 2542 goto alloc_failed; 2543 } 2544 2545 rx_ring->next_to_clean = 0; 2546 rx_ring->next_to_use = 0; 2547 2548 return 0; 2549 alloc_failed: 2550 return -ENOMEM; 2551 } 2552 2553 /** 2554 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2555 * @adapter: board private structure 2556 * 2557 * If this function returns with an error, then it's possible one or 2558 * more of the rings is populated (while the rest are not). It is the 2559 * callers duty to clean those orphaned rings. 
2560 * 2561 * Return 0 on success, negative on failure 2562 **/ 2563 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2564 { 2565 int i, err = 0; 2566 2567 for (i = 0; i < adapter->num_rx_queues; i++) { 2568 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2569 if (!err) 2570 continue; 2571 hw_dbg(&adapter->hw, 2572 "Allocation for Rx Queue %u failed\n", i); 2573 break; 2574 } 2575 return err; 2576 } 2577 2578 /** 2579 * ixgbevf_free_rx_resources - Free Rx Resources 2580 * @adapter: board private structure 2581 * @rx_ring: ring to clean the resources from 2582 * 2583 * Free all receive software resources 2584 **/ 2585 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2586 struct ixgbevf_ring *rx_ring) 2587 { 2588 struct pci_dev *pdev = adapter->pdev; 2589 2590 ixgbevf_clean_rx_ring(adapter, rx_ring); 2591 2592 vfree(rx_ring->rx_buffer_info); 2593 rx_ring->rx_buffer_info = NULL; 2594 2595 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2596 rx_ring->dma); 2597 2598 rx_ring->desc = NULL; 2599 } 2600 2601 /** 2602 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2603 * @adapter: board private structure 2604 * 2605 * Free all receive software resources 2606 **/ 2607 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2608 { 2609 int i; 2610 2611 for (i = 0; i < adapter->num_rx_queues; i++) 2612 if (adapter->rx_ring[i].desc) 2613 ixgbevf_free_rx_resources(adapter, 2614 &adapter->rx_ring[i]); 2615 } 2616 2617 static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter) 2618 { 2619 struct ixgbe_hw *hw = &adapter->hw; 2620 struct ixgbevf_ring *rx_ring; 2621 unsigned int def_q = 0; 2622 unsigned int num_tcs = 0; 2623 unsigned int num_rx_queues = 1; 2624 int err, i; 2625 2626 spin_lock_bh(&adapter->mbx_lock); 2627 2628 /* fetch queue configuration from the PF */ 2629 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2630 2631 spin_unlock_bh(&adapter->mbx_lock); 2632 2633 if (err) 2634 return err; 2635 2636 if (num_tcs > 1) { 2637 /* update default Tx ring register index */ 2638 adapter->tx_ring[0].reg_idx = def_q; 2639 2640 /* we need as many queues as traffic classes */ 2641 num_rx_queues = num_tcs; 2642 } 2643 2644 /* nothing to do if we have the correct number of queues */ 2645 if (adapter->num_rx_queues == num_rx_queues) 2646 return 0; 2647 2648 /* allocate new rings */ 2649 rx_ring = kcalloc(num_rx_queues, 2650 sizeof(struct ixgbevf_ring), GFP_KERNEL); 2651 if (!rx_ring) 2652 return -ENOMEM; 2653 2654 /* setup ring fields */ 2655 for (i = 0; i < num_rx_queues; i++) { 2656 rx_ring[i].count = adapter->rx_ring_count; 2657 rx_ring[i].queue_index = i; 2658 rx_ring[i].reg_idx = i; 2659 rx_ring[i].dev = &adapter->pdev->dev; 2660 rx_ring[i].netdev = adapter->netdev; 2661 } 2662 2663 /* free the existing ring and queues */ 2664 adapter->num_rx_queues = 0; 2665 kfree(adapter->rx_ring); 2666 2667 /* move new rings into position on the adapter struct */ 2668 adapter->rx_ring = rx_ring; 2669 adapter->num_rx_queues = num_rx_queues; 2670 2671 return 0; 2672 } 2673 2674 /** 2675 * ixgbevf_open - Called when a network interface is made active 2676 * @netdev: network interface device structure 2677 * 2678 * Returns 0 on success, negative value on failure 2679 * 2680 * The open entry point is called when a network interface is made 2681 * active by the system (IFF_UP). 
At this point all resources needed 2682 * for transmit and receive operations are allocated, the interrupt 2683 * handler is registered with the OS, the watchdog timer is started, 2684 * and the stack is notified that the interface is ready. 2685 **/ 2686 static int ixgbevf_open(struct net_device *netdev) 2687 { 2688 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2689 struct ixgbe_hw *hw = &adapter->hw; 2690 int err; 2691 2692 /* A previous failure to open the device because of a lack of 2693 * available MSIX vector resources may have reset the number 2694 * of msix vectors variable to zero. The only way to recover 2695 * is to unload/reload the driver and hope that the system has 2696 * been able to recover some MSIX vector resources. 2697 */ 2698 if (!adapter->num_msix_vectors) 2699 return -ENOMEM; 2700 2701 /* disallow open during test */ 2702 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2703 return -EBUSY; 2704 2705 if (hw->adapter_stopped) { 2706 ixgbevf_reset(adapter); 2707 /* if adapter is still stopped then PF isn't up and 2708 * the vf can't start. */ 2709 if (hw->adapter_stopped) { 2710 err = IXGBE_ERR_MBX; 2711 pr_err("Unable to start - perhaps the PF Driver isn't " 2712 "up yet\n"); 2713 goto err_setup_reset; 2714 } 2715 } 2716 2717 /* setup queue reg_idx and Rx queue count */ 2718 err = ixgbevf_setup_queues(adapter); 2719 if (err) 2720 goto err_setup_queues; 2721 2722 /* allocate transmit descriptors */ 2723 err = ixgbevf_setup_all_tx_resources(adapter); 2724 if (err) 2725 goto err_setup_tx; 2726 2727 /* allocate receive descriptors */ 2728 err = ixgbevf_setup_all_rx_resources(adapter); 2729 if (err) 2730 goto err_setup_rx; 2731 2732 ixgbevf_configure(adapter); 2733 2734 /* 2735 * Map the Tx/Rx rings to the vectors we were allotted. 2736 * if request_irq will be called in this function map_rings 2737 * must be called *before* up_complete 2738 */ 2739 ixgbevf_map_rings_to_vectors(adapter); 2740 2741 ixgbevf_up_complete(adapter); 2742 2743 /* clear any pending interrupts, may auto mask */ 2744 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2745 err = ixgbevf_request_irq(adapter); 2746 if (err) 2747 goto err_req_irq; 2748 2749 ixgbevf_irq_enable(adapter); 2750 2751 return 0; 2752 2753 err_req_irq: 2754 ixgbevf_down(adapter); 2755 err_setup_rx: 2756 ixgbevf_free_all_rx_resources(adapter); 2757 err_setup_tx: 2758 ixgbevf_free_all_tx_resources(adapter); 2759 err_setup_queues: 2760 ixgbevf_reset(adapter); 2761 2762 err_setup_reset: 2763 2764 return err; 2765 } 2766 2767 /** 2768 * ixgbevf_close - Disables a network interface 2769 * @netdev: network interface device structure 2770 * 2771 * Returns 0, this is not allowed to fail 2772 * 2773 * The close entry point is called when an interface is de-activated 2774 * by the OS. The hardware is still under the drivers control, but 2775 * needs to be disabled. A global MAC reset is issued to stop the 2776 * hardware, and all transmit and receive resources are freed. 
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header length: L2/L3 headers plus the TCP header */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
2894 } 2895 2896 switch (l4_hdr) { 2897 case IPPROTO_TCP: 2898 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2899 mss_l4len_idx = tcp_hdrlen(skb) << 2900 IXGBE_ADVTXD_L4LEN_SHIFT; 2901 break; 2902 case IPPROTO_SCTP: 2903 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2904 mss_l4len_idx = sizeof(struct sctphdr) << 2905 IXGBE_ADVTXD_L4LEN_SHIFT; 2906 break; 2907 case IPPROTO_UDP: 2908 mss_l4len_idx = sizeof(struct udphdr) << 2909 IXGBE_ADVTXD_L4LEN_SHIFT; 2910 break; 2911 default: 2912 if (unlikely(net_ratelimit())) { 2913 dev_warn(tx_ring->dev, 2914 "partial checksum but l4 proto=%x!\n", 2915 l4_hdr); 2916 } 2917 break; 2918 } 2919 } 2920 2921 /* vlan_macip_lens: MACLEN, VLAN tag */ 2922 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2923 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2924 2925 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2926 type_tucmd, mss_l4len_idx); 2927 2928 return (skb->ip_summed == CHECKSUM_PARTIAL); 2929 } 2930 2931 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2932 struct sk_buff *skb, u32 tx_flags) 2933 { 2934 struct ixgbevf_tx_buffer *tx_buffer_info; 2935 unsigned int len; 2936 unsigned int total = skb->len; 2937 unsigned int offset = 0, size; 2938 int count = 0; 2939 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2940 unsigned int f; 2941 int i; 2942 2943 i = tx_ring->next_to_use; 2944 2945 len = min(skb_headlen(skb), total); 2946 while (len) { 2947 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2948 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2949 2950 tx_buffer_info->length = size; 2951 tx_buffer_info->mapped_as_page = false; 2952 tx_buffer_info->dma = dma_map_single(tx_ring->dev, 2953 skb->data + offset, 2954 size, DMA_TO_DEVICE); 2955 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2956 goto dma_error; 2957 2958 len -= size; 2959 total -= size; 2960 offset += size; 2961 count++; 2962 i++; 2963 if (i == tx_ring->count) 2964 i = 0; 2965 } 2966 2967 for (f = 0; f < nr_frags; f++) { 2968 const struct skb_frag_struct *frag; 2969 2970 frag = &skb_shinfo(skb)->frags[f]; 2971 len = min((unsigned int)skb_frag_size(frag), total); 2972 offset = 0; 2973 2974 while (len) { 2975 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2976 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2977 2978 tx_buffer_info->length = size; 2979 tx_buffer_info->dma = 2980 skb_frag_dma_map(tx_ring->dev, frag, 2981 offset, size, DMA_TO_DEVICE); 2982 if (dma_mapping_error(tx_ring->dev, 2983 tx_buffer_info->dma)) 2984 goto dma_error; 2985 tx_buffer_info->mapped_as_page = true; 2986 2987 len -= size; 2988 total -= size; 2989 offset += size; 2990 count++; 2991 i++; 2992 if (i == tx_ring->count) 2993 i = 0; 2994 } 2995 if (total == 0) 2996 break; 2997 } 2998 2999 if (i == 0) 3000 i = tx_ring->count - 1; 3001 else 3002 i = i - 1; 3003 tx_ring->tx_buffer_info[i].skb = skb; 3004 3005 return count; 3006 3007 dma_error: 3008 dev_err(tx_ring->dev, "TX DMA map failed\n"); 3009 3010 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 3011 tx_buffer_info->dma = 0; 3012 count--; 3013 3014 /* clear timestamp and dma mappings for remaining portion of packet */ 3015 while (count >= 0) { 3016 count--; 3017 i--; 3018 if (i < 0) 3019 i += tx_ring->count; 3020 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3021 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 3022 } 3023 3024 return count; 3025 } 3026 3027 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 3028 int count, unsigned int first, u32 
paylen, 3029 u8 hdr_len) 3030 { 3031 union ixgbe_adv_tx_desc *tx_desc = NULL; 3032 struct ixgbevf_tx_buffer *tx_buffer_info; 3033 u32 olinfo_status = 0, cmd_type_len = 0; 3034 unsigned int i; 3035 3036 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 3037 3038 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 3039 3040 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 3041 3042 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3043 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 3044 3045 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3046 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 3047 3048 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 3049 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3050 3051 /* use index 1 context for tso */ 3052 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 3053 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3054 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 3055 } 3056 3057 /* 3058 * Check Context must be set if Tx switch is enabled, which it 3059 * always is for case where virtual functions are running 3060 */ 3061 olinfo_status |= IXGBE_ADVTXD_CC; 3062 3063 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3064 3065 i = tx_ring->next_to_use; 3066 while (count--) { 3067 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3068 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 3069 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3070 tx_desc->read.cmd_type_len = 3071 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3072 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3073 i++; 3074 if (i == tx_ring->count) 3075 i = 0; 3076 } 3077 3078 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 3079 3080 tx_ring->tx_buffer_info[first].time_stamp = jiffies; 3081 3082 /* Force memory writes to complete before letting h/w 3083 * know there are new descriptors to fetch. (Only 3084 * applicable for weak-ordered memory model archs, 3085 * such as IA-64). 3086 */ 3087 wmb(); 3088 3089 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc; 3090 tx_ring->next_to_use = i; 3091 } 3092 3093 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3094 { 3095 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 3096 3097 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3098 /* Herbert's original patch had: 3099 * smp_mb__after_netif_stop_queue(); 3100 * but since that doesn't exist yet, just open code it. */ 3101 smp_mb(); 3102 3103 /* We need to check again in a case another CPU has just 3104 * made room available. */ 3105 if (likely(ixgbevf_desc_unused(tx_ring) < size)) 3106 return -EBUSY; 3107 3108 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 3109 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 3110 ++adapter->restart_queue; 3111 return 0; 3112 } 3113 3114 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3115 { 3116 if (likely(ixgbevf_desc_unused(tx_ring) >= size)) 3117 return 0; 3118 return __ixgbevf_maybe_stop_tx(tx_ring, size); 3119 } 3120 3121 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3122 { 3123 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3124 struct ixgbevf_ring *tx_ring; 3125 unsigned int first; 3126 unsigned int tx_flags = 0; 3127 u8 hdr_len = 0; 3128 int r_idx = 0, tso; 3129 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 3130 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3131 unsigned short f; 3132 #endif 3133 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 3134 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 3135 dev_kfree_skb(skb); 3136 return NETDEV_TX_OK; 3137 } 3138 3139 tx_ring = &adapter->tx_ring[r_idx]; 3140 3141 /* 3142 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 3143 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 3144 * + 2 desc gap to keep tail from touching head, 3145 * + 1 desc for context descriptor, 3146 * otherwise try next time 3147 */ 3148 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3149 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3150 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3151 #else 3152 count += skb_shinfo(skb)->nr_frags; 3153 #endif 3154 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 3155 adapter->tx_busy++; 3156 return NETDEV_TX_BUSY; 3157 } 3158 3159 if (vlan_tx_tag_present(skb)) { 3160 tx_flags |= vlan_tx_tag_get(skb); 3161 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3162 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3163 } 3164 3165 first = tx_ring->next_to_use; 3166 3167 if (skb->protocol == htons(ETH_P_IP)) 3168 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3169 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); 3170 if (tso < 0) { 3171 dev_kfree_skb_any(skb); 3172 return NETDEV_TX_OK; 3173 } 3174 3175 if (tso) 3176 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; 3177 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) 3178 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3179 3180 ixgbevf_tx_queue(tx_ring, tx_flags, 3181 ixgbevf_tx_map(tx_ring, skb, tx_flags), 3182 first, skb->len, hdr_len); 3183 3184 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); 3185 3186 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 3187 3188 return NETDEV_TX_OK; 3189 } 3190 3191 /** 3192 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3193 * @netdev: network interface device structure 3194 * @p: pointer to an address structure 3195 * 3196 * Returns 0 on success, negative on failure 3197 **/ 3198 static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3199 { 3200 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3201 struct ixgbe_hw *hw = &adapter->hw; 3202 struct sockaddr *addr = p; 3203 3204 if (!is_valid_ether_addr(addr->sa_data)) 3205 return -EADDRNOTAVAIL; 3206 3207 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3208 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3209 3210 spin_lock_bh(&adapter->mbx_lock); 3211 3212 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3213 3214 spin_unlock_bh(&adapter->mbx_lock); 3215 3216 return 0; 3217 } 3218 3219 /** 3220 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3221 * @netdev: network interface device structure 3222 * @new_mtu: new value for maximum frame size 3223 * 3224 
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
3301 */ 3302 pci_save_state(pdev); 3303 3304 err = pci_enable_device_mem(pdev); 3305 if (err) { 3306 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3307 return err; 3308 } 3309 pci_set_master(pdev); 3310 3311 ixgbevf_reset(adapter); 3312 3313 rtnl_lock(); 3314 err = ixgbevf_init_interrupt_scheme(adapter); 3315 rtnl_unlock(); 3316 if (err) { 3317 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3318 return err; 3319 } 3320 3321 if (netif_running(netdev)) { 3322 err = ixgbevf_open(netdev); 3323 if (err) 3324 return err; 3325 } 3326 3327 netif_device_attach(netdev); 3328 3329 return err; 3330 } 3331 3332 #endif /* CONFIG_PM */ 3333 static void ixgbevf_shutdown(struct pci_dev *pdev) 3334 { 3335 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3336 } 3337 3338 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3339 struct rtnl_link_stats64 *stats) 3340 { 3341 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3342 unsigned int start; 3343 u64 bytes, packets; 3344 const struct ixgbevf_ring *ring; 3345 int i; 3346 3347 ixgbevf_update_stats(adapter); 3348 3349 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3350 3351 for (i = 0; i < adapter->num_rx_queues; i++) { 3352 ring = &adapter->rx_ring[i]; 3353 do { 3354 start = u64_stats_fetch_begin_bh(&ring->syncp); 3355 bytes = ring->total_bytes; 3356 packets = ring->total_packets; 3357 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3358 stats->rx_bytes += bytes; 3359 stats->rx_packets += packets; 3360 } 3361 3362 for (i = 0; i < adapter->num_tx_queues; i++) { 3363 ring = &adapter->tx_ring[i]; 3364 do { 3365 start = u64_stats_fetch_begin_bh(&ring->syncp); 3366 bytes = ring->total_bytes; 3367 packets = ring->total_packets; 3368 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3369 stats->tx_bytes += bytes; 3370 stats->tx_packets += packets; 3371 } 3372 3373 return stats; 3374 } 3375 3376 static const struct net_device_ops ixgbevf_netdev_ops = { 3377 .ndo_open = ixgbevf_open, 3378 .ndo_stop = ixgbevf_close, 3379 .ndo_start_xmit = ixgbevf_xmit_frame, 3380 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3381 .ndo_get_stats64 = ixgbevf_get_stats, 3382 .ndo_validate_addr = eth_validate_addr, 3383 .ndo_set_mac_address = ixgbevf_set_mac, 3384 .ndo_change_mtu = ixgbevf_change_mtu, 3385 .ndo_tx_timeout = ixgbevf_tx_timeout, 3386 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3387 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3388 #ifdef CONFIG_NET_RX_BUSY_POLL 3389 .ndo_busy_poll = ixgbevf_busy_poll_recv, 3390 #endif 3391 }; 3392 3393 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3394 { 3395 dev->netdev_ops = &ixgbevf_netdev_ops; 3396 ixgbevf_set_ethtool_ops(dev); 3397 dev->watchdog_timeo = 5 * HZ; 3398 } 3399 3400 /** 3401 * ixgbevf_probe - Device Initialization Routine 3402 * @pdev: PCI device information struct 3403 * @ent: entry in ixgbevf_pci_tbl 3404 * 3405 * Returns 0 on success, negative on failure 3406 * 3407 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3408 * The OS initialization, configuring of the adapter private structure, 3409 * and a hardware reset occur. 
3410 **/ 3411 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3412 { 3413 struct net_device *netdev; 3414 struct ixgbevf_adapter *adapter = NULL; 3415 struct ixgbe_hw *hw = NULL; 3416 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3417 static int cards_found; 3418 int err, pci_using_dac; 3419 3420 err = pci_enable_device(pdev); 3421 if (err) 3422 return err; 3423 3424 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 3425 pci_using_dac = 1; 3426 } else { 3427 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3428 if (err) { 3429 dev_err(&pdev->dev, "No usable DMA " 3430 "configuration, aborting\n"); 3431 goto err_dma; 3432 } 3433 pci_using_dac = 0; 3434 } 3435 3436 err = pci_request_regions(pdev, ixgbevf_driver_name); 3437 if (err) { 3438 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3439 goto err_pci_reg; 3440 } 3441 3442 pci_set_master(pdev); 3443 3444 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3445 MAX_TX_QUEUES); 3446 if (!netdev) { 3447 err = -ENOMEM; 3448 goto err_alloc_etherdev; 3449 } 3450 3451 SET_NETDEV_DEV(netdev, &pdev->dev); 3452 3453 pci_set_drvdata(pdev, netdev); 3454 adapter = netdev_priv(netdev); 3455 3456 adapter->netdev = netdev; 3457 adapter->pdev = pdev; 3458 hw = &adapter->hw; 3459 hw->back = adapter; 3460 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3461 3462 /* 3463 * call save state here in standalone driver because it relies on 3464 * adapter struct to exist, and needs to call netdev_priv 3465 */ 3466 pci_save_state(pdev); 3467 3468 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3469 pci_resource_len(pdev, 0)); 3470 if (!hw->hw_addr) { 3471 err = -EIO; 3472 goto err_ioremap; 3473 } 3474 3475 ixgbevf_assign_netdev_ops(netdev); 3476 3477 adapter->bd_number = cards_found; 3478 3479 /* Setup hw api */ 3480 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3481 hw->mac.type = ii->mac; 3482 3483 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3484 sizeof(struct ixgbe_mbx_operations)); 3485 3486 /* setup the private structure */ 3487 err = ixgbevf_sw_init(adapter); 3488 if (err) 3489 goto err_sw_init; 3490 3491 /* The HW MAC address was set and/or determined in sw_init */ 3492 if (!is_valid_ether_addr(netdev->dev_addr)) { 3493 pr_err("invalid MAC address\n"); 3494 err = -EIO; 3495 goto err_sw_init; 3496 } 3497 3498 netdev->hw_features = NETIF_F_SG | 3499 NETIF_F_IP_CSUM | 3500 NETIF_F_IPV6_CSUM | 3501 NETIF_F_TSO | 3502 NETIF_F_TSO6 | 3503 NETIF_F_RXCSUM; 3504 3505 netdev->features = netdev->hw_features | 3506 NETIF_F_HW_VLAN_CTAG_TX | 3507 NETIF_F_HW_VLAN_CTAG_RX | 3508 NETIF_F_HW_VLAN_CTAG_FILTER; 3509 3510 netdev->vlan_features |= NETIF_F_TSO; 3511 netdev->vlan_features |= NETIF_F_TSO6; 3512 netdev->vlan_features |= NETIF_F_IP_CSUM; 3513 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3514 netdev->vlan_features |= NETIF_F_SG; 3515 3516 if (pci_using_dac) 3517 netdev->features |= NETIF_F_HIGHDMA; 3518 3519 netdev->priv_flags |= IFF_UNICAST_FLT; 3520 3521 init_timer(&adapter->watchdog_timer); 3522 adapter->watchdog_timer.function = ixgbevf_watchdog; 3523 adapter->watchdog_timer.data = (unsigned long)adapter; 3524 3525 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3526 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3527 3528 err = ixgbevf_init_interrupt_scheme(adapter); 3529 if (err) 3530 goto err_sw_init; 3531 3532 strcpy(netdev->name, "eth%d"); 3533 3534 err = register_netdev(netdev); 3535 if (err) 3536 goto err_register; 
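
	/* carrier off reporting is important to ethtool even before open;
	 * it keeps the reported operstate sane until the watchdog task
	 * actually sees link from the PF.
	 */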
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 */
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
3661 * @pdev: Pointer to PCI device 3662 * 3663 * This callback is called when the error recovery driver tells us that 3664 * its OK to resume normal operation. Implementation resembles the 3665 * second-half of the ixgbevf_resume routine. 3666 */ 3667 static void ixgbevf_io_resume(struct pci_dev *pdev) 3668 { 3669 struct net_device *netdev = pci_get_drvdata(pdev); 3670 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3671 3672 if (netif_running(netdev)) 3673 ixgbevf_up(adapter); 3674 3675 netif_device_attach(netdev); 3676 } 3677 3678 /* PCI Error Recovery (ERS) */ 3679 static const struct pci_error_handlers ixgbevf_err_handler = { 3680 .error_detected = ixgbevf_io_error_detected, 3681 .slot_reset = ixgbevf_io_slot_reset, 3682 .resume = ixgbevf_io_resume, 3683 }; 3684 3685 static struct pci_driver ixgbevf_driver = { 3686 .name = ixgbevf_driver_name, 3687 .id_table = ixgbevf_pci_tbl, 3688 .probe = ixgbevf_probe, 3689 .remove = ixgbevf_remove, 3690 #ifdef CONFIG_PM 3691 /* Power Management Hooks */ 3692 .suspend = ixgbevf_suspend, 3693 .resume = ixgbevf_resume, 3694 #endif 3695 .shutdown = ixgbevf_shutdown, 3696 .err_handler = &ixgbevf_err_handler 3697 }; 3698 3699 /** 3700 * ixgbevf_init_module - Driver Registration Routine 3701 * 3702 * ixgbevf_init_module is the first routine called when the driver is 3703 * loaded. All it does is register with the PCI subsystem. 3704 **/ 3705 static int __init ixgbevf_init_module(void) 3706 { 3707 int ret; 3708 pr_info("%s - version %s\n", ixgbevf_driver_string, 3709 ixgbevf_driver_version); 3710 3711 pr_info("%s\n", ixgbevf_copyright); 3712 3713 ret = pci_register_driver(&ixgbevf_driver); 3714 return ret; 3715 } 3716 3717 module_init(ixgbevf_init_module); 3718 3719 /** 3720 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3721 * 3722 * ixgbevf_exit_module is called just before the driver is removed 3723 * from memory. 3724 **/ 3725 static void __exit ixgbevf_exit_module(void) 3726 { 3727 pci_unregister_driver(&ixgbevf_driver); 3728 } 3729 3730 #ifdef DEBUG 3731 /** 3732 * ixgbevf_get_hw_dev_name - return device name string 3733 * used by hardware layer to print debugging information 3734 **/ 3735 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3736 { 3737 struct ixgbevf_adapter *adapter = hw->back; 3738 return adapter->netdev->name; 3739 } 3740 3741 #endif 3742 module_exit(ixgbevf_exit_module); 3743 3744 /* ixgbevf_main.c */ 3745