1 /******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26 *******************************************************************************/ 27 28 29 /****************************************************************************** 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 31 ******************************************************************************/ 32 33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35 #include <linux/types.h> 36 #include <linux/bitops.h> 37 #include <linux/module.h> 38 #include <linux/pci.h> 39 #include <linux/netdevice.h> 40 #include <linux/vmalloc.h> 41 #include <linux/string.h> 42 #include <linux/in.h> 43 #include <linux/ip.h> 44 #include <linux/tcp.h> 45 #include <linux/sctp.h> 46 #include <linux/ipv6.h> 47 #include <linux/slab.h> 48 #include <net/checksum.h> 49 #include <net/ip6_checksum.h> 50 #include <linux/ethtool.h> 51 #include <linux/if.h> 52 #include <linux/if_vlan.h> 53 #include <linux/prefetch.h> 54 55 #include "ixgbevf.h" 56 57 const char ixgbevf_driver_name[] = "ixgbevf"; 58 static const char ixgbevf_driver_string[] = 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 60 61 #define DRV_VERSION "2.12.1-k" 62 const char ixgbevf_driver_version[] = DRV_VERSION; 63 static char ixgbevf_copyright[] = 64 "Copyright (c) 2009 - 2012 Intel Corporation."; 65 66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 67 [board_82599_vf] = &ixgbevf_82599_vf_info, 68 [board_X540_vf] = &ixgbevf_X540_vf_info, 69 }; 70 71 /* ixgbevf_pci_tbl - PCI Device ID Table 72 * 73 * Wildcard entries (PCI_ANY_ID) should come last 74 * Last entry must be all 0s 75 * 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 77 * Class, Class Mask, private data (not used) } 78 */ 79 static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, 82 /* required last entry */ 83 {0, } 84 }; 85 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 86 87 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 88 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 89 MODULE_LICENSE("GPL"); 90 MODULE_VERSION(DRV_VERSION); 91 92 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 93 static int debug = -1; 94 module_param(debug, int, 0); 95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 96 97 /* forward decls */ 98 static void ixgbevf_queue_reset_subtask(struct 
ixgbevf_adapter *adapter); 99 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 100 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 101 102 static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, 103 u32 val) 104 { 105 rx_ring->next_to_use = val; 106 107 /* 108 * Force memory writes to complete before letting h/w 109 * know there are new descriptors to fetch. (Only 110 * applicable for weak-ordered memory model archs, 111 * such as IA-64). 112 */ 113 wmb(); 114 writel(val, rx_ring->tail); 115 } 116 117 /** 118 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 119 * @adapter: pointer to adapter struct 120 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 121 * @queue: queue to map the corresponding interrupt to 122 * @msix_vector: the vector to map to the corresponding queue 123 */ 124 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 125 u8 queue, u8 msix_vector) 126 { 127 u32 ivar, index; 128 struct ixgbe_hw *hw = &adapter->hw; 129 if (direction == -1) { 130 /* other causes */ 131 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 132 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 133 ivar &= ~0xFF; 134 ivar |= msix_vector; 135 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 136 } else { 137 /* tx or rx causes */ 138 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 139 index = ((16 * (queue & 1)) + (8 * direction)); 140 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 141 ivar &= ~(0xFF << index); 142 ivar |= (msix_vector << index); 143 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 144 } 145 } 146 147 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 148 struct ixgbevf_tx_buffer *tx_buffer) 149 { 150 if (tx_buffer->skb) { 151 dev_kfree_skb_any(tx_buffer->skb); 152 if (dma_unmap_len(tx_buffer, len)) 153 dma_unmap_single(tx_ring->dev, 154 dma_unmap_addr(tx_buffer, dma), 155 dma_unmap_len(tx_buffer, len), 156 DMA_TO_DEVICE); 157 } else if (dma_unmap_len(tx_buffer, len)) { 158 dma_unmap_page(tx_ring->dev, 159 dma_unmap_addr(tx_buffer, dma), 160 dma_unmap_len(tx_buffer, len), 161 DMA_TO_DEVICE); 162 } 163 tx_buffer->next_to_watch = NULL; 164 tx_buffer->skb = NULL; 165 dma_unmap_len_set(tx_buffer, len, 0); 166 /* tx_buffer must be completely set up in the transmit path */ 167 } 168 169 #define IXGBE_MAX_TXD_PWR 14 170 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 171 172 /* Tx Descriptors needed, worst case */ 173 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 174 #define DESC_NEEDED (MAX_SKB_FRAGS + 4) 175 176 static void ixgbevf_tx_timeout(struct net_device *netdev); 177 178 /** 179 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 180 * @q_vector: board private structure 181 * @tx_ring: tx ring to clean 182 **/ 183 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 184 struct ixgbevf_ring *tx_ring) 185 { 186 struct ixgbevf_adapter *adapter = q_vector->adapter; 187 struct ixgbevf_tx_buffer *tx_buffer; 188 union ixgbe_adv_tx_desc *tx_desc; 189 unsigned int total_bytes = 0, total_packets = 0; 190 unsigned int budget = tx_ring->count / 2; 191 unsigned int i = tx_ring->next_to_clean; 192 193 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 194 return true; 195 196 tx_buffer = &tx_ring->tx_buffer_info[i]; 197 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 198 i -= tx_ring->count; 199 200 do { 201 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 202 203 /* if next_to_watch is not set then there is no work 
pending */ 204 if (!eop_desc) 205 break; 206 207 /* prevent any other reads prior to eop_desc */ 208 read_barrier_depends(); 209 210 /* if DD is not set pending work has not been completed */ 211 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 212 break; 213 214 /* clear next_to_watch to prevent false hangs */ 215 tx_buffer->next_to_watch = NULL; 216 217 /* update the statistics for this packet */ 218 total_bytes += tx_buffer->bytecount; 219 total_packets += tx_buffer->gso_segs; 220 221 /* free the skb */ 222 dev_kfree_skb_any(tx_buffer->skb); 223 224 /* unmap skb header data */ 225 dma_unmap_single(tx_ring->dev, 226 dma_unmap_addr(tx_buffer, dma), 227 dma_unmap_len(tx_buffer, len), 228 DMA_TO_DEVICE); 229 230 /* clear tx_buffer data */ 231 tx_buffer->skb = NULL; 232 dma_unmap_len_set(tx_buffer, len, 0); 233 234 /* unmap remaining buffers */ 235 while (tx_desc != eop_desc) { 236 tx_buffer++; 237 tx_desc++; 238 i++; 239 if (unlikely(!i)) { 240 i -= tx_ring->count; 241 tx_buffer = tx_ring->tx_buffer_info; 242 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 243 } 244 245 /* unmap any remaining paged data */ 246 if (dma_unmap_len(tx_buffer, len)) { 247 dma_unmap_page(tx_ring->dev, 248 dma_unmap_addr(tx_buffer, dma), 249 dma_unmap_len(tx_buffer, len), 250 DMA_TO_DEVICE); 251 dma_unmap_len_set(tx_buffer, len, 0); 252 } 253 } 254 255 /* move us one more past the eop_desc for start of next pkt */ 256 tx_buffer++; 257 tx_desc++; 258 i++; 259 if (unlikely(!i)) { 260 i -= tx_ring->count; 261 tx_buffer = tx_ring->tx_buffer_info; 262 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 263 } 264 265 /* issue prefetch for next Tx descriptor */ 266 prefetch(tx_desc); 267 268 /* update budget accounting */ 269 budget--; 270 } while (likely(budget)); 271 272 i += tx_ring->count; 273 tx_ring->next_to_clean = i; 274 u64_stats_update_begin(&tx_ring->syncp); 275 tx_ring->stats.bytes += total_bytes; 276 tx_ring->stats.packets += total_packets; 277 u64_stats_update_end(&tx_ring->syncp); 278 q_vector->tx.total_bytes += total_bytes; 279 q_vector->tx.total_packets += total_packets; 280 281 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 282 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 283 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 284 /* Make sure that anybody stopping the queue after this 285 * sees the new next_to_clean. 
286 */ 287 smp_mb(); 288 289 if (__netif_subqueue_stopped(tx_ring->netdev, 290 tx_ring->queue_index) && 291 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 292 netif_wake_subqueue(tx_ring->netdev, 293 tx_ring->queue_index); 294 ++tx_ring->tx_stats.restart_queue; 295 } 296 } 297 298 return !!budget; 299 } 300 301 /** 302 * ixgbevf_receive_skb - Send a completed packet up the stack 303 * @q_vector: structure containing interrupt and ring information 304 * @skb: packet to send up 305 * @status: hardware indication of status of receive 306 * @rx_desc: rx descriptor 307 **/ 308 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 309 struct sk_buff *skb, u8 status, 310 union ixgbe_adv_rx_desc *rx_desc) 311 { 312 struct ixgbevf_adapter *adapter = q_vector->adapter; 313 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 314 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 315 316 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 317 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 318 319 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 320 napi_gro_receive(&q_vector->napi, skb); 321 else 322 netif_rx(skb); 323 } 324 325 /** 326 * ixgbevf_rx_skb - Helper function to determine proper Rx method 327 * @q_vector: structure containing interrupt and ring information 328 * @skb: packet to send up 329 * @status: hardware indication of status of receive 330 * @rx_desc: rx descriptor 331 **/ 332 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, 333 struct sk_buff *skb, u8 status, 334 union ixgbe_adv_rx_desc *rx_desc) 335 { 336 #ifdef CONFIG_NET_RX_BUSY_POLL 337 skb_mark_napi_id(skb, &q_vector->napi); 338 339 if (ixgbevf_qv_busy_polling(q_vector)) { 340 netif_receive_skb(skb); 341 /* exit early if we busy polled */ 342 return; 343 } 344 #endif /* CONFIG_NET_RX_BUSY_POLL */ 345 346 ixgbevf_receive_skb(q_vector, skb, status, rx_desc); 347 } 348 349 /** 350 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 351 * @ring: pointer to Rx descriptor ring structure 352 * @status_err: hardware indication of status of receive 353 * @skb: skb currently being received and modified 354 **/ 355 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 356 u32 status_err, struct sk_buff *skb) 357 { 358 skb_checksum_none_assert(skb); 359 360 /* Rx csum disabled */ 361 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 362 return; 363 364 /* if IP and error */ 365 if ((status_err & IXGBE_RXD_STAT_IPCS) && 366 (status_err & IXGBE_RXDADV_ERR_IPE)) { 367 ring->rx_stats.csum_err++; 368 return; 369 } 370 371 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 372 return; 373 374 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 375 ring->rx_stats.csum_err++; 376 return; 377 } 378 379 /* It must be a TCP or UDP packet with a valid checksum */ 380 skb->ip_summed = CHECKSUM_UNNECESSARY; 381 } 382 383 /** 384 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 385 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on 386 **/ 387 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, 388 int cleaned_count) 389 { 390 union ixgbe_adv_rx_desc *rx_desc; 391 struct ixgbevf_rx_buffer *bi; 392 unsigned int i = rx_ring->next_to_use; 393 394 while (cleaned_count--) { 395 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 396 bi = &rx_ring->rx_buffer_info[i]; 397 398 if (!bi->skb) { 399 struct sk_buff *skb; 400 401 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 402 rx_ring->rx_buf_len); 403 if (!skb) 404 goto no_buffers; 405 406 bi->skb = skb; 407 408 bi->dma = 
dma_map_single(rx_ring->dev, skb->data, 409 rx_ring->rx_buf_len, 410 DMA_FROM_DEVICE); 411 if (dma_mapping_error(rx_ring->dev, bi->dma)) { 412 dev_kfree_skb(skb); 413 bi->skb = NULL; 414 dev_err(rx_ring->dev, "Rx DMA map failed\n"); 415 break; 416 } 417 } 418 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 419 420 i++; 421 if (i == rx_ring->count) 422 i = 0; 423 } 424 425 no_buffers: 426 rx_ring->rx_stats.alloc_rx_buff_failed++; 427 if (rx_ring->next_to_use != i) 428 ixgbevf_release_rx_desc(rx_ring, i); 429 } 430 431 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 432 u32 qmask) 433 { 434 struct ixgbe_hw *hw = &adapter->hw; 435 436 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 437 } 438 439 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 440 struct ixgbevf_ring *rx_ring, 441 int budget) 442 { 443 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 444 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 445 struct sk_buff *skb; 446 unsigned int i; 447 u32 len, staterr; 448 int cleaned_count = 0; 449 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 450 451 i = rx_ring->next_to_clean; 452 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 453 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 454 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 455 456 while (staterr & IXGBE_RXD_STAT_DD) { 457 if (!budget) 458 break; 459 budget--; 460 461 rmb(); /* read descriptor and rx_buffer_info after status DD */ 462 len = le16_to_cpu(rx_desc->wb.upper.length); 463 skb = rx_buffer_info->skb; 464 prefetch(skb->data - NET_IP_ALIGN); 465 rx_buffer_info->skb = NULL; 466 467 if (rx_buffer_info->dma) { 468 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, 469 rx_ring->rx_buf_len, 470 DMA_FROM_DEVICE); 471 rx_buffer_info->dma = 0; 472 skb_put(skb, len); 473 } 474 475 i++; 476 if (i == rx_ring->count) 477 i = 0; 478 479 next_rxd = IXGBEVF_RX_DESC(rx_ring, i); 480 prefetch(next_rxd); 481 cleaned_count++; 482 483 next_buffer = &rx_ring->rx_buffer_info[i]; 484 485 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 486 skb->next = next_buffer->skb; 487 IXGBE_CB(skb->next)->prev = skb; 488 rx_ring->rx_stats.non_eop_descs++; 489 goto next_desc; 490 } 491 492 /* we should not be chaining buffers, if we did drop the skb */ 493 if (IXGBE_CB(skb)->prev) { 494 do { 495 struct sk_buff *this = skb; 496 skb = IXGBE_CB(skb)->prev; 497 dev_kfree_skb(this); 498 } while (skb); 499 goto next_desc; 500 } 501 502 /* ERR_MASK will only have valid bits if EOP set */ 503 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 504 dev_kfree_skb_irq(skb); 505 goto next_desc; 506 } 507 508 ixgbevf_rx_checksum(rx_ring, staterr, skb); 509 510 /* probably a little skewed due to removing CRC */ 511 total_rx_bytes += skb->len; 512 total_rx_packets++; 513 514 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 515 516 /* Workaround hardware that can't do proper VEPA multicast 517 * source pruning. 
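		 * A broadcast/multicast frame whose source MAC address matches
		 * our own address has been looped back to us, so drop it.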
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
*/ 632 itr_reg |= IXGBE_EITR_CNT_WDIS; 633 634 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 635 } 636 637 #ifdef CONFIG_NET_RX_BUSY_POLL 638 /* must be called with local_bh_disable()d */ 639 static int ixgbevf_busy_poll_recv(struct napi_struct *napi) 640 { 641 struct ixgbevf_q_vector *q_vector = 642 container_of(napi, struct ixgbevf_q_vector, napi); 643 struct ixgbevf_adapter *adapter = q_vector->adapter; 644 struct ixgbevf_ring *ring; 645 int found = 0; 646 647 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 648 return LL_FLUSH_FAILED; 649 650 if (!ixgbevf_qv_lock_poll(q_vector)) 651 return LL_FLUSH_BUSY; 652 653 ixgbevf_for_each_ring(ring, q_vector->rx) { 654 found = ixgbevf_clean_rx_irq(q_vector, ring, 4); 655 #ifdef BP_EXTENDED_STATS 656 if (found) 657 ring->stats.cleaned += found; 658 else 659 ring->stats.misses++; 660 #endif 661 if (found) 662 break; 663 } 664 665 ixgbevf_qv_unlock_poll(q_vector); 666 667 return found; 668 } 669 #endif /* CONFIG_NET_RX_BUSY_POLL */ 670 671 /** 672 * ixgbevf_configure_msix - Configure MSI-X hardware 673 * @adapter: board private structure 674 * 675 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 676 * interrupts. 677 **/ 678 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 679 { 680 struct ixgbevf_q_vector *q_vector; 681 int q_vectors, v_idx; 682 683 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 684 adapter->eims_enable_mask = 0; 685 686 /* 687 * Populate the IVAR table and set the ITR values to the 688 * corresponding register. 689 */ 690 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 691 struct ixgbevf_ring *ring; 692 q_vector = adapter->q_vector[v_idx]; 693 694 ixgbevf_for_each_ring(ring, q_vector->rx) 695 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 696 697 ixgbevf_for_each_ring(ring, q_vector->tx) 698 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 699 700 if (q_vector->tx.ring && !q_vector->rx.ring) { 701 /* tx only vector */ 702 if (adapter->tx_itr_setting == 1) 703 q_vector->itr = IXGBE_10K_ITR; 704 else 705 q_vector->itr = adapter->tx_itr_setting; 706 } else { 707 /* rx or rx/tx vector */ 708 if (adapter->rx_itr_setting == 1) 709 q_vector->itr = IXGBE_20K_ITR; 710 else 711 q_vector->itr = adapter->rx_itr_setting; 712 } 713 714 /* add q_vector eims value to global eims_enable_mask */ 715 adapter->eims_enable_mask |= 1 << v_idx; 716 717 ixgbevf_write_eitr(q_vector); 718 } 719 720 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 721 /* setup eims_other and add value to global eims_enable_mask */ 722 adapter->eims_other = 1 << v_idx; 723 adapter->eims_enable_mask |= adapter->eims_other; 724 } 725 726 enum latency_range { 727 lowest_latency = 0, 728 low_latency = 1, 729 bulk_latency = 2, 730 latency_invalid = 255 731 }; 732 733 /** 734 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 735 * @q_vector: structure containing interrupt and ring information 736 * @ring_container: structure containing ring performance data 737 * 738 * Stores a new ITR value based on packets and byte 739 * counts during the last interrupt. The advantage of per interrupt 740 * computation is faster updates and more accurate ITR for the current 741 * traffic pattern. Constants in this function were computed 742 * based on theoretical maximum wire speed and thresholds were set based 743 * on testing data as well as attempting to minimize response time 744 * while increasing bulk throughput. 
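 * For example, at the 20000 ints/s setting each interrupt timeslice is
 * roughly 50 usecs, so 1500 bytes received in one interval works out to
 * 30 bytes/usec and moves the ring container into the bulk_latency range.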
745 **/ 746 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 747 struct ixgbevf_ring_container *ring_container) 748 { 749 int bytes = ring_container->total_bytes; 750 int packets = ring_container->total_packets; 751 u32 timepassed_us; 752 u64 bytes_perint; 753 u8 itr_setting = ring_container->itr; 754 755 if (packets == 0) 756 return; 757 758 /* simple throttlerate management 759 * 0-20MB/s lowest (100000 ints/s) 760 * 20-100MB/s low (20000 ints/s) 761 * 100-1249MB/s bulk (8000 ints/s) 762 */ 763 /* what was last interrupt timeslice? */ 764 timepassed_us = q_vector->itr >> 2; 765 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 766 767 switch (itr_setting) { 768 case lowest_latency: 769 if (bytes_perint > 10) 770 itr_setting = low_latency; 771 break; 772 case low_latency: 773 if (bytes_perint > 20) 774 itr_setting = bulk_latency; 775 else if (bytes_perint <= 10) 776 itr_setting = lowest_latency; 777 break; 778 case bulk_latency: 779 if (bytes_perint <= 20) 780 itr_setting = low_latency; 781 break; 782 } 783 784 /* clear work counters since we have the values we need */ 785 ring_container->total_bytes = 0; 786 ring_container->total_packets = 0; 787 788 /* write updated itr to ring container */ 789 ring_container->itr = itr_setting; 790 } 791 792 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 793 { 794 u32 new_itr = q_vector->itr; 795 u8 current_itr; 796 797 ixgbevf_update_itr(q_vector, &q_vector->tx); 798 ixgbevf_update_itr(q_vector, &q_vector->rx); 799 800 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 801 802 switch (current_itr) { 803 /* counts and packets in update_itr are dependent on these numbers */ 804 case lowest_latency: 805 new_itr = IXGBE_100K_ITR; 806 break; 807 case low_latency: 808 new_itr = IXGBE_20K_ITR; 809 break; 810 case bulk_latency: 811 default: 812 new_itr = IXGBE_8K_ITR; 813 break; 814 } 815 816 if (new_itr != q_vector->itr) { 817 /* do an exponential smoothing */ 818 new_itr = (10 * new_itr * q_vector->itr) / 819 ((9 * new_itr) + q_vector->itr); 820 821 /* save the algorithm value here */ 822 q_vector->itr = new_itr; 823 824 ixgbevf_write_eitr(q_vector); 825 } 826 } 827 828 static irqreturn_t ixgbevf_msix_other(int irq, void *data) 829 { 830 struct ixgbevf_adapter *adapter = data; 831 struct ixgbe_hw *hw = &adapter->hw; 832 833 hw->mac.get_link_status = 1; 834 835 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 836 mod_timer(&adapter->watchdog_timer, jiffies); 837 838 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 839 840 return IRQ_HANDLED; 841 } 842 843 /** 844 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 845 * @irq: unused 846 * @data: pointer to our q_vector struct for this interrupt vector 847 **/ 848 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 849 { 850 struct ixgbevf_q_vector *q_vector = data; 851 852 /* EIAM disabled interrupts (on this vector) for us */ 853 if (q_vector->rx.ring || q_vector->tx.ring) 854 napi_schedule(&q_vector->napi); 855 856 return IRQ_HANDLED; 857 } 858 859 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 860 int r_idx) 861 { 862 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 863 864 a->rx_ring[r_idx]->next = q_vector->rx.ring; 865 q_vector->rx.ring = a->rx_ring[r_idx]; 866 q_vector->rx.count++; 867 } 868 869 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 870 int t_idx) 871 { 872 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 873 874 a->tx_ring[t_idx]->next = 
q_vector->tx.ring; 875 q_vector->tx.ring = a->tx_ring[t_idx]; 876 q_vector->tx.count++; 877 } 878 879 /** 880 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 881 * @adapter: board private structure to initialize 882 * 883 * This function maps descriptor rings to the queue-specific vectors 884 * we were allotted through the MSI-X enabling code. Ideally, we'd have 885 * one vector per ring/queue, but on a constrained vector budget, we 886 * group the rings as "efficiently" as possible. You would add new 887 * mapping configurations in here. 888 **/ 889 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 890 { 891 int q_vectors; 892 int v_start = 0; 893 int rxr_idx = 0, txr_idx = 0; 894 int rxr_remaining = adapter->num_rx_queues; 895 int txr_remaining = adapter->num_tx_queues; 896 int i, j; 897 int rqpv, tqpv; 898 int err = 0; 899 900 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 901 902 /* 903 * The ideal configuration... 904 * We have enough vectors to map one per queue. 905 */ 906 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 907 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 908 map_vector_to_rxq(adapter, v_start, rxr_idx); 909 910 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 911 map_vector_to_txq(adapter, v_start, txr_idx); 912 goto out; 913 } 914 915 /* 916 * If we don't have enough vectors for a 1-to-1 917 * mapping, we'll have to group them so there are 918 * multiple queues per vector. 919 */ 920 /* Re-adjusting *qpv takes care of the remainder. */ 921 for (i = v_start; i < q_vectors; i++) { 922 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 923 for (j = 0; j < rqpv; j++) { 924 map_vector_to_rxq(adapter, i, rxr_idx); 925 rxr_idx++; 926 rxr_remaining--; 927 } 928 } 929 for (i = v_start; i < q_vectors; i++) { 930 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 931 for (j = 0; j < tqpv; j++) { 932 map_vector_to_txq(adapter, i, txr_idx); 933 txr_idx++; 934 txr_remaining--; 935 } 936 } 937 938 out: 939 return err; 940 } 941 942 /** 943 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 944 * @adapter: board private structure 945 * 946 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 947 * interrupts from the kernel. 
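 * Returns 0 on success, or a negative errno if any request_irq() call fails,
 * in which case all previously requested vectors are freed again.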
948 **/ 949 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 950 { 951 struct net_device *netdev = adapter->netdev; 952 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 953 int vector, err; 954 int ri = 0, ti = 0; 955 956 for (vector = 0; vector < q_vectors; vector++) { 957 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 958 struct msix_entry *entry = &adapter->msix_entries[vector]; 959 960 if (q_vector->tx.ring && q_vector->rx.ring) { 961 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 962 "%s-%s-%d", netdev->name, "TxRx", ri++); 963 ti++; 964 } else if (q_vector->rx.ring) { 965 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 966 "%s-%s-%d", netdev->name, "rx", ri++); 967 } else if (q_vector->tx.ring) { 968 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 969 "%s-%s-%d", netdev->name, "tx", ti++); 970 } else { 971 /* skip this unused q_vector */ 972 continue; 973 } 974 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 975 q_vector->name, q_vector); 976 if (err) { 977 hw_dbg(&adapter->hw, 978 "request_irq failed for MSIX interrupt " 979 "Error: %d\n", err); 980 goto free_queue_irqs; 981 } 982 } 983 984 err = request_irq(adapter->msix_entries[vector].vector, 985 &ixgbevf_msix_other, 0, netdev->name, adapter); 986 if (err) { 987 hw_dbg(&adapter->hw, 988 "request_irq for msix_other failed: %d\n", err); 989 goto free_queue_irqs; 990 } 991 992 return 0; 993 994 free_queue_irqs: 995 while (vector) { 996 vector--; 997 free_irq(adapter->msix_entries[vector].vector, 998 adapter->q_vector[vector]); 999 } 1000 /* This failure is non-recoverable - it indicates the system is 1001 * out of MSIX vector resources and the VF driver cannot run 1002 * without them. Set the number of msix vectors to zero 1003 * indicating that not enough can be allocated. The error 1004 * will be returned to the user indicating device open failed. 1005 * Any further attempts to force the driver to open will also 1006 * fail. The only way to recover is to unload the driver and 1007 * reload it again. If the system has recovered some MSIX 1008 * vectors then it may succeed. 1009 */ 1010 adapter->num_msix_vectors = 0; 1011 return err; 1012 } 1013 1014 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 1015 { 1016 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1017 1018 for (i = 0; i < q_vectors; i++) { 1019 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 1020 q_vector->rx.ring = NULL; 1021 q_vector->tx.ring = NULL; 1022 q_vector->rx.count = 0; 1023 q_vector->tx.count = 0; 1024 } 1025 } 1026 1027 /** 1028 * ixgbevf_request_irq - initialize interrupts 1029 * @adapter: board private structure 1030 * 1031 * Attempts to configure interrupts using the best available 1032 * capabilities of the hardware and kernel. 
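 * The VF relies on MSI-X exclusively, so this is a thin wrapper around
 * ixgbevf_request_msix_irqs().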
1033 **/ 1034 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 1035 { 1036 int err = 0; 1037 1038 err = ixgbevf_request_msix_irqs(adapter); 1039 1040 if (err) 1041 hw_dbg(&adapter->hw, 1042 "request_irq failed, Error %d\n", err); 1043 1044 return err; 1045 } 1046 1047 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 1048 { 1049 int i, q_vectors; 1050 1051 q_vectors = adapter->num_msix_vectors; 1052 i = q_vectors - 1; 1053 1054 free_irq(adapter->msix_entries[i].vector, adapter); 1055 i--; 1056 1057 for (; i >= 0; i--) { 1058 /* free only the irqs that were actually requested */ 1059 if (!adapter->q_vector[i]->rx.ring && 1060 !adapter->q_vector[i]->tx.ring) 1061 continue; 1062 1063 free_irq(adapter->msix_entries[i].vector, 1064 adapter->q_vector[i]); 1065 } 1066 1067 ixgbevf_reset_q_vectors(adapter); 1068 } 1069 1070 /** 1071 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 1072 * @adapter: board private structure 1073 **/ 1074 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 1075 { 1076 struct ixgbe_hw *hw = &adapter->hw; 1077 int i; 1078 1079 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 1080 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1081 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 1082 1083 IXGBE_WRITE_FLUSH(hw); 1084 1085 for (i = 0; i < adapter->num_msix_vectors; i++) 1086 synchronize_irq(adapter->msix_entries[i].vector); 1087 } 1088 1089 /** 1090 * ixgbevf_irq_enable - Enable default interrupt generation settings 1091 * @adapter: board private structure 1092 **/ 1093 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1094 { 1095 struct ixgbe_hw *hw = &adapter->hw; 1096 1097 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1098 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1099 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1100 } 1101 1102 /** 1103 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset 1104 * @adapter: board private structure 1105 * @ring: structure containing ring specific data 1106 * 1107 * Configure the Tx descriptor ring after a reset. 
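 * The queue is disabled first, the base address, length and threshold
 * values are programmed, and the queue is then re-enabled and polled
 * until the hardware reports TXDCTL.ENABLE.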
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);    /* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
		   32;          /* PTHRESH = 32 */

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
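 * Simply walks every allocated Tx ring and hands it to
 * ixgbevf_configure_tx_ring().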
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* prevent DMA from exceeding buffer space available */
	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
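 * PSRTYPE and the per-ring receive buffer length are programmed first,
 * then each Rx ring is configured and its receive buffers are allocated.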
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
1447 **/ 1448 static void ixgbevf_set_rx_mode(struct net_device *netdev) 1449 { 1450 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1451 struct ixgbe_hw *hw = &adapter->hw; 1452 1453 spin_lock_bh(&adapter->mbx_lock); 1454 1455 /* reprogram multicast list */ 1456 hw->mac.ops.update_mc_addr_list(hw, netdev); 1457 1458 ixgbevf_write_uc_addr_list(netdev); 1459 1460 spin_unlock_bh(&adapter->mbx_lock); 1461 } 1462 1463 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1464 { 1465 int q_idx; 1466 struct ixgbevf_q_vector *q_vector; 1467 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1468 1469 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1470 q_vector = adapter->q_vector[q_idx]; 1471 #ifdef CONFIG_NET_RX_BUSY_POLL 1472 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); 1473 #endif 1474 napi_enable(&q_vector->napi); 1475 } 1476 } 1477 1478 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1479 { 1480 int q_idx; 1481 struct ixgbevf_q_vector *q_vector; 1482 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1483 1484 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1485 q_vector = adapter->q_vector[q_idx]; 1486 napi_disable(&q_vector->napi); 1487 #ifdef CONFIG_NET_RX_BUSY_POLL 1488 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { 1489 pr_info("QV %d locked\n", q_idx); 1490 usleep_range(1000, 20000); 1491 } 1492 #endif /* CONFIG_NET_RX_BUSY_POLL */ 1493 } 1494 } 1495 1496 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) 1497 { 1498 struct ixgbe_hw *hw = &adapter->hw; 1499 unsigned int def_q = 0; 1500 unsigned int num_tcs = 0; 1501 unsigned int num_rx_queues = 1; 1502 int err; 1503 1504 spin_lock_bh(&adapter->mbx_lock); 1505 1506 /* fetch queue configuration from the PF */ 1507 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1508 1509 spin_unlock_bh(&adapter->mbx_lock); 1510 1511 if (err) 1512 return err; 1513 1514 if (num_tcs > 1) { 1515 /* update default Tx ring register index */ 1516 adapter->tx_ring[0]->reg_idx = def_q; 1517 1518 /* we need as many queues as traffic classes */ 1519 num_rx_queues = num_tcs; 1520 } 1521 1522 /* if we have a bad config abort request queue reset */ 1523 if (adapter->num_rx_queues != num_rx_queues) { 1524 /* force mailbox timeout to prevent further messages */ 1525 hw->mbx.timeout = 0; 1526 1527 /* wait for watchdog to come around and bail us out */ 1528 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; 1529 } 1530 1531 return 0; 1532 } 1533 1534 static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1535 { 1536 ixgbevf_configure_dcb(adapter); 1537 1538 ixgbevf_set_rx_mode(adapter->netdev); 1539 1540 ixgbevf_restore_vlan(adapter); 1541 1542 ixgbevf_configure_tx(adapter); 1543 ixgbevf_configure_rx(adapter); 1544 } 1545 1546 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1547 { 1548 /* Only save pre-reset stats if there are some */ 1549 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1550 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1551 adapter->stats.base_vfgprc; 1552 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1553 adapter->stats.base_vfgptc; 1554 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1555 adapter->stats.base_vfgorc; 1556 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1557 adapter->stats.base_vfgotc; 1558 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1559 adapter->stats.base_vfmprc; 1560 } 1561 } 1562 1563 static void ixgbevf_init_last_counter_stats(struct 
ixgbevf_adapter *adapter) 1564 { 1565 struct ixgbe_hw *hw = &adapter->hw; 1566 1567 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1568 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1569 adapter->stats.last_vfgorc |= 1570 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1571 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1572 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1573 adapter->stats.last_vfgotc |= 1574 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1575 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1576 1577 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1578 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1579 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1580 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1581 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1582 } 1583 1584 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1585 { 1586 struct ixgbe_hw *hw = &adapter->hw; 1587 int api[] = { ixgbe_mbox_api_11, 1588 ixgbe_mbox_api_10, 1589 ixgbe_mbox_api_unknown }; 1590 int err = 0, idx = 0; 1591 1592 spin_lock_bh(&adapter->mbx_lock); 1593 1594 while (api[idx] != ixgbe_mbox_api_unknown) { 1595 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1596 if (!err) 1597 break; 1598 idx++; 1599 } 1600 1601 spin_unlock_bh(&adapter->mbx_lock); 1602 } 1603 1604 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1605 { 1606 struct net_device *netdev = adapter->netdev; 1607 struct ixgbe_hw *hw = &adapter->hw; 1608 1609 ixgbevf_configure_msix(adapter); 1610 1611 spin_lock_bh(&adapter->mbx_lock); 1612 1613 if (is_valid_ether_addr(hw->mac.addr)) 1614 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1615 else 1616 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1617 1618 spin_unlock_bh(&adapter->mbx_lock); 1619 1620 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1621 ixgbevf_napi_enable_all(adapter); 1622 1623 /* enable transmits */ 1624 netif_tx_start_all_queues(netdev); 1625 1626 ixgbevf_save_reset_stats(adapter); 1627 ixgbevf_init_last_counter_stats(adapter); 1628 1629 hw->mac.get_link_status = 1; 1630 mod_timer(&adapter->watchdog_timer, jiffies); 1631 } 1632 1633 void ixgbevf_up(struct ixgbevf_adapter *adapter) 1634 { 1635 struct ixgbe_hw *hw = &adapter->hw; 1636 1637 ixgbevf_configure(adapter); 1638 1639 ixgbevf_up_complete(adapter); 1640 1641 /* clear any pending interrupts, may auto mask */ 1642 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1643 1644 ixgbevf_irq_enable(adapter); 1645 } 1646 1647 /** 1648 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1649 * @rx_ring: ring to free buffers from 1650 **/ 1651 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) 1652 { 1653 unsigned long size; 1654 unsigned int i; 1655 1656 if (!rx_ring->rx_buffer_info) 1657 return; 1658 1659 /* Free all the Rx ring sk_buffs */ 1660 for (i = 0; i < rx_ring->count; i++) { 1661 struct ixgbevf_rx_buffer *rx_buffer_info; 1662 1663 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1664 if (rx_buffer_info->dma) { 1665 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, 1666 rx_ring->rx_buf_len, 1667 DMA_FROM_DEVICE); 1668 rx_buffer_info->dma = 0; 1669 } 1670 if (rx_buffer_info->skb) { 1671 struct sk_buff *skb = rx_buffer_info->skb; 1672 rx_buffer_info->skb = NULL; 1673 do { 1674 struct sk_buff *this = skb; 1675 skb = IXGBE_CB(skb)->prev; 1676 dev_kfree_skb(this); 1677 } while (skb); 1678 } 1679 } 1680 1681 size = sizeof(struct 
ixgbevf_rx_buffer) * rx_ring->count; 1682 memset(rx_ring->rx_buffer_info, 0, size); 1683 1684 /* Zero out the descriptor ring */ 1685 memset(rx_ring->desc, 0, rx_ring->size); 1686 } 1687 1688 /** 1689 * ixgbevf_clean_tx_ring - Free Tx Buffers 1690 * @tx_ring: ring to be cleaned 1691 **/ 1692 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) 1693 { 1694 struct ixgbevf_tx_buffer *tx_buffer_info; 1695 unsigned long size; 1696 unsigned int i; 1697 1698 if (!tx_ring->tx_buffer_info) 1699 return; 1700 1701 /* Free all the Tx ring sk_buffs */ 1702 for (i = 0; i < tx_ring->count; i++) { 1703 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1704 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1705 } 1706 1707 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1708 memset(tx_ring->tx_buffer_info, 0, size); 1709 1710 memset(tx_ring->desc, 0, tx_ring->size); 1711 } 1712 1713 /** 1714 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1715 * @adapter: board private structure 1716 **/ 1717 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1718 { 1719 int i; 1720 1721 for (i = 0; i < adapter->num_rx_queues; i++) 1722 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); 1723 } 1724 1725 /** 1726 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1727 * @adapter: board private structure 1728 **/ 1729 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1730 { 1731 int i; 1732 1733 for (i = 0; i < adapter->num_tx_queues; i++) 1734 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); 1735 } 1736 1737 void ixgbevf_down(struct ixgbevf_adapter *adapter) 1738 { 1739 struct net_device *netdev = adapter->netdev; 1740 struct ixgbe_hw *hw = &adapter->hw; 1741 int i; 1742 1743 /* signal that we are down to the interrupt handler */ 1744 set_bit(__IXGBEVF_DOWN, &adapter->state); 1745 1746 /* disable all enabled rx queues */ 1747 for (i = 0; i < adapter->num_rx_queues; i++) 1748 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); 1749 1750 netif_tx_disable(netdev); 1751 1752 msleep(10); 1753 1754 netif_tx_stop_all_queues(netdev); 1755 1756 ixgbevf_irq_disable(adapter); 1757 1758 ixgbevf_napi_disable_all(adapter); 1759 1760 del_timer_sync(&adapter->watchdog_timer); 1761 /* can't call flush scheduled work here because it can deadlock 1762 * if linkwatch_event tries to acquire the rtnl_lock which we are 1763 * holding */ 1764 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1765 msleep(1); 1766 1767 /* disable transmits in the hardware now that interrupts are off */ 1768 for (i = 0; i < adapter->num_tx_queues; i++) { 1769 u8 reg_idx = adapter->tx_ring[i]->reg_idx; 1770 1771 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), 1772 IXGBE_TXDCTL_SWFLSH); 1773 } 1774 1775 netif_carrier_off(netdev); 1776 1777 if (!pci_channel_offline(adapter->pdev)) 1778 ixgbevf_reset(adapter); 1779 1780 ixgbevf_clean_all_tx_rings(adapter); 1781 ixgbevf_clean_all_rx_rings(adapter); 1782 } 1783 1784 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1785 { 1786 WARN_ON(in_interrupt()); 1787 1788 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1789 msleep(1); 1790 1791 ixgbevf_down(adapter); 1792 ixgbevf_up(adapter); 1793 1794 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1795 } 1796 1797 void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1798 { 1799 struct ixgbe_hw *hw = &adapter->hw; 1800 struct net_device *netdev = adapter->netdev; 1801 1802 if (hw->mac.ops.reset_hw(hw)) { 1803 hw_dbg(hw, "PF still resetting\n"); 1804 } else { 1805 
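		/* reset succeeded: re-initialize the MAC and renegotiate the
		 * mailbox API version with the PF
		 */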
hw->mac.ops.init_hw(hw); 1806 ixgbevf_negotiate_api(adapter); 1807 } 1808 1809 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1810 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1811 netdev->addr_len); 1812 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1813 netdev->addr_len); 1814 } 1815 } 1816 1817 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1818 int vectors) 1819 { 1820 int err = 0; 1821 int vector_threshold; 1822 1823 /* We'll want at least 2 (vector_threshold): 1824 * 1) TxQ[0] + RxQ[0] handler 1825 * 2) Other (Link Status Change, etc.) 1826 */ 1827 vector_threshold = MIN_MSIX_COUNT; 1828 1829 /* The more we get, the more we will assign to Tx/Rx Cleanup 1830 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1831 * Right now, we simply care about how many we'll get; we'll 1832 * set them up later while requesting irq's. 1833 */ 1834 while (vectors >= vector_threshold) { 1835 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1836 vectors); 1837 if (!err || err < 0) /* Success or a nasty failure. */ 1838 break; 1839 else /* err == number of vectors we should try again with */ 1840 vectors = err; 1841 } 1842 1843 if (vectors < vector_threshold) 1844 err = -ENOMEM; 1845 1846 if (err) { 1847 dev_err(&adapter->pdev->dev, 1848 "Unable to allocate MSI-X interrupts\n"); 1849 kfree(adapter->msix_entries); 1850 adapter->msix_entries = NULL; 1851 } else { 1852 /* 1853 * Adjust for only the vectors we'll use, which is minimum 1854 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1855 * vectors we were allocated. 1856 */ 1857 adapter->num_msix_vectors = vectors; 1858 } 1859 1860 return err; 1861 } 1862 1863 /** 1864 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 1865 * @adapter: board private structure to initialize 1866 * 1867 * This is the top level queue allocation routine. The order here is very 1868 * important, starting with the "most" number of features turned on at once, 1869 * and ending with the smallest set of features. This way large combinations 1870 * can be allocated if they're turned on, and smaller combinations are the 1871 * fallthrough conditions. 1872 * 1873 **/ 1874 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1875 { 1876 struct ixgbe_hw *hw = &adapter->hw; 1877 unsigned int def_q = 0; 1878 unsigned int num_tcs = 0; 1879 int err; 1880 1881 /* Start with base case */ 1882 adapter->num_rx_queues = 1; 1883 adapter->num_tx_queues = 1; 1884 1885 spin_lock_bh(&adapter->mbx_lock); 1886 1887 /* fetch queue configuration from the PF */ 1888 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1889 1890 spin_unlock_bh(&adapter->mbx_lock); 1891 1892 if (err) 1893 return; 1894 1895 /* we need as many queues as traffic classes */ 1896 if (num_tcs > 1) 1897 adapter->num_rx_queues = num_tcs; 1898 } 1899 1900 /** 1901 * ixgbevf_alloc_queues - Allocate memory for all rings 1902 * @adapter: board private structure to initialize 1903 * 1904 * We allocate one ring per queue at run-time since we don't know the 1905 * number of queues at compile-time. The polling_netdev array is 1906 * intended for Multiqueue, but should work fine with a single queue. 
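 * Returns 0 on success or -ENOMEM if any ring allocation fails; rings that
 * were already allocated are freed before returning.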
1907 **/ 1908 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1909 { 1910 struct ixgbevf_ring *ring; 1911 int rx = 0, tx = 0; 1912 1913 for (; tx < adapter->num_tx_queues; tx++) { 1914 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1915 if (!ring) 1916 goto err_allocation; 1917 1918 ring->dev = &adapter->pdev->dev; 1919 ring->netdev = adapter->netdev; 1920 ring->count = adapter->tx_ring_count; 1921 ring->queue_index = tx; 1922 ring->reg_idx = tx; 1923 1924 adapter->tx_ring[tx] = ring; 1925 } 1926 1927 for (; rx < adapter->num_rx_queues; rx++) { 1928 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1929 if (!ring) 1930 goto err_allocation; 1931 1932 ring->dev = &adapter->pdev->dev; 1933 ring->netdev = adapter->netdev; 1934 1935 ring->count = adapter->rx_ring_count; 1936 ring->queue_index = rx; 1937 ring->reg_idx = rx; 1938 1939 adapter->rx_ring[rx] = ring; 1940 } 1941 1942 return 0; 1943 1944 err_allocation: 1945 while (tx) { 1946 kfree(adapter->tx_ring[--tx]); 1947 adapter->tx_ring[tx] = NULL; 1948 } 1949 1950 while (rx) { 1951 kfree(adapter->rx_ring[--rx]); 1952 adapter->rx_ring[rx] = NULL; 1953 } 1954 return -ENOMEM; 1955 } 1956 1957 /** 1958 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 1959 * @adapter: board private structure to initialize 1960 * 1961 * Attempt to configure the interrupts using the best available 1962 * capabilities of the hardware and the kernel. 1963 **/ 1964 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1965 { 1966 struct net_device *netdev = adapter->netdev; 1967 int err = 0; 1968 int vector, v_budget; 1969 1970 /* 1971 * It's easy to be greedy for MSI-X vectors, but it really 1972 * doesn't do us much good if we have a lot more vectors 1973 * than CPU's. So let's be conservative and only ask for 1974 * (roughly) the same number of vectors as there are CPU's. 1975 * The default is to use pairs of vectors. 1976 */ 1977 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 1978 v_budget = min_t(int, v_budget, num_online_cpus()); 1979 v_budget += NON_Q_VECTORS; 1980 1981 /* A failure in MSI-X entry allocation isn't fatal, but it does 1982 * mean we disable MSI-X capabilities of the adapter. */ 1983 adapter->msix_entries = kcalloc(v_budget, 1984 sizeof(struct msix_entry), GFP_KERNEL); 1985 if (!adapter->msix_entries) { 1986 err = -ENOMEM; 1987 goto out; 1988 } 1989 1990 for (vector = 0; vector < v_budget; vector++) 1991 adapter->msix_entries[vector].entry = vector; 1992 1993 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 1994 if (err) 1995 goto out; 1996 1997 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1998 if (err) 1999 goto out; 2000 2001 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 2002 2003 out: 2004 return err; 2005 } 2006 2007 /** 2008 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 2009 * @adapter: board private structure to initialize 2010 * 2011 * We allocate one q_vector per queue interrupt. If allocation fails we 2012 * return -ENOMEM. 
2013 **/ 2014 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2015 { 2016 int q_idx, num_q_vectors; 2017 struct ixgbevf_q_vector *q_vector; 2018 2019 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2020 2021 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2022 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 2023 if (!q_vector) 2024 goto err_out; 2025 q_vector->adapter = adapter; 2026 q_vector->v_idx = q_idx; 2027 netif_napi_add(adapter->netdev, &q_vector->napi, 2028 ixgbevf_poll, 64); 2029 #ifdef CONFIG_NET_RX_BUSY_POLL 2030 napi_hash_add(&q_vector->napi); 2031 #endif 2032 adapter->q_vector[q_idx] = q_vector; 2033 } 2034 2035 return 0; 2036 2037 err_out: 2038 while (q_idx) { 2039 q_idx--; 2040 q_vector = adapter->q_vector[q_idx]; 2041 #ifdef CONFIG_NET_RX_BUSY_POLL 2042 napi_hash_del(&q_vector->napi); 2043 #endif 2044 netif_napi_del(&q_vector->napi); 2045 kfree(q_vector); 2046 adapter->q_vector[q_idx] = NULL; 2047 } 2048 return -ENOMEM; 2049 } 2050 2051 /** 2052 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2053 * @adapter: board private structure to initialize 2054 * 2055 * This function frees the memory allocated to the q_vectors. In addition if 2056 * NAPI is enabled it will delete any references to the NAPI struct prior 2057 * to freeing the q_vector. 2058 **/ 2059 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2060 { 2061 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2062 2063 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2064 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 2065 2066 adapter->q_vector[q_idx] = NULL; 2067 #ifdef CONFIG_NET_RX_BUSY_POLL 2068 napi_hash_del(&q_vector->napi); 2069 #endif 2070 netif_napi_del(&q_vector->napi); 2071 kfree(q_vector); 2072 } 2073 } 2074 2075 /** 2076 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2077 * @adapter: board private structure 2078 * 2079 **/ 2080 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2081 { 2082 pci_disable_msix(adapter->pdev); 2083 kfree(adapter->msix_entries); 2084 adapter->msix_entries = NULL; 2085 } 2086 2087 /** 2088 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2089 * @adapter: board private structure to initialize 2090 * 2091 **/ 2092 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2093 { 2094 int err; 2095 2096 /* Number of supported queues */ 2097 ixgbevf_set_num_queues(adapter); 2098 2099 err = ixgbevf_set_interrupt_capability(adapter); 2100 if (err) { 2101 hw_dbg(&adapter->hw, 2102 "Unable to setup interrupt capabilities\n"); 2103 goto err_set_interrupt; 2104 } 2105 2106 err = ixgbevf_alloc_q_vectors(adapter); 2107 if (err) { 2108 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 2109 "vectors\n"); 2110 goto err_alloc_q_vectors; 2111 } 2112 2113 err = ixgbevf_alloc_queues(adapter); 2114 if (err) { 2115 pr_err("Unable to allocate memory for queues\n"); 2116 goto err_alloc_queues; 2117 } 2118 2119 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 2120 "Tx Queue count = %u\n", 2121 (adapter->num_rx_queues > 1) ? 
"Enabled" : 2122 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2123 2124 set_bit(__IXGBEVF_DOWN, &adapter->state); 2125 2126 return 0; 2127 err_alloc_queues: 2128 ixgbevf_free_q_vectors(adapter); 2129 err_alloc_q_vectors: 2130 ixgbevf_reset_interrupt_capability(adapter); 2131 err_set_interrupt: 2132 return err; 2133 } 2134 2135 /** 2136 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2137 * @adapter: board private structure to clear interrupt scheme on 2138 * 2139 * We go through and clear interrupt specific resources and reset the structure 2140 * to pre-load conditions 2141 **/ 2142 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2143 { 2144 int i; 2145 2146 for (i = 0; i < adapter->num_tx_queues; i++) { 2147 kfree(adapter->tx_ring[i]); 2148 adapter->tx_ring[i] = NULL; 2149 } 2150 for (i = 0; i < adapter->num_rx_queues; i++) { 2151 kfree(adapter->rx_ring[i]); 2152 adapter->rx_ring[i] = NULL; 2153 } 2154 2155 adapter->num_tx_queues = 0; 2156 adapter->num_rx_queues = 0; 2157 2158 ixgbevf_free_q_vectors(adapter); 2159 ixgbevf_reset_interrupt_capability(adapter); 2160 } 2161 2162 /** 2163 * ixgbevf_sw_init - Initialize general software structures 2164 * (struct ixgbevf_adapter) 2165 * @adapter: board private structure to initialize 2166 * 2167 * ixgbevf_sw_init initializes the Adapter private data structure. 2168 * Fields are initialized based on PCI device information and 2169 * OS network device settings (MTU size). 2170 **/ 2171 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2172 { 2173 struct ixgbe_hw *hw = &adapter->hw; 2174 struct pci_dev *pdev = adapter->pdev; 2175 struct net_device *netdev = adapter->netdev; 2176 int err; 2177 2178 /* PCI config space info */ 2179 2180 hw->vendor_id = pdev->vendor; 2181 hw->device_id = pdev->device; 2182 hw->revision_id = pdev->revision; 2183 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2184 hw->subsystem_device_id = pdev->subsystem_device; 2185 2186 hw->mbx.ops.init_params(hw); 2187 2188 /* assume legacy case in which PF would only give VF 2 queues */ 2189 hw->mac.max_tx_queues = 2; 2190 hw->mac.max_rx_queues = 2; 2191 2192 /* lock to protect mailbox accesses */ 2193 spin_lock_init(&adapter->mbx_lock); 2194 2195 err = hw->mac.ops.reset_hw(hw); 2196 if (err) { 2197 dev_info(&pdev->dev, 2198 "PF still in reset state. 
Is the PF interface up?\n"); 2199 } else { 2200 err = hw->mac.ops.init_hw(hw); 2201 if (err) { 2202 pr_err("init_shared_code failed: %d\n", err); 2203 goto out; 2204 } 2205 ixgbevf_negotiate_api(adapter); 2206 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 2207 if (err) 2208 dev_info(&pdev->dev, "Error reading MAC address\n"); 2209 else if (is_zero_ether_addr(adapter->hw.mac.addr)) 2210 dev_info(&pdev->dev, 2211 "MAC address not assigned by administrator.\n"); 2212 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 2213 } 2214 2215 if (!is_valid_ether_addr(netdev->dev_addr)) { 2216 dev_info(&pdev->dev, "Assigning random MAC address\n"); 2217 eth_hw_addr_random(netdev); 2218 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); 2219 } 2220 2221 /* Enable dynamic interrupt throttling rates */ 2222 adapter->rx_itr_setting = 1; 2223 adapter->tx_itr_setting = 1; 2224 2225 /* set default ring sizes */ 2226 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2227 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2228 2229 set_bit(__IXGBEVF_DOWN, &adapter->state); 2230 return 0; 2231 2232 out: 2233 return err; 2234 } 2235 2236 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2237 { \ 2238 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2239 if (current_counter < last_counter) \ 2240 counter += 0x100000000LL; \ 2241 last_counter = current_counter; \ 2242 counter &= 0xFFFFFFFF00000000LL; \ 2243 counter |= current_counter; \ 2244 } 2245 2246 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2247 { \ 2248 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2249 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2250 u64 current_counter = (current_counter_msb << 32) | \ 2251 current_counter_lsb; \ 2252 if (current_counter < last_counter) \ 2253 counter += 0x1000000000LL; \ 2254 last_counter = current_counter; \ 2255 counter &= 0xFFFFFFF000000000LL; \ 2256 counter |= current_counter; \ 2257 } 2258 /** 2259 * ixgbevf_update_stats - Update the board statistics counters. 
2260 * @adapter: board private structure 2261 **/ 2262 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2263 { 2264 struct ixgbe_hw *hw = &adapter->hw; 2265 int i; 2266 2267 if (!adapter->link_up) 2268 return; 2269 2270 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2271 adapter->stats.vfgprc); 2272 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2273 adapter->stats.vfgptc); 2274 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2275 adapter->stats.last_vfgorc, 2276 adapter->stats.vfgorc); 2277 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2278 adapter->stats.last_vfgotc, 2279 adapter->stats.vfgotc); 2280 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2281 adapter->stats.vfmprc); 2282 2283 for (i = 0; i < adapter->num_rx_queues; i++) { 2284 adapter->hw_csum_rx_error += 2285 adapter->rx_ring[i]->hw_csum_rx_error; 2286 adapter->rx_ring[i]->hw_csum_rx_error = 0; 2287 } 2288 } 2289 2290 /** 2291 * ixgbevf_watchdog - Timer Call-back 2292 * @data: pointer to adapter cast into an unsigned long 2293 **/ 2294 static void ixgbevf_watchdog(unsigned long data) 2295 { 2296 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2297 struct ixgbe_hw *hw = &adapter->hw; 2298 u32 eics = 0; 2299 int i; 2300 2301 /* 2302 * Do the watchdog outside of interrupt context due to the lovely 2303 * delays that some of the newer hardware requires 2304 */ 2305 2306 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2307 goto watchdog_short_circuit; 2308 2309 /* get one bit for every active tx/rx interrupt vector */ 2310 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2311 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2312 if (qv->rx.ring || qv->tx.ring) 2313 eics |= 1 << i; 2314 } 2315 2316 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2317 2318 watchdog_short_circuit: 2319 schedule_work(&adapter->watchdog_task); 2320 } 2321 2322 /** 2323 * ixgbevf_tx_timeout - Respond to a Tx Hang 2324 * @netdev: network interface device structure 2325 **/ 2326 static void ixgbevf_tx_timeout(struct net_device *netdev) 2327 { 2328 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2329 2330 /* Do the reset outside of interrupt context */ 2331 schedule_work(&adapter->reset_task); 2332 } 2333 2334 static void ixgbevf_reset_task(struct work_struct *work) 2335 { 2336 struct ixgbevf_adapter *adapter; 2337 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2338 2339 /* If we're already down or resetting, just bail */ 2340 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2341 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2342 return; 2343 2344 adapter->tx_timeout_count++; 2345 2346 ixgbevf_reinit_locked(adapter); 2347 } 2348 2349 /** 2350 * ixgbevf_watchdog_task - worker thread to bring link up 2351 * @work: pointer to work_struct containing our data 2352 **/ 2353 static void ixgbevf_watchdog_task(struct work_struct *work) 2354 { 2355 struct ixgbevf_adapter *adapter = container_of(work, 2356 struct ixgbevf_adapter, 2357 watchdog_task); 2358 struct net_device *netdev = adapter->netdev; 2359 struct ixgbe_hw *hw = &adapter->hw; 2360 u32 link_speed = adapter->link_speed; 2361 bool link_up = adapter->link_up; 2362 s32 need_reset; 2363 2364 ixgbevf_queue_reset_subtask(adapter); 2365 2366 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2367 2368 /* 2369 * Always check the link on the watchdog because we have 2370 * no LSC interrupt 2371 */ 2372 spin_lock_bh(&adapter->mbx_lock); 2373 2374 need_reset = 
hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2375 2376 spin_unlock_bh(&adapter->mbx_lock); 2377 2378 if (need_reset) { 2379 adapter->link_up = link_up; 2380 adapter->link_speed = link_speed; 2381 netif_carrier_off(netdev); 2382 netif_tx_stop_all_queues(netdev); 2383 schedule_work(&adapter->reset_task); 2384 goto pf_has_reset; 2385 } 2386 adapter->link_up = link_up; 2387 adapter->link_speed = link_speed; 2388 2389 if (link_up) { 2390 if (!netif_carrier_ok(netdev)) { 2391 char *link_speed_string; 2392 switch (link_speed) { 2393 case IXGBE_LINK_SPEED_10GB_FULL: 2394 link_speed_string = "10 Gbps"; 2395 break; 2396 case IXGBE_LINK_SPEED_1GB_FULL: 2397 link_speed_string = "1 Gbps"; 2398 break; 2399 case IXGBE_LINK_SPEED_100_FULL: 2400 link_speed_string = "100 Mbps"; 2401 break; 2402 default: 2403 link_speed_string = "unknown speed"; 2404 break; 2405 } 2406 dev_info(&adapter->pdev->dev, 2407 "NIC Link is Up, %s\n", link_speed_string); 2408 netif_carrier_on(netdev); 2409 netif_tx_wake_all_queues(netdev); 2410 } 2411 } else { 2412 adapter->link_up = false; 2413 adapter->link_speed = 0; 2414 if (netif_carrier_ok(netdev)) { 2415 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2416 netif_carrier_off(netdev); 2417 netif_tx_stop_all_queues(netdev); 2418 } 2419 } 2420 2421 ixgbevf_update_stats(adapter); 2422 2423 pf_has_reset: 2424 /* Reset the timer */ 2425 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2426 mod_timer(&adapter->watchdog_timer, 2427 round_jiffies(jiffies + (2 * HZ))); 2428 2429 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2430 } 2431 2432 /** 2433 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2434 * @tx_ring: Tx descriptor ring for a specific queue 2435 * 2436 * Free all transmit software resources 2437 **/ 2438 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) 2439 { 2440 ixgbevf_clean_tx_ring(tx_ring); 2441 2442 vfree(tx_ring->tx_buffer_info); 2443 tx_ring->tx_buffer_info = NULL; 2444 2445 /* if not set, then don't free */ 2446 if (!tx_ring->desc) 2447 return; 2448 2449 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, 2450 tx_ring->dma); 2451 2452 tx_ring->desc = NULL; 2453 } 2454 2455 /** 2456 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2457 * @adapter: board private structure 2458 * 2459 * Free all transmit software resources 2460 **/ 2461 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2462 { 2463 int i; 2464 2465 for (i = 0; i < adapter->num_tx_queues; i++) 2466 if (adapter->tx_ring[i]->desc) 2467 ixgbevf_free_tx_resources(adapter->tx_ring[i]); 2468 } 2469 2470 /** 2471 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2472 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2473 * 2474 * Return 0 on success, negative on failure 2475 **/ 2476 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) 2477 { 2478 int size; 2479 2480 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2481 tx_ring->tx_buffer_info = vzalloc(size); 2482 if (!tx_ring->tx_buffer_info) 2483 goto err; 2484 2485 /* round up to nearest 4K */ 2486 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2487 tx_ring->size = ALIGN(tx_ring->size, 4096); 2488 2489 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, 2490 &tx_ring->dma, GFP_KERNEL); 2491 if (!tx_ring->desc) 2492 goto err; 2493 2494 return 0; 2495 2496 err: 2497 vfree(tx_ring->tx_buffer_info); 2498 tx_ring->tx_buffer_info = NULL; 2499 hw_dbg(&adapter->hw, "Unable to allocate memory for 
the transmit " 2500 "descriptor ring\n"); 2501 return -ENOMEM; 2502 } 2503 2504 /** 2505 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2506 * @adapter: board private structure 2507 * 2508 * If this function returns with an error, then it's possible one or 2509 * more of the rings is populated (while the rest are not). It is the 2510 * callers duty to clean those orphaned rings. 2511 * 2512 * Return 0 on success, negative on failure 2513 **/ 2514 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2515 { 2516 int i, err = 0; 2517 2518 for (i = 0; i < adapter->num_tx_queues; i++) { 2519 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); 2520 if (!err) 2521 continue; 2522 hw_dbg(&adapter->hw, 2523 "Allocation for Tx Queue %u failed\n", i); 2524 break; 2525 } 2526 2527 return err; 2528 } 2529 2530 /** 2531 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2532 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2533 * 2534 * Returns 0 on success, negative on failure 2535 **/ 2536 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) 2537 { 2538 int size; 2539 2540 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2541 rx_ring->rx_buffer_info = vzalloc(size); 2542 if (!rx_ring->rx_buffer_info) 2543 goto err; 2544 2545 /* Round up to nearest 4K */ 2546 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2547 rx_ring->size = ALIGN(rx_ring->size, 4096); 2548 2549 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, 2550 &rx_ring->dma, GFP_KERNEL); 2551 2552 if (!rx_ring->desc) 2553 goto err; 2554 2555 return 0; 2556 err: 2557 vfree(rx_ring->rx_buffer_info); 2558 rx_ring->rx_buffer_info = NULL; 2559 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); 2560 return -ENOMEM; 2561 } 2562 2563 /** 2564 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2565 * @adapter: board private structure 2566 * 2567 * If this function returns with an error, then it's possible one or 2568 * more of the rings is populated (while the rest are not). It is the 2569 * callers duty to clean those orphaned rings. 
2570 * 2571 * Return 0 on success, negative on failure 2572 **/ 2573 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2574 { 2575 int i, err = 0; 2576 2577 for (i = 0; i < adapter->num_rx_queues; i++) { 2578 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); 2579 if (!err) 2580 continue; 2581 hw_dbg(&adapter->hw, 2582 "Allocation for Rx Queue %u failed\n", i); 2583 break; 2584 } 2585 return err; 2586 } 2587 2588 /** 2589 * ixgbevf_free_rx_resources - Free Rx Resources 2590 * @rx_ring: ring to clean the resources from 2591 * 2592 * Free all receive software resources 2593 **/ 2594 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) 2595 { 2596 ixgbevf_clean_rx_ring(rx_ring); 2597 2598 vfree(rx_ring->rx_buffer_info); 2599 rx_ring->rx_buffer_info = NULL; 2600 2601 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, 2602 rx_ring->dma); 2603 2604 rx_ring->desc = NULL; 2605 } 2606 2607 /** 2608 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2609 * @adapter: board private structure 2610 * 2611 * Free all receive software resources 2612 **/ 2613 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2614 { 2615 int i; 2616 2617 for (i = 0; i < adapter->num_rx_queues; i++) 2618 if (adapter->rx_ring[i]->desc) 2619 ixgbevf_free_rx_resources(adapter->rx_ring[i]); 2620 } 2621 2622 /** 2623 * ixgbevf_open - Called when a network interface is made active 2624 * @netdev: network interface device structure 2625 * 2626 * Returns 0 on success, negative value on failure 2627 * 2628 * The open entry point is called when a network interface is made 2629 * active by the system (IFF_UP). At this point all resources needed 2630 * for transmit and receive operations are allocated, the interrupt 2631 * handler is registered with the OS, the watchdog timer is started, 2632 * and the stack is notified that the interface is ready. 2633 **/ 2634 static int ixgbevf_open(struct net_device *netdev) 2635 { 2636 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2637 struct ixgbe_hw *hw = &adapter->hw; 2638 int err; 2639 2640 /* A previous failure to open the device because of a lack of 2641 * available MSIX vector resources may have reset the number 2642 * of msix vectors variable to zero. The only way to recover 2643 * is to unload/reload the driver and hope that the system has 2644 * been able to recover some MSIX vector resources. 2645 */ 2646 if (!adapter->num_msix_vectors) 2647 return -ENOMEM; 2648 2649 /* disallow open during test */ 2650 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2651 return -EBUSY; 2652 2653 if (hw->adapter_stopped) { 2654 ixgbevf_reset(adapter); 2655 /* if adapter is still stopped then PF isn't up and 2656 * the vf can't start. */ 2657 if (hw->adapter_stopped) { 2658 err = IXGBE_ERR_MBX; 2659 pr_err("Unable to start - perhaps the PF Driver isn't " 2660 "up yet\n"); 2661 goto err_setup_reset; 2662 } 2663 } 2664 2665 /* allocate transmit descriptors */ 2666 err = ixgbevf_setup_all_tx_resources(adapter); 2667 if (err) 2668 goto err_setup_tx; 2669 2670 /* allocate receive descriptors */ 2671 err = ixgbevf_setup_all_rx_resources(adapter); 2672 if (err) 2673 goto err_setup_rx; 2674 2675 ixgbevf_configure(adapter); 2676 2677 /* 2678 * Map the Tx/Rx rings to the vectors we were allotted. 
2679 * if request_irq will be called in this function map_rings 2680 * must be called *before* up_complete 2681 */ 2682 ixgbevf_map_rings_to_vectors(adapter); 2683 2684 ixgbevf_up_complete(adapter); 2685 2686 /* clear any pending interrupts, may auto mask */ 2687 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2688 err = ixgbevf_request_irq(adapter); 2689 if (err) 2690 goto err_req_irq; 2691 2692 ixgbevf_irq_enable(adapter); 2693 2694 return 0; 2695 2696 err_req_irq: 2697 ixgbevf_down(adapter); 2698 err_setup_rx: 2699 ixgbevf_free_all_rx_resources(adapter); 2700 err_setup_tx: 2701 ixgbevf_free_all_tx_resources(adapter); 2702 ixgbevf_reset(adapter); 2703 2704 err_setup_reset: 2705 2706 return err; 2707 } 2708 2709 /** 2710 * ixgbevf_close - Disables a network interface 2711 * @netdev: network interface device structure 2712 * 2713 * Returns 0, this is not allowed to fail 2714 * 2715 * The close entry point is called when an interface is de-activated 2716 * by the OS. The hardware is still under the drivers control, but 2717 * needs to be disabled. A global MAC reset is issued to stop the 2718 * hardware, and all transmit and receive resources are freed. 2719 **/ 2720 static int ixgbevf_close(struct net_device *netdev) 2721 { 2722 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2723 2724 ixgbevf_down(adapter); 2725 ixgbevf_free_irq(adapter); 2726 2727 ixgbevf_free_all_tx_resources(adapter); 2728 ixgbevf_free_all_rx_resources(adapter); 2729 2730 return 0; 2731 } 2732 2733 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) 2734 { 2735 struct net_device *dev = adapter->netdev; 2736 2737 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) 2738 return; 2739 2740 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; 2741 2742 /* if interface is down do nothing */ 2743 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2744 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2745 return; 2746 2747 /* Hardware has to reinitialize queues and interrupts to 2748 * match packet buffer alignment. Unfortunately, the 2749 * hardware is not flexible enough to do this dynamically. 2750 */ 2751 if (netif_running(dev)) 2752 ixgbevf_close(dev); 2753 2754 ixgbevf_clear_interrupt_scheme(adapter); 2755 ixgbevf_init_interrupt_scheme(adapter); 2756 2757 if (netif_running(dev)) 2758 ixgbevf_open(dev); 2759 } 2760 2761 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2762 u32 vlan_macip_lens, u32 type_tucmd, 2763 u32 mss_l4len_idx) 2764 { 2765 struct ixgbe_adv_tx_context_desc *context_desc; 2766 u16 i = tx_ring->next_to_use; 2767 2768 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2769 2770 i++; 2771 tx_ring->next_to_use = (i < tx_ring->count) ? 
					     i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |=
IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2886 mss_l4len_idx = sizeof(struct sctphdr) << 2887 IXGBE_ADVTXD_L4LEN_SHIFT; 2888 break; 2889 case IPPROTO_UDP: 2890 mss_l4len_idx = sizeof(struct udphdr) << 2891 IXGBE_ADVTXD_L4LEN_SHIFT; 2892 break; 2893 default: 2894 if (unlikely(net_ratelimit())) { 2895 dev_warn(tx_ring->dev, 2896 "partial checksum but l4 proto=%x!\n", 2897 l4_hdr); 2898 } 2899 break; 2900 } 2901 2902 /* update TX checksum flag */ 2903 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 2904 } 2905 2906 /* vlan_macip_lens: MACLEN, VLAN tag */ 2907 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2908 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2909 2910 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2911 type_tucmd, mss_l4len_idx); 2912 } 2913 2914 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) 2915 { 2916 /* set type for advanced descriptor with frame checksum insertion */ 2917 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | 2918 IXGBE_ADVTXD_DCMD_IFCS | 2919 IXGBE_ADVTXD_DCMD_DEXT); 2920 2921 /* set HW vlan bit if vlan is present */ 2922 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2923 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); 2924 2925 /* set segmentation enable bits for TSO/FSO */ 2926 if (tx_flags & IXGBE_TX_FLAGS_TSO) 2927 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); 2928 2929 return cmd_type; 2930 } 2931 2932 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, 2933 u32 tx_flags, unsigned int paylen) 2934 { 2935 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); 2936 2937 /* enable L4 checksum for TSO and TX checksum offload */ 2938 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2939 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); 2940 2941 /* enble IPv4 checksum for TSO */ 2942 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2943 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); 2944 2945 /* use index 1 context for TSO/FSO/FCOE */ 2946 if (tx_flags & IXGBE_TX_FLAGS_TSO) 2947 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); 2948 2949 /* Check Context must be set if Tx switch is enabled, which it 2950 * always is for case where virtual functions are running 2951 */ 2952 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); 2953 2954 tx_desc->read.olinfo_status = olinfo_status; 2955 } 2956 2957 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2958 struct ixgbevf_tx_buffer *first, 2959 const u8 hdr_len) 2960 { 2961 dma_addr_t dma; 2962 struct sk_buff *skb = first->skb; 2963 struct ixgbevf_tx_buffer *tx_buffer; 2964 union ixgbe_adv_tx_desc *tx_desc; 2965 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 2966 unsigned int data_len = skb->data_len; 2967 unsigned int size = skb_headlen(skb); 2968 unsigned int paylen = skb->len - hdr_len; 2969 u32 tx_flags = first->tx_flags; 2970 __le32 cmd_type; 2971 u16 i = tx_ring->next_to_use; 2972 2973 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2974 2975 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen); 2976 cmd_type = ixgbevf_tx_cmd_type(tx_flags); 2977 2978 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 2979 if (dma_mapping_error(tx_ring->dev, dma)) 2980 goto dma_error; 2981 2982 /* record length, and DMA address */ 2983 dma_unmap_len_set(first, len, size); 2984 dma_unmap_addr_set(first, dma, dma); 2985 2986 tx_desc->read.buffer_addr = cpu_to_le64(dma); 2987 2988 for (;;) { 2989 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { 2990 tx_desc->read.cmd_type_len = 2991 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); 2992 2993 i++; 2994 tx_desc++; 
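			/* wrap the descriptor pointer and index back to the
			 * start of the ring once the last entry is used
			 */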
2995 if (i == tx_ring->count) { 2996 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 2997 i = 0; 2998 } 2999 3000 dma += IXGBE_MAX_DATA_PER_TXD; 3001 size -= IXGBE_MAX_DATA_PER_TXD; 3002 3003 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3004 tx_desc->read.olinfo_status = 0; 3005 } 3006 3007 if (likely(!data_len)) 3008 break; 3009 3010 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); 3011 3012 i++; 3013 tx_desc++; 3014 if (i == tx_ring->count) { 3015 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 3016 i = 0; 3017 } 3018 3019 size = skb_frag_size(frag); 3020 data_len -= size; 3021 3022 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 3023 DMA_TO_DEVICE); 3024 if (dma_mapping_error(tx_ring->dev, dma)) 3025 goto dma_error; 3026 3027 tx_buffer = &tx_ring->tx_buffer_info[i]; 3028 dma_unmap_len_set(tx_buffer, len, size); 3029 dma_unmap_addr_set(tx_buffer, dma, dma); 3030 3031 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3032 tx_desc->read.olinfo_status = 0; 3033 3034 frag++; 3035 } 3036 3037 /* write last descriptor with RS and EOP bits */ 3038 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); 3039 tx_desc->read.cmd_type_len = cmd_type; 3040 3041 /* set the timestamp */ 3042 first->time_stamp = jiffies; 3043 3044 /* Force memory writes to complete before letting h/w know there 3045 * are new descriptors to fetch. (Only applicable for weak-ordered 3046 * memory model archs, such as IA-64). 3047 * 3048 * We also need this memory barrier (wmb) to make certain all of the 3049 * status bits have been updated before next_to_watch is written. 3050 */ 3051 wmb(); 3052 3053 /* set next_to_watch value indicating a packet is present */ 3054 first->next_to_watch = tx_desc; 3055 3056 i++; 3057 if (i == tx_ring->count) 3058 i = 0; 3059 3060 tx_ring->next_to_use = i; 3061 3062 /* notify HW of packet */ 3063 writel(i, tx_ring->tail); 3064 3065 return; 3066 dma_error: 3067 dev_err(tx_ring->dev, "TX DMA map failed\n"); 3068 3069 /* clear dma mappings for failed tx_buffer_info map */ 3070 for (;;) { 3071 tx_buffer = &tx_ring->tx_buffer_info[i]; 3072 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); 3073 if (tx_buffer == first) 3074 break; 3075 if (i == 0) 3076 i = tx_ring->count; 3077 i--; 3078 } 3079 3080 tx_ring->next_to_use = i; 3081 } 3082 3083 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3084 { 3085 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3086 /* Herbert's original patch had: 3087 * smp_mb__after_netif_stop_queue(); 3088 * but since that doesn't exist yet, just open code it. */ 3089 smp_mb(); 3090 3091 /* We need to check again in a case another CPU has just 3092 * made room available. */ 3093 if (likely(ixgbevf_desc_unused(tx_ring) < size)) 3094 return -EBUSY; 3095 3096 /* A reprieve! 
 * - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	/* a TSO packet already carries its own context descriptor; only
	 * fall back to the checksum path when no TSO was performed
	 */
	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 *
@new_mtu: new value for maximum frame size 3216 * 3217 * Returns 0 on success, negative on failure 3218 **/ 3219 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3220 { 3221 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3222 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3223 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3224 3225 switch (adapter->hw.api_version) { 3226 case ixgbe_mbox_api_11: 3227 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3228 break; 3229 default: 3230 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3231 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3232 break; 3233 } 3234 3235 /* MTU < 68 is an error and causes problems on some kernels */ 3236 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3237 return -EINVAL; 3238 3239 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3240 netdev->mtu, new_mtu); 3241 /* must set new MTU before calling down or up */ 3242 netdev->mtu = new_mtu; 3243 3244 if (netif_running(netdev)) 3245 ixgbevf_reinit_locked(adapter); 3246 3247 return 0; 3248 } 3249 3250 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 3251 { 3252 struct net_device *netdev = pci_get_drvdata(pdev); 3253 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3254 #ifdef CONFIG_PM 3255 int retval = 0; 3256 #endif 3257 3258 netif_device_detach(netdev); 3259 3260 if (netif_running(netdev)) { 3261 rtnl_lock(); 3262 ixgbevf_down(adapter); 3263 ixgbevf_free_irq(adapter); 3264 ixgbevf_free_all_tx_resources(adapter); 3265 ixgbevf_free_all_rx_resources(adapter); 3266 rtnl_unlock(); 3267 } 3268 3269 ixgbevf_clear_interrupt_scheme(adapter); 3270 3271 #ifdef CONFIG_PM 3272 retval = pci_save_state(pdev); 3273 if (retval) 3274 return retval; 3275 3276 #endif 3277 pci_disable_device(pdev); 3278 3279 return 0; 3280 } 3281 3282 #ifdef CONFIG_PM 3283 static int ixgbevf_resume(struct pci_dev *pdev) 3284 { 3285 struct net_device *netdev = pci_get_drvdata(pdev); 3286 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3287 u32 err; 3288 3289 pci_set_power_state(pdev, PCI_D0); 3290 pci_restore_state(pdev); 3291 /* 3292 * pci_restore_state clears dev->state_saved so call 3293 * pci_save_state to restore it. 
3294 */ 3295 pci_save_state(pdev); 3296 3297 err = pci_enable_device_mem(pdev); 3298 if (err) { 3299 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3300 return err; 3301 } 3302 pci_set_master(pdev); 3303 3304 ixgbevf_reset(adapter); 3305 3306 rtnl_lock(); 3307 err = ixgbevf_init_interrupt_scheme(adapter); 3308 rtnl_unlock(); 3309 if (err) { 3310 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3311 return err; 3312 } 3313 3314 if (netif_running(netdev)) { 3315 err = ixgbevf_open(netdev); 3316 if (err) 3317 return err; 3318 } 3319 3320 netif_device_attach(netdev); 3321 3322 return err; 3323 } 3324 3325 #endif /* CONFIG_PM */ 3326 static void ixgbevf_shutdown(struct pci_dev *pdev) 3327 { 3328 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3329 } 3330 3331 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3332 struct rtnl_link_stats64 *stats) 3333 { 3334 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3335 unsigned int start; 3336 u64 bytes, packets; 3337 const struct ixgbevf_ring *ring; 3338 int i; 3339 3340 ixgbevf_update_stats(adapter); 3341 3342 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3343 3344 for (i = 0; i < adapter->num_rx_queues; i++) { 3345 ring = adapter->rx_ring[i]; 3346 do { 3347 start = u64_stats_fetch_begin_bh(&ring->syncp); 3348 bytes = ring->stats.bytes; 3349 packets = ring->stats.packets; 3350 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3351 stats->rx_bytes += bytes; 3352 stats->rx_packets += packets; 3353 } 3354 3355 for (i = 0; i < adapter->num_tx_queues; i++) { 3356 ring = adapter->tx_ring[i]; 3357 do { 3358 start = u64_stats_fetch_begin_bh(&ring->syncp); 3359 bytes = ring->stats.bytes; 3360 packets = ring->stats.packets; 3361 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3362 stats->tx_bytes += bytes; 3363 stats->tx_packets += packets; 3364 } 3365 3366 return stats; 3367 } 3368 3369 static const struct net_device_ops ixgbevf_netdev_ops = { 3370 .ndo_open = ixgbevf_open, 3371 .ndo_stop = ixgbevf_close, 3372 .ndo_start_xmit = ixgbevf_xmit_frame, 3373 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3374 .ndo_get_stats64 = ixgbevf_get_stats, 3375 .ndo_validate_addr = eth_validate_addr, 3376 .ndo_set_mac_address = ixgbevf_set_mac, 3377 .ndo_change_mtu = ixgbevf_change_mtu, 3378 .ndo_tx_timeout = ixgbevf_tx_timeout, 3379 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3380 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3381 #ifdef CONFIG_NET_RX_BUSY_POLL 3382 .ndo_busy_poll = ixgbevf_busy_poll_recv, 3383 #endif 3384 }; 3385 3386 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3387 { 3388 dev->netdev_ops = &ixgbevf_netdev_ops; 3389 ixgbevf_set_ethtool_ops(dev); 3390 dev->watchdog_timeo = 5 * HZ; 3391 } 3392 3393 /** 3394 * ixgbevf_probe - Device Initialization Routine 3395 * @pdev: PCI device information struct 3396 * @ent: entry in ixgbevf_pci_tbl 3397 * 3398 * Returns 0 on success, negative on failure 3399 * 3400 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3401 * The OS initialization, configuring of the adapter private structure, 3402 * and a hardware reset occur. 
3403 **/ 3404 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3405 { 3406 struct net_device *netdev; 3407 struct ixgbevf_adapter *adapter = NULL; 3408 struct ixgbe_hw *hw = NULL; 3409 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3410 static int cards_found; 3411 int err, pci_using_dac; 3412 3413 err = pci_enable_device(pdev); 3414 if (err) 3415 return err; 3416 3417 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 3418 pci_using_dac = 1; 3419 } else { 3420 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3421 if (err) { 3422 dev_err(&pdev->dev, "No usable DMA " 3423 "configuration, aborting\n"); 3424 goto err_dma; 3425 } 3426 pci_using_dac = 0; 3427 } 3428 3429 err = pci_request_regions(pdev, ixgbevf_driver_name); 3430 if (err) { 3431 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3432 goto err_pci_reg; 3433 } 3434 3435 pci_set_master(pdev); 3436 3437 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3438 MAX_TX_QUEUES); 3439 if (!netdev) { 3440 err = -ENOMEM; 3441 goto err_alloc_etherdev; 3442 } 3443 3444 SET_NETDEV_DEV(netdev, &pdev->dev); 3445 3446 pci_set_drvdata(pdev, netdev); 3447 adapter = netdev_priv(netdev); 3448 3449 adapter->netdev = netdev; 3450 adapter->pdev = pdev; 3451 hw = &adapter->hw; 3452 hw->back = adapter; 3453 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3454 3455 /* 3456 * call save state here in standalone driver because it relies on 3457 * adapter struct to exist, and needs to call netdev_priv 3458 */ 3459 pci_save_state(pdev); 3460 3461 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3462 pci_resource_len(pdev, 0)); 3463 if (!hw->hw_addr) { 3464 err = -EIO; 3465 goto err_ioremap; 3466 } 3467 3468 ixgbevf_assign_netdev_ops(netdev); 3469 3470 adapter->bd_number = cards_found; 3471 3472 /* Setup hw api */ 3473 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3474 hw->mac.type = ii->mac; 3475 3476 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3477 sizeof(struct ixgbe_mbx_operations)); 3478 3479 /* setup the private structure */ 3480 err = ixgbevf_sw_init(adapter); 3481 if (err) 3482 goto err_sw_init; 3483 3484 /* The HW MAC address was set and/or determined in sw_init */ 3485 if (!is_valid_ether_addr(netdev->dev_addr)) { 3486 pr_err("invalid MAC address\n"); 3487 err = -EIO; 3488 goto err_sw_init; 3489 } 3490 3491 netdev->hw_features = NETIF_F_SG | 3492 NETIF_F_IP_CSUM | 3493 NETIF_F_IPV6_CSUM | 3494 NETIF_F_TSO | 3495 NETIF_F_TSO6 | 3496 NETIF_F_RXCSUM; 3497 3498 netdev->features = netdev->hw_features | 3499 NETIF_F_HW_VLAN_CTAG_TX | 3500 NETIF_F_HW_VLAN_CTAG_RX | 3501 NETIF_F_HW_VLAN_CTAG_FILTER; 3502 3503 netdev->vlan_features |= NETIF_F_TSO; 3504 netdev->vlan_features |= NETIF_F_TSO6; 3505 netdev->vlan_features |= NETIF_F_IP_CSUM; 3506 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3507 netdev->vlan_features |= NETIF_F_SG; 3508 3509 if (pci_using_dac) 3510 netdev->features |= NETIF_F_HIGHDMA; 3511 3512 netdev->priv_flags |= IFF_UNICAST_FLT; 3513 3514 init_timer(&adapter->watchdog_timer); 3515 adapter->watchdog_timer.function = ixgbevf_watchdog; 3516 adapter->watchdog_timer.data = (unsigned long)adapter; 3517 3518 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3519 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3520 3521 err = ixgbevf_init_interrupt_scheme(adapter); 3522 if (err) 3523 goto err_sw_init; 3524 3525 strcpy(netdev->name, "eth%d"); 3526 3527 err = register_netdev(netdev); 3528 if (err) 3529 goto err_register; 
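	/* keep carrier off until the watchdog task reports link from the PF */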
3530 3531 netif_carrier_off(netdev); 3532 3533 ixgbevf_init_last_counter_stats(adapter); 3534 3535 /* print the MAC address */ 3536 hw_dbg(hw, "%pM\n", netdev->dev_addr); 3537 3538 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3539 3540 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3541 cards_found++; 3542 return 0; 3543 3544 err_register: 3545 ixgbevf_clear_interrupt_scheme(adapter); 3546 err_sw_init: 3547 ixgbevf_reset_interrupt_capability(adapter); 3548 iounmap(hw->hw_addr); 3549 err_ioremap: 3550 free_netdev(netdev); 3551 err_alloc_etherdev: 3552 pci_release_regions(pdev); 3553 err_pci_reg: 3554 err_dma: 3555 pci_disable_device(pdev); 3556 return err; 3557 } 3558 3559 /** 3560 * ixgbevf_remove - Device Removal Routine 3561 * @pdev: PCI device information struct 3562 * 3563 * ixgbevf_remove is called by the PCI subsystem to alert the driver 3564 * that it should release a PCI device. The could be caused by a 3565 * Hot-Plug event, or because the driver is going to be removed from 3566 * memory. 3567 **/ 3568 static void ixgbevf_remove(struct pci_dev *pdev) 3569 { 3570 struct net_device *netdev = pci_get_drvdata(pdev); 3571 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3572 3573 set_bit(__IXGBEVF_DOWN, &adapter->state); 3574 3575 del_timer_sync(&adapter->watchdog_timer); 3576 3577 cancel_work_sync(&adapter->reset_task); 3578 cancel_work_sync(&adapter->watchdog_task); 3579 3580 if (netdev->reg_state == NETREG_REGISTERED) 3581 unregister_netdev(netdev); 3582 3583 ixgbevf_clear_interrupt_scheme(adapter); 3584 ixgbevf_reset_interrupt_capability(adapter); 3585 3586 iounmap(adapter->hw.hw_addr); 3587 pci_release_regions(pdev); 3588 3589 hw_dbg(&adapter->hw, "Remove complete\n"); 3590 3591 free_netdev(netdev); 3592 3593 pci_disable_device(pdev); 3594 } 3595 3596 /** 3597 * ixgbevf_io_error_detected - called when PCI error is detected 3598 * @pdev: Pointer to PCI device 3599 * @state: The current pci connection state 3600 * 3601 * This function is called after a PCI bus error affecting 3602 * this device has been detected. 3603 */ 3604 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, 3605 pci_channel_state_t state) 3606 { 3607 struct net_device *netdev = pci_get_drvdata(pdev); 3608 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3609 3610 netif_device_detach(netdev); 3611 3612 if (state == pci_channel_io_perm_failure) 3613 return PCI_ERS_RESULT_DISCONNECT; 3614 3615 if (netif_running(netdev)) 3616 ixgbevf_down(adapter); 3617 3618 pci_disable_device(pdev); 3619 3620 /* Request a slot slot reset. */ 3621 return PCI_ERS_RESULT_NEED_RESET; 3622 } 3623 3624 /** 3625 * ixgbevf_io_slot_reset - called after the pci bus has been reset. 3626 * @pdev: Pointer to PCI device 3627 * 3628 * Restart the card from scratch, as if from a cold-boot. Implementation 3629 * resembles the first-half of the ixgbevf_resume routine. 3630 */ 3631 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 3632 { 3633 struct net_device *netdev = pci_get_drvdata(pdev); 3634 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3635 3636 if (pci_enable_device_mem(pdev)) { 3637 dev_err(&pdev->dev, 3638 "Cannot re-enable PCI device after reset.\n"); 3639 return PCI_ERS_RESULT_DISCONNECT; 3640 } 3641 3642 pci_set_master(pdev); 3643 3644 ixgbevf_reset(adapter); 3645 3646 return PCI_ERS_RESULT_RECOVERED; 3647 } 3648 3649 /** 3650 * ixgbevf_io_resume - called when traffic can start flowing again. 
3651 * @pdev: Pointer to PCI device 3652 * 3653 * This callback is called when the error recovery driver tells us that 3654 * its OK to resume normal operation. Implementation resembles the 3655 * second-half of the ixgbevf_resume routine. 3656 */ 3657 static void ixgbevf_io_resume(struct pci_dev *pdev) 3658 { 3659 struct net_device *netdev = pci_get_drvdata(pdev); 3660 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3661 3662 if (netif_running(netdev)) 3663 ixgbevf_up(adapter); 3664 3665 netif_device_attach(netdev); 3666 } 3667 3668 /* PCI Error Recovery (ERS) */ 3669 static const struct pci_error_handlers ixgbevf_err_handler = { 3670 .error_detected = ixgbevf_io_error_detected, 3671 .slot_reset = ixgbevf_io_slot_reset, 3672 .resume = ixgbevf_io_resume, 3673 }; 3674 3675 static struct pci_driver ixgbevf_driver = { 3676 .name = ixgbevf_driver_name, 3677 .id_table = ixgbevf_pci_tbl, 3678 .probe = ixgbevf_probe, 3679 .remove = ixgbevf_remove, 3680 #ifdef CONFIG_PM 3681 /* Power Management Hooks */ 3682 .suspend = ixgbevf_suspend, 3683 .resume = ixgbevf_resume, 3684 #endif 3685 .shutdown = ixgbevf_shutdown, 3686 .err_handler = &ixgbevf_err_handler 3687 }; 3688 3689 /** 3690 * ixgbevf_init_module - Driver Registration Routine 3691 * 3692 * ixgbevf_init_module is the first routine called when the driver is 3693 * loaded. All it does is register with the PCI subsystem. 3694 **/ 3695 static int __init ixgbevf_init_module(void) 3696 { 3697 int ret; 3698 pr_info("%s - version %s\n", ixgbevf_driver_string, 3699 ixgbevf_driver_version); 3700 3701 pr_info("%s\n", ixgbevf_copyright); 3702 3703 ret = pci_register_driver(&ixgbevf_driver); 3704 return ret; 3705 } 3706 3707 module_init(ixgbevf_init_module); 3708 3709 /** 3710 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3711 * 3712 * ixgbevf_exit_module is called just before the driver is removed 3713 * from memory. 3714 **/ 3715 static void __exit ixgbevf_exit_module(void) 3716 { 3717 pci_unregister_driver(&ixgbevf_driver); 3718 } 3719 3720 #ifdef DEBUG 3721 /** 3722 * ixgbevf_get_hw_dev_name - return device name string 3723 * used by hardware layer to print debugging information 3724 **/ 3725 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3726 { 3727 struct ixgbevf_adapter *adapter = hw->back; 3728 return adapter->netdev->name; 3729 } 3730 3731 #endif 3732 module_exit(ixgbevf_exit_module); 3733 3734 /* ixgbevf_main.c */ 3735