/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	rx_ring->next_to_use = val;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	ixgbevf_write_tail(rx_ring, val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb)
				goto no_buffers;

			bi->skb = skb;

			bi->dma = dma_map_single(rx_ring->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(rx_ring->dev, "Rx DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	rx_ring->rx_stats.alloc_rx_buff_failed++;
	if (rx_ring->next_to_use != i)
		ixgbevf_release_rx_desc(rx_ring, i);
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		    skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
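 *
 * Note (illustrative): q_vector->itr is kept in EITR register units and
 * (q_vector->itr >> 2) is the interval between interrupts in usecs (see
 * timepassed_us below), so bytes_perint is roughly bytes per usec, i.e.
 * MB/s.  This function only selects a latency range; ixgbevf_set_itr()
 * then maps that range to an ITR value and eases toward it over several
 * interrupts via new_itr = (10 * new * old) / (9 * new + old).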
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
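 *
 * Note: the first q_vectors msix_entries are the per-queue vectors
 * serviced by ixgbevf_msix_clean_rings(); the final entry is the "other
 * causes" vector (PF mailbox/link events) serviced by
 * ixgbevf_msix_other().  On failure, every IRQ requested so far is
 * freed again before returning.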
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
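 *
 * Note: the VF runs with MSI-X only, so this is simply a wrapper around
 * ixgbevf_request_msix_irqs(); there is no MSI or legacy interrupt
 * fallback here.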
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
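 *
 * Note: the TXDCTL value written below packs the prefetch, host and
 * write-back thresholds into a single register value (PTHRESH in the
 * lowest bits, HTHRESH shifted by 8, WTHRESH shifted by 16) along with
 * the ENABLE bit, and then polls for the queue to actually come up.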
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* prevent DMA from exceeding buffer space available */
	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
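 *
 * Note: both the multicast and the unicast updates below are mailbox
 * requests to the PF, which is why the whole sequence runs under
 * adapter->mbx_lock.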
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if (adapter->num_rx_queues != num_rx_queues) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_clear_bit();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1)
		adapter->num_rx_queues = num_tcs;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
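 *
 * Note: each ring's reg_idx is seeded with its queue index here; if the
 * PF later reports multiple traffic classes, ixgbevf_configure_dcb()
 * overrides the default Tx ring's reg_idx with the offset returned by
 * the PF.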
1950 **/ 1951 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1952 { 1953 struct ixgbevf_ring *ring; 1954 int rx = 0, tx = 0; 1955 1956 for (; tx < adapter->num_tx_queues; tx++) { 1957 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1958 if (!ring) 1959 goto err_allocation; 1960 1961 ring->dev = &adapter->pdev->dev; 1962 ring->netdev = adapter->netdev; 1963 ring->count = adapter->tx_ring_count; 1964 ring->queue_index = tx; 1965 ring->reg_idx = tx; 1966 1967 adapter->tx_ring[tx] = ring; 1968 } 1969 1970 for (; rx < adapter->num_rx_queues; rx++) { 1971 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1972 if (!ring) 1973 goto err_allocation; 1974 1975 ring->dev = &adapter->pdev->dev; 1976 ring->netdev = adapter->netdev; 1977 1978 ring->count = adapter->rx_ring_count; 1979 ring->queue_index = rx; 1980 ring->reg_idx = rx; 1981 1982 adapter->rx_ring[rx] = ring; 1983 } 1984 1985 return 0; 1986 1987 err_allocation: 1988 while (tx) { 1989 kfree(adapter->tx_ring[--tx]); 1990 adapter->tx_ring[tx] = NULL; 1991 } 1992 1993 while (rx) { 1994 kfree(adapter->rx_ring[--rx]); 1995 adapter->rx_ring[rx] = NULL; 1996 } 1997 return -ENOMEM; 1998 } 1999 2000 /** 2001 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 2002 * @adapter: board private structure to initialize 2003 * 2004 * Attempt to configure the interrupts using the best available 2005 * capabilities of the hardware and the kernel. 2006 **/ 2007 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 2008 { 2009 struct net_device *netdev = adapter->netdev; 2010 int err = 0; 2011 int vector, v_budget; 2012 2013 /* 2014 * It's easy to be greedy for MSI-X vectors, but it really 2015 * doesn't do us much good if we have a lot more vectors 2016 * than CPU's. So let's be conservative and only ask for 2017 * (roughly) the same number of vectors as there are CPU's. 2018 * The default is to use pairs of vectors. 2019 */ 2020 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 2021 v_budget = min_t(int, v_budget, num_online_cpus()); 2022 v_budget += NON_Q_VECTORS; 2023 2024 /* A failure in MSI-X entry allocation isn't fatal, but it does 2025 * mean we disable MSI-X capabilities of the adapter. */ 2026 adapter->msix_entries = kcalloc(v_budget, 2027 sizeof(struct msix_entry), GFP_KERNEL); 2028 if (!adapter->msix_entries) { 2029 err = -ENOMEM; 2030 goto out; 2031 } 2032 2033 for (vector = 0; vector < v_budget; vector++) 2034 adapter->msix_entries[vector].entry = vector; 2035 2036 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 2037 if (err) 2038 goto out; 2039 2040 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 2041 if (err) 2042 goto out; 2043 2044 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 2045 2046 out: 2047 return err; 2048 } 2049 2050 /** 2051 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 2052 * @adapter: board private structure to initialize 2053 * 2054 * We allocate one q_vector per queue interrupt. If allocation fails we 2055 * return -ENOMEM. 
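 * On success the q_vectors are registered with NAPI and are later
 * released by ixgbevf_free_q_vectors().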
2056 **/ 2057 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2058 { 2059 int q_idx, num_q_vectors; 2060 struct ixgbevf_q_vector *q_vector; 2061 2062 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2063 2064 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2065 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 2066 if (!q_vector) 2067 goto err_out; 2068 q_vector->adapter = adapter; 2069 q_vector->v_idx = q_idx; 2070 netif_napi_add(adapter->netdev, &q_vector->napi, 2071 ixgbevf_poll, 64); 2072 #ifdef CONFIG_NET_RX_BUSY_POLL 2073 napi_hash_add(&q_vector->napi); 2074 #endif 2075 adapter->q_vector[q_idx] = q_vector; 2076 } 2077 2078 return 0; 2079 2080 err_out: 2081 while (q_idx) { 2082 q_idx--; 2083 q_vector = adapter->q_vector[q_idx]; 2084 #ifdef CONFIG_NET_RX_BUSY_POLL 2085 napi_hash_del(&q_vector->napi); 2086 #endif 2087 netif_napi_del(&q_vector->napi); 2088 kfree(q_vector); 2089 adapter->q_vector[q_idx] = NULL; 2090 } 2091 return -ENOMEM; 2092 } 2093 2094 /** 2095 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2096 * @adapter: board private structure to initialize 2097 * 2098 * This function frees the memory allocated to the q_vectors. In addition if 2099 * NAPI is enabled it will delete any references to the NAPI struct prior 2100 * to freeing the q_vector. 2101 **/ 2102 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2103 { 2104 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2105 2106 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2107 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 2108 2109 adapter->q_vector[q_idx] = NULL; 2110 #ifdef CONFIG_NET_RX_BUSY_POLL 2111 napi_hash_del(&q_vector->napi); 2112 #endif 2113 netif_napi_del(&q_vector->napi); 2114 kfree(q_vector); 2115 } 2116 } 2117 2118 /** 2119 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2120 * @adapter: board private structure 2121 * 2122 **/ 2123 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2124 { 2125 pci_disable_msix(adapter->pdev); 2126 kfree(adapter->msix_entries); 2127 adapter->msix_entries = NULL; 2128 } 2129 2130 /** 2131 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2132 * @adapter: board private structure to initialize 2133 * 2134 **/ 2135 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2136 { 2137 int err; 2138 2139 /* Number of supported queues */ 2140 ixgbevf_set_num_queues(adapter); 2141 2142 err = ixgbevf_set_interrupt_capability(adapter); 2143 if (err) { 2144 hw_dbg(&adapter->hw, 2145 "Unable to setup interrupt capabilities\n"); 2146 goto err_set_interrupt; 2147 } 2148 2149 err = ixgbevf_alloc_q_vectors(adapter); 2150 if (err) { 2151 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 2152 "vectors\n"); 2153 goto err_alloc_q_vectors; 2154 } 2155 2156 err = ixgbevf_alloc_queues(adapter); 2157 if (err) { 2158 pr_err("Unable to allocate memory for queues\n"); 2159 goto err_alloc_queues; 2160 } 2161 2162 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 2163 "Tx Queue count = %u\n", 2164 (adapter->num_rx_queues > 1) ? 
"Enabled" : 2165 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2166 2167 set_bit(__IXGBEVF_DOWN, &adapter->state); 2168 2169 return 0; 2170 err_alloc_queues: 2171 ixgbevf_free_q_vectors(adapter); 2172 err_alloc_q_vectors: 2173 ixgbevf_reset_interrupt_capability(adapter); 2174 err_set_interrupt: 2175 return err; 2176 } 2177 2178 /** 2179 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2180 * @adapter: board private structure to clear interrupt scheme on 2181 * 2182 * We go through and clear interrupt specific resources and reset the structure 2183 * to pre-load conditions 2184 **/ 2185 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2186 { 2187 int i; 2188 2189 for (i = 0; i < adapter->num_tx_queues; i++) { 2190 kfree(adapter->tx_ring[i]); 2191 adapter->tx_ring[i] = NULL; 2192 } 2193 for (i = 0; i < adapter->num_rx_queues; i++) { 2194 kfree(adapter->rx_ring[i]); 2195 adapter->rx_ring[i] = NULL; 2196 } 2197 2198 adapter->num_tx_queues = 0; 2199 adapter->num_rx_queues = 0; 2200 2201 ixgbevf_free_q_vectors(adapter); 2202 ixgbevf_reset_interrupt_capability(adapter); 2203 } 2204 2205 /** 2206 * ixgbevf_sw_init - Initialize general software structures 2207 * (struct ixgbevf_adapter) 2208 * @adapter: board private structure to initialize 2209 * 2210 * ixgbevf_sw_init initializes the Adapter private data structure. 2211 * Fields are initialized based on PCI device information and 2212 * OS network device settings (MTU size). 2213 **/ 2214 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2215 { 2216 struct ixgbe_hw *hw = &adapter->hw; 2217 struct pci_dev *pdev = adapter->pdev; 2218 struct net_device *netdev = adapter->netdev; 2219 int err; 2220 2221 /* PCI config space info */ 2222 2223 hw->vendor_id = pdev->vendor; 2224 hw->device_id = pdev->device; 2225 hw->revision_id = pdev->revision; 2226 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2227 hw->subsystem_device_id = pdev->subsystem_device; 2228 2229 hw->mbx.ops.init_params(hw); 2230 2231 /* assume legacy case in which PF would only give VF 2 queues */ 2232 hw->mac.max_tx_queues = 2; 2233 hw->mac.max_rx_queues = 2; 2234 2235 /* lock to protect mailbox accesses */ 2236 spin_lock_init(&adapter->mbx_lock); 2237 2238 err = hw->mac.ops.reset_hw(hw); 2239 if (err) { 2240 dev_info(&pdev->dev, 2241 "PF still in reset state. 
Is the PF interface up?\n"); 2242 } else { 2243 err = hw->mac.ops.init_hw(hw); 2244 if (err) { 2245 pr_err("init_shared_code failed: %d\n", err); 2246 goto out; 2247 } 2248 ixgbevf_negotiate_api(adapter); 2249 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 2250 if (err) 2251 dev_info(&pdev->dev, "Error reading MAC address\n"); 2252 else if (is_zero_ether_addr(adapter->hw.mac.addr)) 2253 dev_info(&pdev->dev, 2254 "MAC address not assigned by administrator.\n"); 2255 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 2256 } 2257 2258 if (!is_valid_ether_addr(netdev->dev_addr)) { 2259 dev_info(&pdev->dev, "Assigning random MAC address\n"); 2260 eth_hw_addr_random(netdev); 2261 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); 2262 } 2263 2264 /* Enable dynamic interrupt throttling rates */ 2265 adapter->rx_itr_setting = 1; 2266 adapter->tx_itr_setting = 1; 2267 2268 /* set default ring sizes */ 2269 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2270 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2271 2272 set_bit(__IXGBEVF_DOWN, &adapter->state); 2273 return 0; 2274 2275 out: 2276 return err; 2277 } 2278 2279 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2280 { \ 2281 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2282 if (current_counter < last_counter) \ 2283 counter += 0x100000000LL; \ 2284 last_counter = current_counter; \ 2285 counter &= 0xFFFFFFFF00000000LL; \ 2286 counter |= current_counter; \ 2287 } 2288 2289 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2290 { \ 2291 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2292 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2293 u64 current_counter = (current_counter_msb << 32) | \ 2294 current_counter_lsb; \ 2295 if (current_counter < last_counter) \ 2296 counter += 0x1000000000LL; \ 2297 last_counter = current_counter; \ 2298 counter &= 0xFFFFFFF000000000LL; \ 2299 counter |= current_counter; \ 2300 } 2301 /** 2302 * ixgbevf_update_stats - Update the board statistics counters. 
2303 * @adapter: board private structure 2304 **/ 2305 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2306 { 2307 struct ixgbe_hw *hw = &adapter->hw; 2308 int i; 2309 2310 if (!adapter->link_up) 2311 return; 2312 2313 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2314 adapter->stats.vfgprc); 2315 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2316 adapter->stats.vfgptc); 2317 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2318 adapter->stats.last_vfgorc, 2319 adapter->stats.vfgorc); 2320 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2321 adapter->stats.last_vfgotc, 2322 adapter->stats.vfgotc); 2323 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2324 adapter->stats.vfmprc); 2325 2326 for (i = 0; i < adapter->num_rx_queues; i++) { 2327 adapter->hw_csum_rx_error += 2328 adapter->rx_ring[i]->hw_csum_rx_error; 2329 adapter->rx_ring[i]->hw_csum_rx_error = 0; 2330 } 2331 } 2332 2333 /** 2334 * ixgbevf_watchdog - Timer Call-back 2335 * @data: pointer to adapter cast into an unsigned long 2336 **/ 2337 static void ixgbevf_watchdog(unsigned long data) 2338 { 2339 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2340 struct ixgbe_hw *hw = &adapter->hw; 2341 u32 eics = 0; 2342 int i; 2343 2344 /* 2345 * Do the watchdog outside of interrupt context due to the lovely 2346 * delays that some of the newer hardware requires 2347 */ 2348 2349 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2350 goto watchdog_short_circuit; 2351 2352 /* get one bit for every active tx/rx interrupt vector */ 2353 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2354 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2355 if (qv->rx.ring || qv->tx.ring) 2356 eics |= 1 << i; 2357 } 2358 2359 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2360 2361 watchdog_short_circuit: 2362 schedule_work(&adapter->watchdog_task); 2363 } 2364 2365 /** 2366 * ixgbevf_tx_timeout - Respond to a Tx Hang 2367 * @netdev: network interface device structure 2368 **/ 2369 static void ixgbevf_tx_timeout(struct net_device *netdev) 2370 { 2371 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2372 2373 /* Do the reset outside of interrupt context */ 2374 schedule_work(&adapter->reset_task); 2375 } 2376 2377 static void ixgbevf_reset_task(struct work_struct *work) 2378 { 2379 struct ixgbevf_adapter *adapter; 2380 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2381 2382 /* If we're already down or resetting, just bail */ 2383 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2384 test_bit(__IXGBEVF_REMOVING, &adapter->state) || 2385 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2386 return; 2387 2388 adapter->tx_timeout_count++; 2389 2390 ixgbevf_reinit_locked(adapter); 2391 } 2392 2393 /** 2394 * ixgbevf_watchdog_task - worker thread to bring link up 2395 * @work: pointer to work_struct containing our data 2396 **/ 2397 static void ixgbevf_watchdog_task(struct work_struct *work) 2398 { 2399 struct ixgbevf_adapter *adapter = container_of(work, 2400 struct ixgbevf_adapter, 2401 watchdog_task); 2402 struct net_device *netdev = adapter->netdev; 2403 struct ixgbe_hw *hw = &adapter->hw; 2404 u32 link_speed = adapter->link_speed; 2405 bool link_up = adapter->link_up; 2406 s32 need_reset; 2407 2408 if (IXGBE_REMOVED(hw->hw_addr)) { 2409 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 2410 rtnl_lock(); 2411 ixgbevf_down(adapter); 2412 rtnl_unlock(); 2413 } 2414 return; 2415 } 2416 
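	/* rebuild the queue/interrupt layout if a reset of the queues was
	 * requested since the last run (see ixgbevf_queue_reset_subtask)
	 */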
ixgbevf_queue_reset_subtask(adapter); 2417 2418 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2419 2420 /* 2421 * Always check the link on the watchdog because we have 2422 * no LSC interrupt 2423 */ 2424 spin_lock_bh(&adapter->mbx_lock); 2425 2426 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2427 2428 spin_unlock_bh(&adapter->mbx_lock); 2429 2430 if (need_reset) { 2431 adapter->link_up = link_up; 2432 adapter->link_speed = link_speed; 2433 netif_carrier_off(netdev); 2434 netif_tx_stop_all_queues(netdev); 2435 schedule_work(&adapter->reset_task); 2436 goto pf_has_reset; 2437 } 2438 adapter->link_up = link_up; 2439 adapter->link_speed = link_speed; 2440 2441 if (link_up) { 2442 if (!netif_carrier_ok(netdev)) { 2443 char *link_speed_string; 2444 switch (link_speed) { 2445 case IXGBE_LINK_SPEED_10GB_FULL: 2446 link_speed_string = "10 Gbps"; 2447 break; 2448 case IXGBE_LINK_SPEED_1GB_FULL: 2449 link_speed_string = "1 Gbps"; 2450 break; 2451 case IXGBE_LINK_SPEED_100_FULL: 2452 link_speed_string = "100 Mbps"; 2453 break; 2454 default: 2455 link_speed_string = "unknown speed"; 2456 break; 2457 } 2458 dev_info(&adapter->pdev->dev, 2459 "NIC Link is Up, %s\n", link_speed_string); 2460 netif_carrier_on(netdev); 2461 netif_tx_wake_all_queues(netdev); 2462 } 2463 } else { 2464 adapter->link_up = false; 2465 adapter->link_speed = 0; 2466 if (netif_carrier_ok(netdev)) { 2467 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2468 netif_carrier_off(netdev); 2469 netif_tx_stop_all_queues(netdev); 2470 } 2471 } 2472 2473 ixgbevf_update_stats(adapter); 2474 2475 pf_has_reset: 2476 /* Reset the timer */ 2477 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && 2478 !test_bit(__IXGBEVF_REMOVING, &adapter->state)) 2479 mod_timer(&adapter->watchdog_timer, 2480 round_jiffies(jiffies + (2 * HZ))); 2481 2482 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2483 } 2484 2485 /** 2486 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2487 * @tx_ring: Tx descriptor ring for a specific queue 2488 * 2489 * Free all transmit software resources 2490 **/ 2491 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) 2492 { 2493 ixgbevf_clean_tx_ring(tx_ring); 2494 2495 vfree(tx_ring->tx_buffer_info); 2496 tx_ring->tx_buffer_info = NULL; 2497 2498 /* if not set, then don't free */ 2499 if (!tx_ring->desc) 2500 return; 2501 2502 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, 2503 tx_ring->dma); 2504 2505 tx_ring->desc = NULL; 2506 } 2507 2508 /** 2509 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2510 * @adapter: board private structure 2511 * 2512 * Free all transmit software resources 2513 **/ 2514 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2515 { 2516 int i; 2517 2518 for (i = 0; i < adapter->num_tx_queues; i++) 2519 if (adapter->tx_ring[i]->desc) 2520 ixgbevf_free_tx_resources(adapter->tx_ring[i]); 2521 } 2522 2523 /** 2524 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2525 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2526 * 2527 * Return 0 on success, negative on failure 2528 **/ 2529 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) 2530 { 2531 int size; 2532 2533 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2534 tx_ring->tx_buffer_info = vzalloc(size); 2535 if (!tx_ring->tx_buffer_info) 2536 goto err; 2537 2538 /* round up to nearest 4K */ 2539 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2540 tx_ring->size = ALIGN(tx_ring->size, 
					   4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
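 * (ixgbevf_open() does this by calling ixgbevf_free_all_rx_resources()
 * on its error path.)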
2623 * 2624 * Return 0 on success, negative on failure 2625 **/ 2626 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2627 { 2628 int i, err = 0; 2629 2630 for (i = 0; i < adapter->num_rx_queues; i++) { 2631 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); 2632 if (!err) 2633 continue; 2634 hw_dbg(&adapter->hw, 2635 "Allocation for Rx Queue %u failed\n", i); 2636 break; 2637 } 2638 return err; 2639 } 2640 2641 /** 2642 * ixgbevf_free_rx_resources - Free Rx Resources 2643 * @rx_ring: ring to clean the resources from 2644 * 2645 * Free all receive software resources 2646 **/ 2647 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) 2648 { 2649 ixgbevf_clean_rx_ring(rx_ring); 2650 2651 vfree(rx_ring->rx_buffer_info); 2652 rx_ring->rx_buffer_info = NULL; 2653 2654 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, 2655 rx_ring->dma); 2656 2657 rx_ring->desc = NULL; 2658 } 2659 2660 /** 2661 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2662 * @adapter: board private structure 2663 * 2664 * Free all receive software resources 2665 **/ 2666 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2667 { 2668 int i; 2669 2670 for (i = 0; i < adapter->num_rx_queues; i++) 2671 if (adapter->rx_ring[i]->desc) 2672 ixgbevf_free_rx_resources(adapter->rx_ring[i]); 2673 } 2674 2675 /** 2676 * ixgbevf_open - Called when a network interface is made active 2677 * @netdev: network interface device structure 2678 * 2679 * Returns 0 on success, negative value on failure 2680 * 2681 * The open entry point is called when a network interface is made 2682 * active by the system (IFF_UP). At this point all resources needed 2683 * for transmit and receive operations are allocated, the interrupt 2684 * handler is registered with the OS, the watchdog timer is started, 2685 * and the stack is notified that the interface is ready. 2686 **/ 2687 static int ixgbevf_open(struct net_device *netdev) 2688 { 2689 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2690 struct ixgbe_hw *hw = &adapter->hw; 2691 int err; 2692 2693 /* A previous failure to open the device because of a lack of 2694 * available MSIX vector resources may have reset the number 2695 * of msix vectors variable to zero. The only way to recover 2696 * is to unload/reload the driver and hope that the system has 2697 * been able to recover some MSIX vector resources. 2698 */ 2699 if (!adapter->num_msix_vectors) 2700 return -ENOMEM; 2701 2702 /* disallow open during test */ 2703 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2704 return -EBUSY; 2705 2706 if (hw->adapter_stopped) { 2707 ixgbevf_reset(adapter); 2708 /* if adapter is still stopped then PF isn't up and 2709 * the vf can't start. */ 2710 if (hw->adapter_stopped) { 2711 err = IXGBE_ERR_MBX; 2712 pr_err("Unable to start - perhaps the PF Driver isn't " 2713 "up yet\n"); 2714 goto err_setup_reset; 2715 } 2716 } 2717 2718 /* allocate transmit descriptors */ 2719 err = ixgbevf_setup_all_tx_resources(adapter); 2720 if (err) 2721 goto err_setup_tx; 2722 2723 /* allocate receive descriptors */ 2724 err = ixgbevf_setup_all_rx_resources(adapter); 2725 if (err) 2726 goto err_setup_rx; 2727 2728 ixgbevf_configure(adapter); 2729 2730 /* 2731 * Map the Tx/Rx rings to the vectors we were allotted. 
2732 * if request_irq will be called in this function map_rings 2733 * must be called *before* up_complete 2734 */ 2735 ixgbevf_map_rings_to_vectors(adapter); 2736 2737 ixgbevf_up_complete(adapter); 2738 2739 /* clear any pending interrupts, may auto mask */ 2740 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2741 err = ixgbevf_request_irq(adapter); 2742 if (err) 2743 goto err_req_irq; 2744 2745 ixgbevf_irq_enable(adapter); 2746 2747 return 0; 2748 2749 err_req_irq: 2750 ixgbevf_down(adapter); 2751 err_setup_rx: 2752 ixgbevf_free_all_rx_resources(adapter); 2753 err_setup_tx: 2754 ixgbevf_free_all_tx_resources(adapter); 2755 ixgbevf_reset(adapter); 2756 2757 err_setup_reset: 2758 2759 return err; 2760 } 2761 2762 /** 2763 * ixgbevf_close - Disables a network interface 2764 * @netdev: network interface device structure 2765 * 2766 * Returns 0, this is not allowed to fail 2767 * 2768 * The close entry point is called when an interface is de-activated 2769 * by the OS. The hardware is still under the drivers control, but 2770 * needs to be disabled. A global MAC reset is issued to stop the 2771 * hardware, and all transmit and receive resources are freed. 2772 **/ 2773 static int ixgbevf_close(struct net_device *netdev) 2774 { 2775 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2776 2777 ixgbevf_down(adapter); 2778 ixgbevf_free_irq(adapter); 2779 2780 ixgbevf_free_all_tx_resources(adapter); 2781 ixgbevf_free_all_rx_resources(adapter); 2782 2783 return 0; 2784 } 2785 2786 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) 2787 { 2788 struct net_device *dev = adapter->netdev; 2789 2790 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) 2791 return; 2792 2793 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; 2794 2795 /* if interface is down do nothing */ 2796 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2797 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2798 return; 2799 2800 /* Hardware has to reinitialize queues and interrupts to 2801 * match packet buffer alignment. Unfortunately, the 2802 * hardware is not flexible enough to do this dynamically. 2803 */ 2804 if (netif_running(dev)) 2805 ixgbevf_close(dev); 2806 2807 ixgbevf_clear_interrupt_scheme(adapter); 2808 ixgbevf_init_interrupt_scheme(adapter); 2809 2810 if (netif_running(dev)) 2811 ixgbevf_open(dev); 2812 } 2813 2814 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2815 u32 vlan_macip_lens, u32 type_tucmd, 2816 u32 mss_l4len_idx) 2817 { 2818 struct ixgbe_adv_tx_context_desc *context_desc; 2819 u16 i = tx_ring->next_to_use; 2820 2821 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2822 2823 i++; 2824 tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2825 2826 /* set bits to identify this as an advanced context descriptor */ 2827 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 2828 2829 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2830 context_desc->seqnum_seed = 0; 2831 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 2832 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2833 } 2834 2835 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 2836 struct ixgbevf_tx_buffer *first, 2837 u8 *hdr_len) 2838 { 2839 struct sk_buff *skb = first->skb; 2840 u32 vlan_macip_lens, type_tucmd; 2841 u32 mss_l4len_idx, l4len; 2842 int err; 2843 2844 if (skb->ip_summed != CHECKSUM_PARTIAL) 2845 return 0; 2846 2847 if (!skb_is_gso(skb)) 2848 return 0; 2849 2850 err = skb_cow_head(skb, 0); 2851 if (err < 0) 2852 return err; 2853 2854 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2855 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 2856 2857 if (skb->protocol == htons(ETH_P_IP)) { 2858 struct iphdr *iph = ip_hdr(skb); 2859 iph->tot_len = 0; 2860 iph->check = 0; 2861 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2862 iph->daddr, 0, 2863 IPPROTO_TCP, 2864 0); 2865 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2866 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 2867 IXGBE_TX_FLAGS_CSUM | 2868 IXGBE_TX_FLAGS_IPV4; 2869 } else if (skb_is_gso_v6(skb)) { 2870 ipv6_hdr(skb)->payload_len = 0; 2871 tcp_hdr(skb)->check = 2872 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2873 &ipv6_hdr(skb)->daddr, 2874 0, IPPROTO_TCP, 0); 2875 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 2876 IXGBE_TX_FLAGS_CSUM; 2877 } 2878 2879 /* compute header lengths */ 2880 l4len = tcp_hdrlen(skb); 2881 *hdr_len += l4len; 2882 *hdr_len = skb_transport_offset(skb) + l4len; 2883 2884 /* update gso size and bytecount with header size */ 2885 first->gso_segs = skb_shinfo(skb)->gso_segs; 2886 first->bytecount += (first->gso_segs - 1) * *hdr_len; 2887 2888 /* mss_l4len_id: use 1 as index for TSO */ 2889 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; 2890 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 2891 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; 2892 2893 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 2894 vlan_macip_lens = skb_network_header_len(skb); 2895 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2896 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2897 2898 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2899 type_tucmd, mss_l4len_idx); 2900 2901 return 1; 2902 } 2903 2904 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2905 struct ixgbevf_tx_buffer *first) 2906 { 2907 struct sk_buff *skb = first->skb; 2908 u32 vlan_macip_lens = 0; 2909 u32 mss_l4len_idx = 0; 2910 u32 type_tucmd = 0; 2911 2912 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2913 u8 l4_hdr = 0; 2914 switch (skb->protocol) { 2915 case htons(ETH_P_IP): 2916 vlan_macip_lens |= skb_network_header_len(skb); 2917 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2918 l4_hdr = ip_hdr(skb)->protocol; 2919 break; 2920 case htons(ETH_P_IPV6): 2921 vlan_macip_lens |= skb_network_header_len(skb); 2922 l4_hdr = ipv6_hdr(skb)->nexthdr; 2923 break; 2924 default: 2925 if (unlikely(net_ratelimit())) { 2926 dev_warn(tx_ring->dev, 2927 "partial checksum but proto=%x!\n", 2928 first->protocol); 2929 } 2930 break; 2931 } 2932 2933 switch (l4_hdr) { 2934 case IPPROTO_TCP: 2935 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2936 mss_l4len_idx = tcp_hdrlen(skb) << 2937 IXGBE_ADVTXD_L4LEN_SHIFT; 2938 break; 2939 case IPPROTO_SCTP: 2940 type_tucmd |= 
IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2941 mss_l4len_idx = sizeof(struct sctphdr) << 2942 IXGBE_ADVTXD_L4LEN_SHIFT; 2943 break; 2944 case IPPROTO_UDP: 2945 mss_l4len_idx = sizeof(struct udphdr) << 2946 IXGBE_ADVTXD_L4LEN_SHIFT; 2947 break; 2948 default: 2949 if (unlikely(net_ratelimit())) { 2950 dev_warn(tx_ring->dev, 2951 "partial checksum but l4 proto=%x!\n", 2952 l4_hdr); 2953 } 2954 break; 2955 } 2956 2957 /* update TX checksum flag */ 2958 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 2959 } 2960 2961 /* vlan_macip_lens: MACLEN, VLAN tag */ 2962 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2963 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2964 2965 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2966 type_tucmd, mss_l4len_idx); 2967 } 2968 2969 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) 2970 { 2971 /* set type for advanced descriptor with frame checksum insertion */ 2972 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | 2973 IXGBE_ADVTXD_DCMD_IFCS | 2974 IXGBE_ADVTXD_DCMD_DEXT); 2975 2976 /* set HW vlan bit if vlan is present */ 2977 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2978 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); 2979 2980 /* set segmentation enable bits for TSO/FSO */ 2981 if (tx_flags & IXGBE_TX_FLAGS_TSO) 2982 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); 2983 2984 return cmd_type; 2985 } 2986 2987 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, 2988 u32 tx_flags, unsigned int paylen) 2989 { 2990 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); 2991 2992 /* enable L4 checksum for TSO and TX checksum offload */ 2993 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2994 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); 2995 2996 /* enble IPv4 checksum for TSO */ 2997 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2998 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); 2999 3000 /* use index 1 context for TSO/FSO/FCOE */ 3001 if (tx_flags & IXGBE_TX_FLAGS_TSO) 3002 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); 3003 3004 /* Check Context must be set if Tx switch is enabled, which it 3005 * always is for case where virtual functions are running 3006 */ 3007 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); 3008 3009 tx_desc->read.olinfo_status = olinfo_status; 3010 } 3011 3012 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 3013 struct ixgbevf_tx_buffer *first, 3014 const u8 hdr_len) 3015 { 3016 dma_addr_t dma; 3017 struct sk_buff *skb = first->skb; 3018 struct ixgbevf_tx_buffer *tx_buffer; 3019 union ixgbe_adv_tx_desc *tx_desc; 3020 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 3021 unsigned int data_len = skb->data_len; 3022 unsigned int size = skb_headlen(skb); 3023 unsigned int paylen = skb->len - hdr_len; 3024 u32 tx_flags = first->tx_flags; 3025 __le32 cmd_type; 3026 u16 i = tx_ring->next_to_use; 3027 3028 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 3029 3030 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen); 3031 cmd_type = ixgbevf_tx_cmd_type(tx_flags); 3032 3033 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 3034 if (dma_mapping_error(tx_ring->dev, dma)) 3035 goto dma_error; 3036 3037 /* record length, and DMA address */ 3038 dma_unmap_len_set(first, len, size); 3039 dma_unmap_addr_set(first, dma, dma); 3040 3041 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3042 3043 for (;;) { 3044 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { 3045 tx_desc->read.cmd_type_len = 3046 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); 3047 3048 i++; 3049 tx_desc++; 
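			/* wrap the descriptor index back to the start of
			 * the ring once we run off the end
			 */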
3050 if (i == tx_ring->count) { 3051 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 3052 i = 0; 3053 } 3054 3055 dma += IXGBE_MAX_DATA_PER_TXD; 3056 size -= IXGBE_MAX_DATA_PER_TXD; 3057 3058 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3059 tx_desc->read.olinfo_status = 0; 3060 } 3061 3062 if (likely(!data_len)) 3063 break; 3064 3065 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); 3066 3067 i++; 3068 tx_desc++; 3069 if (i == tx_ring->count) { 3070 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 3071 i = 0; 3072 } 3073 3074 size = skb_frag_size(frag); 3075 data_len -= size; 3076 3077 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 3078 DMA_TO_DEVICE); 3079 if (dma_mapping_error(tx_ring->dev, dma)) 3080 goto dma_error; 3081 3082 tx_buffer = &tx_ring->tx_buffer_info[i]; 3083 dma_unmap_len_set(tx_buffer, len, size); 3084 dma_unmap_addr_set(tx_buffer, dma, dma); 3085 3086 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3087 tx_desc->read.olinfo_status = 0; 3088 3089 frag++; 3090 } 3091 3092 /* write last descriptor with RS and EOP bits */ 3093 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); 3094 tx_desc->read.cmd_type_len = cmd_type; 3095 3096 /* set the timestamp */ 3097 first->time_stamp = jiffies; 3098 3099 /* Force memory writes to complete before letting h/w know there 3100 * are new descriptors to fetch. (Only applicable for weak-ordered 3101 * memory model archs, such as IA-64). 3102 * 3103 * We also need this memory barrier (wmb) to make certain all of the 3104 * status bits have been updated before next_to_watch is written. 3105 */ 3106 wmb(); 3107 3108 /* set next_to_watch value indicating a packet is present */ 3109 first->next_to_watch = tx_desc; 3110 3111 i++; 3112 if (i == tx_ring->count) 3113 i = 0; 3114 3115 tx_ring->next_to_use = i; 3116 3117 /* notify HW of packet */ 3118 ixgbevf_write_tail(tx_ring, i); 3119 3120 return; 3121 dma_error: 3122 dev_err(tx_ring->dev, "TX DMA map failed\n"); 3123 3124 /* clear dma mappings for failed tx_buffer_info map */ 3125 for (;;) { 3126 tx_buffer = &tx_ring->tx_buffer_info[i]; 3127 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); 3128 if (tx_buffer == first) 3129 break; 3130 if (i == 0) 3131 i = tx_ring->count; 3132 i--; 3133 } 3134 3135 tx_ring->next_to_use = i; 3136 } 3137 3138 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3139 { 3140 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3141 /* Herbert's original patch had: 3142 * smp_mb__after_netif_stop_queue(); 3143 * but since that doesn't exist yet, just open code it. */ 3144 smp_mb(); 3145 3146 /* We need to check again in a case another CPU has just 3147 * made room available. */ 3148 if (likely(ixgbevf_desc_unused(tx_ring) < size)) 3149 return -EBUSY; 3150 3151 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 3152 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 3153 ++tx_ring->tx_stats.restart_queue; 3154 3155 return 0; 3156 } 3157 3158 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3159 { 3160 if (likely(ixgbevf_desc_unused(tx_ring) >= size)) 3161 return 0; 3162 return __ixgbevf_maybe_stop_tx(tx_ring, size); 3163 } 3164 3165 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3166 { 3167 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3168 struct ixgbevf_tx_buffer *first; 3169 struct ixgbevf_ring *tx_ring; 3170 int tso; 3171 u32 tx_flags = 0; 3172 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 3173 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3174 unsigned short f; 3175 #endif 3176 u8 hdr_len = 0; 3177 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 3178 3179 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 3180 dev_kfree_skb(skb); 3181 return NETDEV_TX_OK; 3182 } 3183 3184 tx_ring = adapter->tx_ring[skb->queue_mapping]; 3185 3186 /* 3187 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 3188 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 3189 * + 2 desc gap to keep tail from touching head, 3190 * + 1 desc for context descriptor, 3191 * otherwise try next time 3192 */ 3193 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3194 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3195 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3196 #else 3197 count += skb_shinfo(skb)->nr_frags; 3198 #endif 3199 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 3200 tx_ring->tx_stats.tx_busy++; 3201 return NETDEV_TX_BUSY; 3202 } 3203 3204 /* record the location of the first descriptor for this packet */ 3205 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 3206 first->skb = skb; 3207 first->bytecount = skb->len; 3208 first->gso_segs = 1; 3209 3210 if (vlan_tx_tag_present(skb)) { 3211 tx_flags |= vlan_tx_tag_get(skb); 3212 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3213 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3214 } 3215 3216 /* record initial flags and protocol */ 3217 first->tx_flags = tx_flags; 3218 first->protocol = vlan_get_protocol(skb); 3219 3220 tso = ixgbevf_tso(tx_ring, first, &hdr_len); 3221 if (tso < 0) 3222 goto out_drop; 3223 else if (!tso) 3224 ixgbevf_tx_csum(tx_ring, first); 3225 3226 ixgbevf_tx_map(tx_ring, first, hdr_len); 3227 3228 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 3229 3230 return NETDEV_TX_OK; 3231 3232 out_drop: 3233 dev_kfree_skb_any(first->skb); 3234 first->skb = NULL; 3235 3236 return NETDEV_TX_OK; 3237 } 3238 3239 /** 3240 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3241 * @netdev: network interface device structure 3242 * @p: pointer to an address structure 3243 * 3244 * Returns 0 on success, negative on failure 3245 **/ 3246 static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3247 { 3248 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3249 struct ixgbe_hw *hw = &adapter->hw; 3250 struct sockaddr *addr = p; 3251 3252 if (!is_valid_ether_addr(addr->sa_data)) 3253 return -EADDRNOTAVAIL; 3254 3255 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3256 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3257 3258 spin_lock_bh(&adapter->mbx_lock); 3259 3260 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3261 3262 spin_unlock_bh(&adapter->mbx_lock); 3263 3264 return 0; 3265 } 3266 3267 /** 3268 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3269 * @netdev: network interface device structure 
3270 * @new_mtu: new value for maximum frame size 3271 * 3272 * Returns 0 on success, negative on failure 3273 **/ 3274 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3275 { 3276 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3277 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3278 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3279 3280 switch (adapter->hw.api_version) { 3281 case ixgbe_mbox_api_11: 3282 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3283 break; 3284 default: 3285 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3286 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3287 break; 3288 } 3289 3290 /* MTU < 68 is an error and causes problems on some kernels */ 3291 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3292 return -EINVAL; 3293 3294 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3295 netdev->mtu, new_mtu); 3296 /* must set new MTU before calling down or up */ 3297 netdev->mtu = new_mtu; 3298 3299 if (netif_running(netdev)) 3300 ixgbevf_reinit_locked(adapter); 3301 3302 return 0; 3303 } 3304 3305 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 3306 { 3307 struct net_device *netdev = pci_get_drvdata(pdev); 3308 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3309 #ifdef CONFIG_PM 3310 int retval = 0; 3311 #endif 3312 3313 netif_device_detach(netdev); 3314 3315 if (netif_running(netdev)) { 3316 rtnl_lock(); 3317 ixgbevf_down(adapter); 3318 ixgbevf_free_irq(adapter); 3319 ixgbevf_free_all_tx_resources(adapter); 3320 ixgbevf_free_all_rx_resources(adapter); 3321 rtnl_unlock(); 3322 } 3323 3324 ixgbevf_clear_interrupt_scheme(adapter); 3325 3326 #ifdef CONFIG_PM 3327 retval = pci_save_state(pdev); 3328 if (retval) 3329 return retval; 3330 3331 #endif 3332 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) 3333 pci_disable_device(pdev); 3334 3335 return 0; 3336 } 3337 3338 #ifdef CONFIG_PM 3339 static int ixgbevf_resume(struct pci_dev *pdev) 3340 { 3341 struct net_device *netdev = pci_get_drvdata(pdev); 3342 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3343 u32 err; 3344 3345 pci_restore_state(pdev); 3346 /* 3347 * pci_restore_state clears dev->state_saved so call 3348 * pci_save_state to restore it. 
3349 */ 3350 pci_save_state(pdev); 3351 3352 err = pci_enable_device_mem(pdev); 3353 if (err) { 3354 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3355 return err; 3356 } 3357 smp_mb__before_clear_bit(); 3358 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 3359 pci_set_master(pdev); 3360 3361 ixgbevf_reset(adapter); 3362 3363 rtnl_lock(); 3364 err = ixgbevf_init_interrupt_scheme(adapter); 3365 rtnl_unlock(); 3366 if (err) { 3367 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3368 return err; 3369 } 3370 3371 if (netif_running(netdev)) { 3372 err = ixgbevf_open(netdev); 3373 if (err) 3374 return err; 3375 } 3376 3377 netif_device_attach(netdev); 3378 3379 return err; 3380 } 3381 3382 #endif /* CONFIG_PM */ 3383 static void ixgbevf_shutdown(struct pci_dev *pdev) 3384 { 3385 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3386 } 3387 3388 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3389 struct rtnl_link_stats64 *stats) 3390 { 3391 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3392 unsigned int start; 3393 u64 bytes, packets; 3394 const struct ixgbevf_ring *ring; 3395 int i; 3396 3397 ixgbevf_update_stats(adapter); 3398 3399 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3400 3401 for (i = 0; i < adapter->num_rx_queues; i++) { 3402 ring = adapter->rx_ring[i]; 3403 do { 3404 start = u64_stats_fetch_begin_irq(&ring->syncp); 3405 bytes = ring->stats.bytes; 3406 packets = ring->stats.packets; 3407 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 3408 stats->rx_bytes += bytes; 3409 stats->rx_packets += packets; 3410 } 3411 3412 for (i = 0; i < adapter->num_tx_queues; i++) { 3413 ring = adapter->tx_ring[i]; 3414 do { 3415 start = u64_stats_fetch_begin_irq(&ring->syncp); 3416 bytes = ring->stats.bytes; 3417 packets = ring->stats.packets; 3418 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 3419 stats->tx_bytes += bytes; 3420 stats->tx_packets += packets; 3421 } 3422 3423 return stats; 3424 } 3425 3426 static const struct net_device_ops ixgbevf_netdev_ops = { 3427 .ndo_open = ixgbevf_open, 3428 .ndo_stop = ixgbevf_close, 3429 .ndo_start_xmit = ixgbevf_xmit_frame, 3430 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3431 .ndo_get_stats64 = ixgbevf_get_stats, 3432 .ndo_validate_addr = eth_validate_addr, 3433 .ndo_set_mac_address = ixgbevf_set_mac, 3434 .ndo_change_mtu = ixgbevf_change_mtu, 3435 .ndo_tx_timeout = ixgbevf_tx_timeout, 3436 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3437 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3438 #ifdef CONFIG_NET_RX_BUSY_POLL 3439 .ndo_busy_poll = ixgbevf_busy_poll_recv, 3440 #endif 3441 }; 3442 3443 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3444 { 3445 dev->netdev_ops = &ixgbevf_netdev_ops; 3446 ixgbevf_set_ethtool_ops(dev); 3447 dev->watchdog_timeo = 5 * HZ; 3448 } 3449 3450 /** 3451 * ixgbevf_probe - Device Initialization Routine 3452 * @pdev: PCI device information struct 3453 * @ent: entry in ixgbevf_pci_tbl 3454 * 3455 * Returns 0 on success, negative on failure 3456 * 3457 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3458 * The OS initialization, configuring of the adapter private structure, 3459 * and a hardware reset occur. 
3460 **/ 3461 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3462 { 3463 struct net_device *netdev; 3464 struct ixgbevf_adapter *adapter = NULL; 3465 struct ixgbe_hw *hw = NULL; 3466 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3467 static int cards_found; 3468 int err, pci_using_dac; 3469 3470 err = pci_enable_device(pdev); 3471 if (err) 3472 return err; 3473 3474 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 3475 pci_using_dac = 1; 3476 } else { 3477 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3478 if (err) { 3479 dev_err(&pdev->dev, "No usable DMA " 3480 "configuration, aborting\n"); 3481 goto err_dma; 3482 } 3483 pci_using_dac = 0; 3484 } 3485 3486 err = pci_request_regions(pdev, ixgbevf_driver_name); 3487 if (err) { 3488 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3489 goto err_pci_reg; 3490 } 3491 3492 pci_set_master(pdev); 3493 3494 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3495 MAX_TX_QUEUES); 3496 if (!netdev) { 3497 err = -ENOMEM; 3498 goto err_alloc_etherdev; 3499 } 3500 3501 SET_NETDEV_DEV(netdev, &pdev->dev); 3502 3503 pci_set_drvdata(pdev, netdev); 3504 adapter = netdev_priv(netdev); 3505 3506 adapter->netdev = netdev; 3507 adapter->pdev = pdev; 3508 hw = &adapter->hw; 3509 hw->back = adapter; 3510 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3511 3512 /* 3513 * call save state here in standalone driver because it relies on 3514 * adapter struct to exist, and needs to call netdev_priv 3515 */ 3516 pci_save_state(pdev); 3517 3518 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3519 pci_resource_len(pdev, 0)); 3520 adapter->io_addr = hw->hw_addr; 3521 if (!hw->hw_addr) { 3522 err = -EIO; 3523 goto err_ioremap; 3524 } 3525 3526 ixgbevf_assign_netdev_ops(netdev); 3527 3528 adapter->bd_number = cards_found; 3529 3530 /* Setup hw api */ 3531 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3532 hw->mac.type = ii->mac; 3533 3534 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3535 sizeof(struct ixgbe_mbx_operations)); 3536 3537 /* setup the private structure */ 3538 err = ixgbevf_sw_init(adapter); 3539 if (err) 3540 goto err_sw_init; 3541 3542 /* The HW MAC address was set and/or determined in sw_init */ 3543 if (!is_valid_ether_addr(netdev->dev_addr)) { 3544 pr_err("invalid MAC address\n"); 3545 err = -EIO; 3546 goto err_sw_init; 3547 } 3548 3549 netdev->hw_features = NETIF_F_SG | 3550 NETIF_F_IP_CSUM | 3551 NETIF_F_IPV6_CSUM | 3552 NETIF_F_TSO | 3553 NETIF_F_TSO6 | 3554 NETIF_F_RXCSUM; 3555 3556 netdev->features = netdev->hw_features | 3557 NETIF_F_HW_VLAN_CTAG_TX | 3558 NETIF_F_HW_VLAN_CTAG_RX | 3559 NETIF_F_HW_VLAN_CTAG_FILTER; 3560 3561 netdev->vlan_features |= NETIF_F_TSO; 3562 netdev->vlan_features |= NETIF_F_TSO6; 3563 netdev->vlan_features |= NETIF_F_IP_CSUM; 3564 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3565 netdev->vlan_features |= NETIF_F_SG; 3566 3567 if (pci_using_dac) 3568 netdev->features |= NETIF_F_HIGHDMA; 3569 3570 netdev->priv_flags |= IFF_UNICAST_FLT; 3571 3572 init_timer(&adapter->watchdog_timer); 3573 adapter->watchdog_timer.function = ixgbevf_watchdog; 3574 adapter->watchdog_timer.data = (unsigned long)adapter; 3575 3576 if (IXGBE_REMOVED(hw->hw_addr)) { 3577 err = -EIO; 3578 goto err_sw_init; 3579 } 3580 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3581 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3582 set_bit(__IXGBEVF_WORK_INIT, &adapter->state); 3583 3584 err = 
ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
3703 */ 3704 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 3705 { 3706 struct net_device *netdev = pci_get_drvdata(pdev); 3707 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3708 3709 if (pci_enable_device_mem(pdev)) { 3710 dev_err(&pdev->dev, 3711 "Cannot re-enable PCI device after reset.\n"); 3712 return PCI_ERS_RESULT_DISCONNECT; 3713 } 3714 3715 smp_mb__before_clear_bit(); 3716 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 3717 pci_set_master(pdev); 3718 3719 ixgbevf_reset(adapter); 3720 3721 return PCI_ERS_RESULT_RECOVERED; 3722 } 3723 3724 /** 3725 * ixgbevf_io_resume - called when traffic can start flowing again. 3726 * @pdev: Pointer to PCI device 3727 * 3728 * This callback is called when the error recovery driver tells us that 3729 * its OK to resume normal operation. Implementation resembles the 3730 * second-half of the ixgbevf_resume routine. 3731 */ 3732 static void ixgbevf_io_resume(struct pci_dev *pdev) 3733 { 3734 struct net_device *netdev = pci_get_drvdata(pdev); 3735 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3736 3737 if (netif_running(netdev)) 3738 ixgbevf_up(adapter); 3739 3740 netif_device_attach(netdev); 3741 } 3742 3743 /* PCI Error Recovery (ERS) */ 3744 static const struct pci_error_handlers ixgbevf_err_handler = { 3745 .error_detected = ixgbevf_io_error_detected, 3746 .slot_reset = ixgbevf_io_slot_reset, 3747 .resume = ixgbevf_io_resume, 3748 }; 3749 3750 static struct pci_driver ixgbevf_driver = { 3751 .name = ixgbevf_driver_name, 3752 .id_table = ixgbevf_pci_tbl, 3753 .probe = ixgbevf_probe, 3754 .remove = ixgbevf_remove, 3755 #ifdef CONFIG_PM 3756 /* Power Management Hooks */ 3757 .suspend = ixgbevf_suspend, 3758 .resume = ixgbevf_resume, 3759 #endif 3760 .shutdown = ixgbevf_shutdown, 3761 .err_handler = &ixgbevf_err_handler 3762 }; 3763 3764 /** 3765 * ixgbevf_init_module - Driver Registration Routine 3766 * 3767 * ixgbevf_init_module is the first routine called when the driver is 3768 * loaded. All it does is register with the PCI subsystem. 3769 **/ 3770 static int __init ixgbevf_init_module(void) 3771 { 3772 int ret; 3773 pr_info("%s - version %s\n", ixgbevf_driver_string, 3774 ixgbevf_driver_version); 3775 3776 pr_info("%s\n", ixgbevf_copyright); 3777 3778 ret = pci_register_driver(&ixgbevf_driver); 3779 return ret; 3780 } 3781 3782 module_init(ixgbevf_init_module); 3783 3784 /** 3785 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3786 * 3787 * ixgbevf_exit_module is called just before the driver is removed 3788 * from memory. 3789 **/ 3790 static void __exit ixgbevf_exit_module(void) 3791 { 3792 pci_unregister_driver(&ixgbevf_driver); 3793 } 3794 3795 #ifdef DEBUG 3796 /** 3797 * ixgbevf_get_hw_dev_name - return device name string 3798 * used by hardware layer to print debugging information 3799 **/ 3800 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3801 { 3802 struct ixgbevf_adapter *adapter = hw->back; 3803 return adapter->netdev->name; 3804 } 3805 3806 #endif 3807 module_exit(ixgbevf_exit_module); 3808 3809 /* ixgbevf_main.c */ 3810