1 /******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2015 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, see <http://www.gnu.org/licenses/>. 17 18 The full GNU General Public License is included in this distribution in 19 the file called "COPYING". 20 21 Contact Information: 22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 25 *******************************************************************************/ 26 27 /****************************************************************************** 28 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 29 ******************************************************************************/ 30 31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32 33 #include <linux/types.h> 34 #include <linux/bitops.h> 35 #include <linux/module.h> 36 #include <linux/pci.h> 37 #include <linux/netdevice.h> 38 #include <linux/vmalloc.h> 39 #include <linux/string.h> 40 #include <linux/in.h> 41 #include <linux/ip.h> 42 #include <linux/tcp.h> 43 #include <linux/sctp.h> 44 #include <linux/ipv6.h> 45 #include <linux/slab.h> 46 #include <net/checksum.h> 47 #include <net/ip6_checksum.h> 48 #include <linux/ethtool.h> 49 #include <linux/if.h> 50 #include <linux/if_vlan.h> 51 #include <linux/prefetch.h> 52 53 #include "ixgbevf.h" 54 55 const char ixgbevf_driver_name[] = "ixgbevf"; 56 static const char ixgbevf_driver_string[] = 57 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 58 59 #define DRV_VERSION "3.2.2-k" 60 const char ixgbevf_driver_version[] = DRV_VERSION; 61 static char ixgbevf_copyright[] = 62 "Copyright (c) 2009 - 2015 Intel Corporation."; 63 64 static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 65 [board_82599_vf] = &ixgbevf_82599_vf_info, 66 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info, 67 [board_X540_vf] = &ixgbevf_X540_vf_info, 68 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info, 69 [board_X550_vf] = &ixgbevf_X550_vf_info, 70 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, 71 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, 72 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, 73 [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, 74 }; 75 76 /* ixgbevf_pci_tbl - PCI Device ID Table 77 * 78 * Wildcard entries (PCI_ANY_ID) should come last 79 * Last entry must be all 0s 80 * 81 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 82 * Class, Class Mask, private data (not used) } 83 */ 84 static const struct pci_device_id ixgbevf_pci_tbl[] = { 85 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, 86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv }, 87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, 88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv }, 89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, 90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, 91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, 92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, 93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, 94 /* required last entry */ 95 {0, } 96 }; 97 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 98 99 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 100 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); 101 MODULE_LICENSE("GPL"); 102 MODULE_VERSION(DRV_VERSION); 103 104 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 105 static int debug = -1; 106 module_param(debug, int, 0); 107 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 108 109 static struct workqueue_struct *ixgbevf_wq; 110 111 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) 112 { 113 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && 114 !test_bit(__IXGBEVF_REMOVING, &adapter->state) && 115 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) 116 queue_work(ixgbevf_wq, &adapter->service_task); 117 } 118 119 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) 120 { 121 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); 122 123 /* flush memory to make sure state is correct before next watchdog */ 124 smp_mb__before_atomic(); 125 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); 126 } 127 128 /* forward decls */ 129 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); 130 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 131 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 132 133 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) 134 { 135 struct ixgbevf_adapter *adapter = hw->back; 136 137 if (!hw->hw_addr) 138 return; 139 hw->hw_addr = NULL; 140 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 141 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) 142 ixgbevf_service_event_schedule(adapter); 143 } 144 145 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) 146 { 147 u32 value; 148 149 /* The following check not only optimizes a bit by not 150 * performing a read on the status register when the 151 * register just read was a status register read that 152 * returned IXGBE_FAILED_READ_REG. It also blocks any 153 * potential recursion. 154 */ 155 if (reg == IXGBE_VFSTATUS) { 156 ixgbevf_remove_adapter(hw); 157 return; 158 } 159 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS); 160 if (value == IXGBE_FAILED_READ_REG) 161 ixgbevf_remove_adapter(hw); 162 } 163 164 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) 165 { 166 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); 167 u32 value; 168 169 if (IXGBE_REMOVED(reg_addr)) 170 return IXGBE_FAILED_READ_REG; 171 value = readl(reg_addr + reg); 172 if (unlikely(value == IXGBE_FAILED_READ_REG)) 173 ixgbevf_check_remove(hw, reg); 174 return value; 175 } 176 177 /** 178 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 179 * @adapter: pointer to adapter struct 180 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 181 * @queue: queue to map the corresponding interrupt to 182 * @msix_vector: the vector to map to the corresponding queue 183 **/ 184 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 185 u8 queue, u8 msix_vector) 186 { 187 u32 ivar, index; 188 struct ixgbe_hw *hw = &adapter->hw; 189 190 if (direction == -1) { 191 /* other causes */ 192 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 193 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 194 ivar &= ~0xFF; 195 ivar |= msix_vector; 196 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 197 } else { 198 /* Tx or Rx causes */ 199 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 200 index = ((16 * (queue & 1)) + (8 * direction)); 201 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 202 ivar &= ~(0xFF << index); 203 ivar |= (msix_vector << index); 204 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 205 } 206 } 207 208 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 209 struct ixgbevf_tx_buffer *tx_buffer) 210 { 211 if (tx_buffer->skb) { 212 dev_kfree_skb_any(tx_buffer->skb); 213 if (dma_unmap_len(tx_buffer, len)) 214 dma_unmap_single(tx_ring->dev, 215 dma_unmap_addr(tx_buffer, dma), 216 dma_unmap_len(tx_buffer, len), 217 DMA_TO_DEVICE); 218 } else if (dma_unmap_len(tx_buffer, len)) { 219 dma_unmap_page(tx_ring->dev, 220 dma_unmap_addr(tx_buffer, dma), 221 dma_unmap_len(tx_buffer, len), 222 DMA_TO_DEVICE); 223 } 224 tx_buffer->next_to_watch = NULL; 225 tx_buffer->skb = NULL; 226 dma_unmap_len_set(tx_buffer, len, 0); 227 /* tx_buffer must be completely set up in the transmit path */ 228 } 229 230 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) 231 { 232 return ring->stats.packets; 233 } 234 235 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) 236 { 237 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); 238 struct ixgbe_hw *hw = &adapter->hw; 239 240 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); 241 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); 242 243 if (head != tail) 244 return (head < tail) ? 245 tail - head : (tail + ring->count - head); 246 247 return 0; 248 } 249 250 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) 251 { 252 u32 tx_done = ixgbevf_get_tx_completed(tx_ring); 253 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 254 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); 255 256 clear_check_for_tx_hang(tx_ring); 257 258 /* Check for a hung queue, but be thorough. This verifies 259 * that a transmit has been completed since the previous 260 * check AND there is at least one packet pending. The 261 * ARMED bit is set to indicate a potential hang. 262 */ 263 if ((tx_done_old == tx_done) && tx_pending) { 264 /* make sure it is true for two checks in a row */ 265 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED, 266 &tx_ring->state); 267 } 268 /* reset the countdown */ 269 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); 270 271 /* update completed stats and continue */ 272 tx_ring->tx_stats.tx_done_old = tx_done; 273 274 return false; 275 } 276 277 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) 278 { 279 /* Do the reset outside of interrupt context */ 280 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 281 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); 282 ixgbevf_service_event_schedule(adapter); 283 } 284 } 285 286 /** 287 * ixgbevf_tx_timeout - Respond to a Tx Hang 288 * @netdev: network interface device structure 289 **/ 290 static void ixgbevf_tx_timeout(struct net_device *netdev) 291 { 292 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 293 294 ixgbevf_tx_timeout_reset(adapter); 295 } 296 297 /** 298 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 299 * @q_vector: board private structure 300 * @tx_ring: tx ring to clean 301 * @napi_budget: Used to determine if we are in netpoll 302 **/ 303 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 304 struct ixgbevf_ring *tx_ring, int napi_budget) 305 { 306 struct ixgbevf_adapter *adapter = q_vector->adapter; 307 struct ixgbevf_tx_buffer *tx_buffer; 308 union ixgbe_adv_tx_desc *tx_desc; 309 unsigned int total_bytes = 0, total_packets = 0; 310 unsigned int budget = tx_ring->count / 2; 311 unsigned int i = tx_ring->next_to_clean; 312 313 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 314 return true; 315 316 tx_buffer = &tx_ring->tx_buffer_info[i]; 317 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 318 i -= tx_ring->count; 319 320 do { 321 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 322 323 /* if next_to_watch is not set then there is no work pending */ 324 if (!eop_desc) 325 break; 326 327 /* prevent any other reads prior to eop_desc */ 328 read_barrier_depends(); 329 330 /* if DD is not set pending work has not been completed */ 331 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 332 break; 333 334 /* clear next_to_watch to prevent false hangs */ 335 tx_buffer->next_to_watch = NULL; 336 337 /* update the statistics for this packet */ 338 total_bytes += tx_buffer->bytecount; 339 total_packets += tx_buffer->gso_segs; 340 341 /* free the skb */ 342 napi_consume_skb(tx_buffer->skb, napi_budget); 343 344 /* unmap skb header data */ 345 dma_unmap_single(tx_ring->dev, 346 dma_unmap_addr(tx_buffer, dma), 347 dma_unmap_len(tx_buffer, len), 348 DMA_TO_DEVICE); 349 350 /* clear tx_buffer data */ 351 tx_buffer->skb = NULL; 352 dma_unmap_len_set(tx_buffer, len, 0); 353 354 /* unmap remaining buffers */ 355 while (tx_desc != eop_desc) { 356 tx_buffer++; 357 tx_desc++; 358 i++; 359 if (unlikely(!i)) { 360 i -= tx_ring->count; 361 tx_buffer = tx_ring->tx_buffer_info; 362 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 363 } 364 365 /* unmap any remaining paged data */ 366 if (dma_unmap_len(tx_buffer, len)) { 367 dma_unmap_page(tx_ring->dev, 368 dma_unmap_addr(tx_buffer, dma), 369 dma_unmap_len(tx_buffer, len), 370 DMA_TO_DEVICE); 371 dma_unmap_len_set(tx_buffer, len, 0); 372 } 373 } 374 375 /* move us one more past the eop_desc for start of next pkt */ 376 tx_buffer++; 377 tx_desc++; 378 i++; 379 if (unlikely(!i)) { 380 i -= tx_ring->count; 381 tx_buffer = tx_ring->tx_buffer_info; 382 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 383 } 384 385 /* issue prefetch for next Tx descriptor */ 386 prefetch(tx_desc); 387 388 /* update budget accounting */ 389 budget--; 390 } while (likely(budget)); 391 392 i += tx_ring->count; 393 tx_ring->next_to_clean = i; 394 u64_stats_update_begin(&tx_ring->syncp); 395 tx_ring->stats.bytes += total_bytes; 396 tx_ring->stats.packets += total_packets; 397 u64_stats_update_end(&tx_ring->syncp); 398 q_vector->tx.total_bytes += total_bytes; 399 q_vector->tx.total_packets += total_packets; 400 401 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { 402 struct ixgbe_hw *hw = &adapter->hw; 403 union ixgbe_adv_tx_desc *eop_desc; 404 405 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; 406 407 pr_err("Detected Tx Unit Hang\n" 408 " Tx Queue <%d>\n" 409 " TDH, TDT <%x>, <%x>\n" 410 " next_to_use <%x>\n" 411 " next_to_clean <%x>\n" 412 "tx_buffer_info[next_to_clean]\n" 413 " next_to_watch <%p>\n" 414 " eop_desc->wb.status <%x>\n" 415 " time_stamp <%lx>\n" 416 " jiffies <%lx>\n", 417 tx_ring->queue_index, 418 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), 419 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), 420 tx_ring->next_to_use, i, 421 eop_desc, (eop_desc ? eop_desc->wb.status : 0), 422 tx_ring->tx_buffer_info[i].time_stamp, jiffies); 423 424 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 425 426 /* schedule immediate reset if we believe we hung */ 427 ixgbevf_tx_timeout_reset(adapter); 428 429 return true; 430 } 431 432 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 433 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 434 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 435 /* Make sure that anybody stopping the queue after this 436 * sees the new next_to_clean. 437 */ 438 smp_mb(); 439 440 if (__netif_subqueue_stopped(tx_ring->netdev, 441 tx_ring->queue_index) && 442 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 443 netif_wake_subqueue(tx_ring->netdev, 444 tx_ring->queue_index); 445 ++tx_ring->tx_stats.restart_queue; 446 } 447 } 448 449 return !!budget; 450 } 451 452 /** 453 * ixgbevf_rx_skb - Helper function to determine proper Rx method 454 * @q_vector: structure containing interrupt and ring information 455 * @skb: packet to send up 456 **/ 457 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, 458 struct sk_buff *skb) 459 { 460 napi_gro_receive(&q_vector->napi, skb); 461 } 462 463 #define IXGBE_RSS_L4_TYPES_MASK \ 464 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ 465 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ 466 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ 467 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) 468 469 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, 470 union ixgbe_adv_rx_desc *rx_desc, 471 struct sk_buff *skb) 472 { 473 u16 rss_type; 474 475 if (!(ring->netdev->features & NETIF_F_RXHASH)) 476 return; 477 478 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & 479 IXGBE_RXDADV_RSSTYPE_MASK; 480 481 if (!rss_type) 482 return; 483 484 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 485 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 486 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); 487 } 488 489 /** 490 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 491 * @ring: structure containig ring specific data 492 * @rx_desc: current Rx descriptor being processed 493 * @skb: skb currently being received and modified 494 **/ 495 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 496 union ixgbe_adv_rx_desc *rx_desc, 497 struct sk_buff *skb) 498 { 499 skb_checksum_none_assert(skb); 500 501 /* Rx csum disabled */ 502 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 503 return; 504 505 /* if IP and error */ 506 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && 507 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { 508 ring->rx_stats.csum_err++; 509 return; 510 } 511 512 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) 513 return; 514 515 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { 516 ring->rx_stats.csum_err++; 517 return; 518 } 519 520 /* It must be a TCP or UDP packet with a valid checksum */ 521 skb->ip_summed = CHECKSUM_UNNECESSARY; 522 } 523 524 /** 525 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor 526 * @rx_ring: rx descriptor ring packet is being transacted on 527 * @rx_desc: pointer to the EOP Rx descriptor 528 * @skb: pointer to current skb being populated 529 * 530 * This function checks the ring, descriptor, and packet information in 531 * order to populate the checksum, VLAN, protocol, and other fields within 532 * the skb. 533 **/ 534 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, 535 union ixgbe_adv_rx_desc *rx_desc, 536 struct sk_buff *skb) 537 { 538 ixgbevf_rx_hash(rx_ring, rx_desc, skb); 539 ixgbevf_rx_checksum(rx_ring, rx_desc, skb); 540 541 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 542 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 543 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); 544 545 if (test_bit(vid & VLAN_VID_MASK, active_vlans)) 546 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 547 } 548 549 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 550 } 551 552 /** 553 * ixgbevf_is_non_eop - process handling of non-EOP buffers 554 * @rx_ring: Rx ring being processed 555 * @rx_desc: Rx descriptor for current buffer 556 * @skb: current socket buffer containing buffer in progress 557 * 558 * This function updates next to clean. If the buffer is an EOP buffer 559 * this function exits returning false, otherwise it will place the 560 * sk_buff in the next buffer to be chained and return true indicating 561 * that this is in fact a non-EOP buffer. 562 **/ 563 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, 564 union ixgbe_adv_rx_desc *rx_desc) 565 { 566 u32 ntc = rx_ring->next_to_clean + 1; 567 568 /* fetch, update, and store next to clean */ 569 ntc = (ntc < rx_ring->count) ? ntc : 0; 570 rx_ring->next_to_clean = ntc; 571 572 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); 573 574 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) 575 return false; 576 577 return true; 578 } 579 580 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, 581 struct ixgbevf_rx_buffer *bi) 582 { 583 struct page *page = bi->page; 584 dma_addr_t dma = bi->dma; 585 586 /* since we are recycling buffers we should seldom need to alloc */ 587 if (likely(page)) 588 return true; 589 590 /* alloc new page for storage */ 591 page = dev_alloc_page(); 592 if (unlikely(!page)) { 593 rx_ring->rx_stats.alloc_rx_page_failed++; 594 return false; 595 } 596 597 /* map page for use */ 598 dma = dma_map_page(rx_ring->dev, page, 0, 599 PAGE_SIZE, DMA_FROM_DEVICE); 600 601 /* if mapping failed free memory back to system since 602 * there isn't much point in holding memory we can't use 603 */ 604 if (dma_mapping_error(rx_ring->dev, dma)) { 605 __free_page(page); 606 607 rx_ring->rx_stats.alloc_rx_buff_failed++; 608 return false; 609 } 610 611 bi->dma = dma; 612 bi->page = page; 613 bi->page_offset = 0; 614 615 return true; 616 } 617 618 /** 619 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 620 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on 621 * @cleaned_count: number of buffers to replace 622 **/ 623 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, 624 u16 cleaned_count) 625 { 626 union ixgbe_adv_rx_desc *rx_desc; 627 struct ixgbevf_rx_buffer *bi; 628 unsigned int i = rx_ring->next_to_use; 629 630 /* nothing to do or no valid netdev defined */ 631 if (!cleaned_count || !rx_ring->netdev) 632 return; 633 634 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 635 bi = &rx_ring->rx_buffer_info[i]; 636 i -= rx_ring->count; 637 638 do { 639 if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) 640 break; 641 642 /* Refresh the desc even if pkt_addr didn't change 643 * because each write-back erases this info. 644 */ 645 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 646 647 rx_desc++; 648 bi++; 649 i++; 650 if (unlikely(!i)) { 651 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); 652 bi = rx_ring->rx_buffer_info; 653 i -= rx_ring->count; 654 } 655 656 /* clear the hdr_addr for the next_to_use descriptor */ 657 rx_desc->read.hdr_addr = 0; 658 659 cleaned_count--; 660 } while (cleaned_count); 661 662 i += rx_ring->count; 663 664 if (rx_ring->next_to_use != i) { 665 /* record the next descriptor to use */ 666 rx_ring->next_to_use = i; 667 668 /* update next to alloc since we have filled the ring */ 669 rx_ring->next_to_alloc = i; 670 671 /* Force memory writes to complete before letting h/w 672 * know there are new descriptors to fetch. (Only 673 * applicable for weak-ordered memory model archs, 674 * such as IA-64). 675 */ 676 wmb(); 677 ixgbevf_write_tail(rx_ring, i); 678 } 679 } 680 681 /** 682 * ixgbevf_cleanup_headers - Correct corrupted or empty headers 683 * @rx_ring: rx descriptor ring packet is being transacted on 684 * @rx_desc: pointer to the EOP Rx descriptor 685 * @skb: pointer to current skb being fixed 686 * 687 * Check for corrupted packet headers caused by senders on the local L2 688 * embedded NIC switch not setting up their Tx Descriptors right. These 689 * should be very rare. 690 * 691 * Also address the case where we are pulling data in on pages only 692 * and as such no data is present in the skb header. 693 * 694 * In addition if skb is not at least 60 bytes we need to pad it so that 695 * it is large enough to qualify as a valid Ethernet frame. 696 * 697 * Returns true if an error was encountered and skb was freed. 698 **/ 699 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, 700 union ixgbe_adv_rx_desc *rx_desc, 701 struct sk_buff *skb) 702 { 703 /* verify that the packet does not have any known errors */ 704 if (unlikely(ixgbevf_test_staterr(rx_desc, 705 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { 706 struct net_device *netdev = rx_ring->netdev; 707 708 if (!(netdev->features & NETIF_F_RXALL)) { 709 dev_kfree_skb_any(skb); 710 return true; 711 } 712 } 713 714 /* if eth_skb_pad returns an error the skb was freed */ 715 if (eth_skb_pad(skb)) 716 return true; 717 718 return false; 719 } 720 721 /** 722 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring 723 * @rx_ring: rx descriptor ring to store buffers on 724 * @old_buff: donor buffer to have page reused 725 * 726 * Synchronizes page for reuse by the adapter 727 **/ 728 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, 729 struct ixgbevf_rx_buffer *old_buff) 730 { 731 struct ixgbevf_rx_buffer *new_buff; 732 u16 nta = rx_ring->next_to_alloc; 733 734 new_buff = &rx_ring->rx_buffer_info[nta]; 735 736 /* update, and store next to alloc */ 737 nta++; 738 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 739 740 /* transfer page from old buffer to new buffer */ 741 new_buff->page = old_buff->page; 742 new_buff->dma = old_buff->dma; 743 new_buff->page_offset = old_buff->page_offset; 744 745 /* sync the buffer for use by the device */ 746 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, 747 new_buff->page_offset, 748 IXGBEVF_RX_BUFSZ, 749 DMA_FROM_DEVICE); 750 } 751 752 static inline bool ixgbevf_page_is_reserved(struct page *page) 753 { 754 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 755 } 756 757 /** 758 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff 759 * @rx_ring: rx descriptor ring to transact packets on 760 * @rx_buffer: buffer containing page to add 761 * @rx_desc: descriptor containing length of buffer written by hardware 762 * @skb: sk_buff to place the data into 763 * 764 * This function will add the data contained in rx_buffer->page to the skb. 765 * This is done either through a direct copy if the data in the buffer is 766 * less than the skb header size, otherwise it will just attach the page as 767 * a frag to the skb. 768 * 769 * The function will then update the page offset if necessary and return 770 * true if the buffer can be reused by the adapter. 771 **/ 772 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, 773 struct ixgbevf_rx_buffer *rx_buffer, 774 union ixgbe_adv_rx_desc *rx_desc, 775 struct sk_buff *skb) 776 { 777 struct page *page = rx_buffer->page; 778 unsigned char *va = page_address(page) + rx_buffer->page_offset; 779 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); 780 #if (PAGE_SIZE < 8192) 781 unsigned int truesize = IXGBEVF_RX_BUFSZ; 782 #else 783 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); 784 #endif 785 unsigned int pull_len; 786 787 if (unlikely(skb_is_nonlinear(skb))) 788 goto add_tail_frag; 789 790 if (likely(size <= IXGBEVF_RX_HDR_SIZE)) { 791 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 792 793 /* page is not reserved, we can reuse buffer as is */ 794 if (likely(!ixgbevf_page_is_reserved(page))) 795 return true; 796 797 /* this page cannot be reused so discard it */ 798 put_page(page); 799 return false; 800 } 801 802 /* we need the header to contain the greater of either ETH_HLEN or 803 * 60 bytes if the skb->len is less than 60 for skb_pad. 804 */ 805 pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); 806 807 /* align pull length to size of long to optimize memcpy performance */ 808 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); 809 810 /* update all of the pointers */ 811 va += pull_len; 812 size -= pull_len; 813 814 add_tail_frag: 815 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 816 (unsigned long)va & ~PAGE_MASK, size, truesize); 817 818 /* avoid re-using remote pages */ 819 if (unlikely(ixgbevf_page_is_reserved(page))) 820 return false; 821 822 #if (PAGE_SIZE < 8192) 823 /* if we are only owner of page we can reuse it */ 824 if (unlikely(page_count(page) != 1)) 825 return false; 826 827 /* flip page offset to other buffer */ 828 rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; 829 830 #else 831 /* move offset up to the next cache line */ 832 rx_buffer->page_offset += truesize; 833 834 if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) 835 return false; 836 837 #endif 838 /* Even if we own the page, we are not allowed to use atomic_set() 839 * This would break get_page_unless_zero() users. 840 */ 841 page_ref_inc(page); 842 843 return true; 844 } 845 846 static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, 847 union ixgbe_adv_rx_desc *rx_desc, 848 struct sk_buff *skb) 849 { 850 struct ixgbevf_rx_buffer *rx_buffer; 851 struct page *page; 852 853 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 854 page = rx_buffer->page; 855 prefetchw(page); 856 857 if (likely(!skb)) { 858 void *page_addr = page_address(page) + 859 rx_buffer->page_offset; 860 861 /* prefetch first cache line of first page */ 862 prefetch(page_addr); 863 #if L1_CACHE_BYTES < 128 864 prefetch(page_addr + L1_CACHE_BYTES); 865 #endif 866 867 /* allocate a skb to store the frags */ 868 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 869 IXGBEVF_RX_HDR_SIZE); 870 if (unlikely(!skb)) { 871 rx_ring->rx_stats.alloc_rx_buff_failed++; 872 return NULL; 873 } 874 875 /* we will be copying header into skb->data in 876 * pskb_may_pull so it is in our interest to prefetch 877 * it now to avoid a possible cache miss 878 */ 879 prefetchw(skb->data); 880 } 881 882 /* we are reusing so sync this buffer for CPU use */ 883 dma_sync_single_range_for_cpu(rx_ring->dev, 884 rx_buffer->dma, 885 rx_buffer->page_offset, 886 IXGBEVF_RX_BUFSZ, 887 DMA_FROM_DEVICE); 888 889 /* pull page into skb */ 890 if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { 891 /* hand second half of page back to the ring */ 892 ixgbevf_reuse_rx_page(rx_ring, rx_buffer); 893 } else { 894 /* we are not reusing the buffer so unmap it */ 895 dma_unmap_page(rx_ring->dev, rx_buffer->dma, 896 PAGE_SIZE, DMA_FROM_DEVICE); 897 } 898 899 /* clear contents of buffer_info */ 900 rx_buffer->dma = 0; 901 rx_buffer->page = NULL; 902 903 return skb; 904 } 905 906 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 907 u32 qmask) 908 { 909 struct ixgbe_hw *hw = &adapter->hw; 910 911 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 912 } 913 914 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 915 struct ixgbevf_ring *rx_ring, 916 int budget) 917 { 918 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 919 u16 cleaned_count = ixgbevf_desc_unused(rx_ring); 920 struct sk_buff *skb = rx_ring->skb; 921 922 while (likely(total_rx_packets < budget)) { 923 union ixgbe_adv_rx_desc *rx_desc; 924 925 /* return some buffers to hardware, one at a time is too slow */ 926 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 927 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); 928 cleaned_count = 0; 929 } 930 931 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); 932 933 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) 934 break; 935 936 /* This memory barrier is needed to keep us from reading 937 * any other fields out of the rx_desc until we know the 938 * RXD_STAT_DD bit is set 939 */ 940 rmb(); 941 942 /* retrieve a buffer from the ring */ 943 skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); 944 945 /* exit if we failed to retrieve a buffer */ 946 if (!skb) 947 break; 948 949 cleaned_count++; 950 951 /* fetch next buffer in frame if non-eop */ 952 if (ixgbevf_is_non_eop(rx_ring, rx_desc)) 953 continue; 954 955 /* verify the packet layout is correct */ 956 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { 957 skb = NULL; 958 continue; 959 } 960 961 /* probably a little skewed due to removing CRC */ 962 total_rx_bytes += skb->len; 963 964 /* Workaround hardware that can't do proper VEPA multicast 965 * source pruning. 966 */ 967 if ((skb->pkt_type == PACKET_BROADCAST || 968 skb->pkt_type == PACKET_MULTICAST) && 969 ether_addr_equal(rx_ring->netdev->dev_addr, 970 eth_hdr(skb)->h_source)) { 971 dev_kfree_skb_irq(skb); 972 continue; 973 } 974 975 /* populate checksum, VLAN, and protocol */ 976 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); 977 978 ixgbevf_rx_skb(q_vector, skb); 979 980 /* reset skb pointer */ 981 skb = NULL; 982 983 /* update budget accounting */ 984 total_rx_packets++; 985 } 986 987 /* place incomplete frames back on ring for completion */ 988 rx_ring->skb = skb; 989 990 u64_stats_update_begin(&rx_ring->syncp); 991 rx_ring->stats.packets += total_rx_packets; 992 rx_ring->stats.bytes += total_rx_bytes; 993 u64_stats_update_end(&rx_ring->syncp); 994 q_vector->rx.total_packets += total_rx_packets; 995 q_vector->rx.total_bytes += total_rx_bytes; 996 997 return total_rx_packets; 998 } 999 1000 /** 1001 * ixgbevf_poll - NAPI polling calback 1002 * @napi: napi struct with our devices info in it 1003 * @budget: amount of work driver is allowed to do this pass, in packets 1004 * 1005 * This function will clean more than one or more rings associated with a 1006 * q_vector. 1007 **/ 1008 static int ixgbevf_poll(struct napi_struct *napi, int budget) 1009 { 1010 struct ixgbevf_q_vector *q_vector = 1011 container_of(napi, struct ixgbevf_q_vector, napi); 1012 struct ixgbevf_adapter *adapter = q_vector->adapter; 1013 struct ixgbevf_ring *ring; 1014 int per_ring_budget, work_done = 0; 1015 bool clean_complete = true; 1016 1017 ixgbevf_for_each_ring(ring, q_vector->tx) { 1018 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget)) 1019 clean_complete = false; 1020 } 1021 1022 if (budget <= 0) 1023 return budget; 1024 1025 /* attempt to distribute budget to each queue fairly, but don't allow 1026 * the budget to go below 1 because we'll exit polling 1027 */ 1028 if (q_vector->rx.count > 1) 1029 per_ring_budget = max(budget/q_vector->rx.count, 1); 1030 else 1031 per_ring_budget = budget; 1032 1033 ixgbevf_for_each_ring(ring, q_vector->rx) { 1034 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, 1035 per_ring_budget); 1036 work_done += cleaned; 1037 if (cleaned >= per_ring_budget) 1038 clean_complete = false; 1039 } 1040 1041 /* If all work not completed, return budget and keep polling */ 1042 if (!clean_complete) 1043 return budget; 1044 /* all work done, exit the polling mode */ 1045 napi_complete_done(napi, work_done); 1046 if (adapter->rx_itr_setting == 1) 1047 ixgbevf_set_itr(q_vector); 1048 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && 1049 !test_bit(__IXGBEVF_REMOVING, &adapter->state)) 1050 ixgbevf_irq_enable_queues(adapter, 1051 BIT(q_vector->v_idx)); 1052 1053 return 0; 1054 } 1055 1056 /** 1057 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 1058 * @q_vector: structure containing interrupt and ring information 1059 **/ 1060 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 1061 { 1062 struct ixgbevf_adapter *adapter = q_vector->adapter; 1063 struct ixgbe_hw *hw = &adapter->hw; 1064 int v_idx = q_vector->v_idx; 1065 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 1066 1067 /* set the WDIS bit to not clear the timer bits and cause an 1068 * immediate assertion of the interrupt 1069 */ 1070 itr_reg |= IXGBE_EITR_CNT_WDIS; 1071 1072 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 1073 } 1074 1075 /** 1076 * ixgbevf_configure_msix - Configure MSI-X hardware 1077 * @adapter: board private structure 1078 * 1079 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 1080 * interrupts. 1081 **/ 1082 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 1083 { 1084 struct ixgbevf_q_vector *q_vector; 1085 int q_vectors, v_idx; 1086 1087 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1088 adapter->eims_enable_mask = 0; 1089 1090 /* Populate the IVAR table and set the ITR values to the 1091 * corresponding register. 1092 */ 1093 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 1094 struct ixgbevf_ring *ring; 1095 1096 q_vector = adapter->q_vector[v_idx]; 1097 1098 ixgbevf_for_each_ring(ring, q_vector->rx) 1099 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 1100 1101 ixgbevf_for_each_ring(ring, q_vector->tx) 1102 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 1103 1104 if (q_vector->tx.ring && !q_vector->rx.ring) { 1105 /* Tx only vector */ 1106 if (adapter->tx_itr_setting == 1) 1107 q_vector->itr = IXGBE_12K_ITR; 1108 else 1109 q_vector->itr = adapter->tx_itr_setting; 1110 } else { 1111 /* Rx or Rx/Tx vector */ 1112 if (adapter->rx_itr_setting == 1) 1113 q_vector->itr = IXGBE_20K_ITR; 1114 else 1115 q_vector->itr = adapter->rx_itr_setting; 1116 } 1117 1118 /* add q_vector eims value to global eims_enable_mask */ 1119 adapter->eims_enable_mask |= BIT(v_idx); 1120 1121 ixgbevf_write_eitr(q_vector); 1122 } 1123 1124 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 1125 /* setup eims_other and add value to global eims_enable_mask */ 1126 adapter->eims_other = BIT(v_idx); 1127 adapter->eims_enable_mask |= adapter->eims_other; 1128 } 1129 1130 enum latency_range { 1131 lowest_latency = 0, 1132 low_latency = 1, 1133 bulk_latency = 2, 1134 latency_invalid = 255 1135 }; 1136 1137 /** 1138 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 1139 * @q_vector: structure containing interrupt and ring information 1140 * @ring_container: structure containing ring performance data 1141 * 1142 * Stores a new ITR value based on packets and byte 1143 * counts during the last interrupt. The advantage of per interrupt 1144 * computation is faster updates and more accurate ITR for the current 1145 * traffic pattern. Constants in this function were computed 1146 * based on theoretical maximum wire speed and thresholds were set based 1147 * on testing data as well as attempting to minimize response time 1148 * while increasing bulk throughput. 1149 **/ 1150 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 1151 struct ixgbevf_ring_container *ring_container) 1152 { 1153 int bytes = ring_container->total_bytes; 1154 int packets = ring_container->total_packets; 1155 u32 timepassed_us; 1156 u64 bytes_perint; 1157 u8 itr_setting = ring_container->itr; 1158 1159 if (packets == 0) 1160 return; 1161 1162 /* simple throttle rate management 1163 * 0-20MB/s lowest (100000 ints/s) 1164 * 20-100MB/s low (20000 ints/s) 1165 * 100-1249MB/s bulk (12000 ints/s) 1166 */ 1167 /* what was last interrupt timeslice? */ 1168 timepassed_us = q_vector->itr >> 2; 1169 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 1170 1171 switch (itr_setting) { 1172 case lowest_latency: 1173 if (bytes_perint > 10) 1174 itr_setting = low_latency; 1175 break; 1176 case low_latency: 1177 if (bytes_perint > 20) 1178 itr_setting = bulk_latency; 1179 else if (bytes_perint <= 10) 1180 itr_setting = lowest_latency; 1181 break; 1182 case bulk_latency: 1183 if (bytes_perint <= 20) 1184 itr_setting = low_latency; 1185 break; 1186 } 1187 1188 /* clear work counters since we have the values we need */ 1189 ring_container->total_bytes = 0; 1190 ring_container->total_packets = 0; 1191 1192 /* write updated itr to ring container */ 1193 ring_container->itr = itr_setting; 1194 } 1195 1196 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 1197 { 1198 u32 new_itr = q_vector->itr; 1199 u8 current_itr; 1200 1201 ixgbevf_update_itr(q_vector, &q_vector->tx); 1202 ixgbevf_update_itr(q_vector, &q_vector->rx); 1203 1204 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 1205 1206 switch (current_itr) { 1207 /* counts and packets in update_itr are dependent on these numbers */ 1208 case lowest_latency: 1209 new_itr = IXGBE_100K_ITR; 1210 break; 1211 case low_latency: 1212 new_itr = IXGBE_20K_ITR; 1213 break; 1214 case bulk_latency: 1215 new_itr = IXGBE_12K_ITR; 1216 break; 1217 default: 1218 break; 1219 } 1220 1221 if (new_itr != q_vector->itr) { 1222 /* do an exponential smoothing */ 1223 new_itr = (10 * new_itr * q_vector->itr) / 1224 ((9 * new_itr) + q_vector->itr); 1225 1226 /* save the algorithm value here */ 1227 q_vector->itr = new_itr; 1228 1229 ixgbevf_write_eitr(q_vector); 1230 } 1231 } 1232 1233 static irqreturn_t ixgbevf_msix_other(int irq, void *data) 1234 { 1235 struct ixgbevf_adapter *adapter = data; 1236 struct ixgbe_hw *hw = &adapter->hw; 1237 1238 hw->mac.get_link_status = 1; 1239 1240 ixgbevf_service_event_schedule(adapter); 1241 1242 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 1243 1244 return IRQ_HANDLED; 1245 } 1246 1247 /** 1248 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 1249 * @irq: unused 1250 * @data: pointer to our q_vector struct for this interrupt vector 1251 **/ 1252 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 1253 { 1254 struct ixgbevf_q_vector *q_vector = data; 1255 1256 /* EIAM disabled interrupts (on this vector) for us */ 1257 if (q_vector->rx.ring || q_vector->tx.ring) 1258 napi_schedule_irqoff(&q_vector->napi); 1259 1260 return IRQ_HANDLED; 1261 } 1262 1263 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 1264 int r_idx) 1265 { 1266 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 1267 1268 a->rx_ring[r_idx]->next = q_vector->rx.ring; 1269 q_vector->rx.ring = a->rx_ring[r_idx]; 1270 q_vector->rx.count++; 1271 } 1272 1273 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 1274 int t_idx) 1275 { 1276 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 1277 1278 a->tx_ring[t_idx]->next = q_vector->tx.ring; 1279 q_vector->tx.ring = a->tx_ring[t_idx]; 1280 q_vector->tx.count++; 1281 } 1282 1283 /** 1284 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 1285 * @adapter: board private structure to initialize 1286 * 1287 * This function maps descriptor rings to the queue-specific vectors 1288 * we were allotted through the MSI-X enabling code. Ideally, we'd have 1289 * one vector per ring/queue, but on a constrained vector budget, we 1290 * group the rings as "efficiently" as possible. You would add new 1291 * mapping configurations in here. 1292 **/ 1293 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 1294 { 1295 int q_vectors; 1296 int v_start = 0; 1297 int rxr_idx = 0, txr_idx = 0; 1298 int rxr_remaining = adapter->num_rx_queues; 1299 int txr_remaining = adapter->num_tx_queues; 1300 int i, j; 1301 int rqpv, tqpv; 1302 1303 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1304 1305 /* The ideal configuration... 1306 * We have enough vectors to map one per queue. 1307 */ 1308 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 1309 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 1310 map_vector_to_rxq(adapter, v_start, rxr_idx); 1311 1312 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 1313 map_vector_to_txq(adapter, v_start, txr_idx); 1314 return 0; 1315 } 1316 1317 /* If we don't have enough vectors for a 1-to-1 1318 * mapping, we'll have to group them so there are 1319 * multiple queues per vector. 1320 */ 1321 /* Re-adjusting *qpv takes care of the remainder. */ 1322 for (i = v_start; i < q_vectors; i++) { 1323 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 1324 for (j = 0; j < rqpv; j++) { 1325 map_vector_to_rxq(adapter, i, rxr_idx); 1326 rxr_idx++; 1327 rxr_remaining--; 1328 } 1329 } 1330 for (i = v_start; i < q_vectors; i++) { 1331 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 1332 for (j = 0; j < tqpv; j++) { 1333 map_vector_to_txq(adapter, i, txr_idx); 1334 txr_idx++; 1335 txr_remaining--; 1336 } 1337 } 1338 1339 return 0; 1340 } 1341 1342 /** 1343 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 1344 * @adapter: board private structure 1345 * 1346 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 1347 * interrupts from the kernel. 1348 **/ 1349 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 1350 { 1351 struct net_device *netdev = adapter->netdev; 1352 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1353 int vector, err; 1354 int ri = 0, ti = 0; 1355 1356 for (vector = 0; vector < q_vectors; vector++) { 1357 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 1358 struct msix_entry *entry = &adapter->msix_entries[vector]; 1359 1360 if (q_vector->tx.ring && q_vector->rx.ring) { 1361 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1362 "%s-%s-%d", netdev->name, "TxRx", ri++); 1363 ti++; 1364 } else if (q_vector->rx.ring) { 1365 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1366 "%s-%s-%d", netdev->name, "rx", ri++); 1367 } else if (q_vector->tx.ring) { 1368 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1369 "%s-%s-%d", netdev->name, "tx", ti++); 1370 } else { 1371 /* skip this unused q_vector */ 1372 continue; 1373 } 1374 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 1375 q_vector->name, q_vector); 1376 if (err) { 1377 hw_dbg(&adapter->hw, 1378 "request_irq failed for MSIX interrupt Error: %d\n", 1379 err); 1380 goto free_queue_irqs; 1381 } 1382 } 1383 1384 err = request_irq(adapter->msix_entries[vector].vector, 1385 &ixgbevf_msix_other, 0, netdev->name, adapter); 1386 if (err) { 1387 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", 1388 err); 1389 goto free_queue_irqs; 1390 } 1391 1392 return 0; 1393 1394 free_queue_irqs: 1395 while (vector) { 1396 vector--; 1397 free_irq(adapter->msix_entries[vector].vector, 1398 adapter->q_vector[vector]); 1399 } 1400 /* This failure is non-recoverable - it indicates the system is 1401 * out of MSIX vector resources and the VF driver cannot run 1402 * without them. Set the number of msix vectors to zero 1403 * indicating that not enough can be allocated. The error 1404 * will be returned to the user indicating device open failed. 1405 * Any further attempts to force the driver to open will also 1406 * fail. The only way to recover is to unload the driver and 1407 * reload it again. If the system has recovered some MSIX 1408 * vectors then it may succeed. 1409 */ 1410 adapter->num_msix_vectors = 0; 1411 return err; 1412 } 1413 1414 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 1415 { 1416 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1417 1418 for (i = 0; i < q_vectors; i++) { 1419 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 1420 1421 q_vector->rx.ring = NULL; 1422 q_vector->tx.ring = NULL; 1423 q_vector->rx.count = 0; 1424 q_vector->tx.count = 0; 1425 } 1426 } 1427 1428 /** 1429 * ixgbevf_request_irq - initialize interrupts 1430 * @adapter: board private structure 1431 * 1432 * Attempts to configure interrupts using the best available 1433 * capabilities of the hardware and kernel. 1434 **/ 1435 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 1436 { 1437 int err = ixgbevf_request_msix_irqs(adapter); 1438 1439 if (err) 1440 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); 1441 1442 return err; 1443 } 1444 1445 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 1446 { 1447 int i, q_vectors; 1448 1449 if (!adapter->msix_entries) 1450 return; 1451 1452 q_vectors = adapter->num_msix_vectors; 1453 i = q_vectors - 1; 1454 1455 free_irq(adapter->msix_entries[i].vector, adapter); 1456 i--; 1457 1458 for (; i >= 0; i--) { 1459 /* free only the irqs that were actually requested */ 1460 if (!adapter->q_vector[i]->rx.ring && 1461 !adapter->q_vector[i]->tx.ring) 1462 continue; 1463 1464 free_irq(adapter->msix_entries[i].vector, 1465 adapter->q_vector[i]); 1466 } 1467 1468 ixgbevf_reset_q_vectors(adapter); 1469 } 1470 1471 /** 1472 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 1473 * @adapter: board private structure 1474 **/ 1475 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 1476 { 1477 struct ixgbe_hw *hw = &adapter->hw; 1478 int i; 1479 1480 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 1481 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1482 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 1483 1484 IXGBE_WRITE_FLUSH(hw); 1485 1486 for (i = 0; i < adapter->num_msix_vectors; i++) 1487 synchronize_irq(adapter->msix_entries[i].vector); 1488 } 1489 1490 /** 1491 * ixgbevf_irq_enable - Enable default interrupt generation settings 1492 * @adapter: board private structure 1493 **/ 1494 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1495 { 1496 struct ixgbe_hw *hw = &adapter->hw; 1497 1498 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1499 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1500 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1501 } 1502 1503 /** 1504 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset 1505 * @adapter: board private structure 1506 * @ring: structure containing ring specific data 1507 * 1508 * Configure the Tx descriptor ring after a reset. 1509 **/ 1510 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, 1511 struct ixgbevf_ring *ring) 1512 { 1513 struct ixgbe_hw *hw = &adapter->hw; 1514 u64 tdba = ring->dma; 1515 int wait_loop = 10; 1516 u32 txdctl = IXGBE_TXDCTL_ENABLE; 1517 u8 reg_idx = ring->reg_idx; 1518 1519 /* disable queue to avoid issues while updating state */ 1520 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 1521 IXGBE_WRITE_FLUSH(hw); 1522 1523 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); 1524 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32); 1525 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx), 1526 ring->count * sizeof(union ixgbe_adv_tx_desc)); 1527 1528 /* disable head writeback */ 1529 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0); 1530 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0); 1531 1532 /* enable relaxed ordering */ 1533 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx), 1534 (IXGBE_DCA_TXCTRL_DESC_RRO_EN | 1535 IXGBE_DCA_TXCTRL_DATA_RRO_EN)); 1536 1537 /* reset head and tail pointers */ 1538 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0); 1539 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0); 1540 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); 1541 1542 /* reset ntu and ntc to place SW in sync with hardwdare */ 1543 ring->next_to_clean = 0; 1544 ring->next_to_use = 0; 1545 1546 /* In order to avoid issues WTHRESH + PTHRESH should always be equal 1547 * to or less than the number of on chip descriptors, which is 1548 * currently 40. 1549 */ 1550 txdctl |= (8 << 16); /* WTHRESH = 8 */ 1551 1552 /* Setting PTHRESH to 32 both improves performance */ 1553 txdctl |= (1u << 8) | /* HTHRESH = 1 */ 1554 32; /* PTHRESH = 32 */ 1555 1556 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); 1557 1558 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); 1559 1560 /* poll to verify queue is enabled */ 1561 do { 1562 usleep_range(1000, 2000); 1563 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); 1564 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); 1565 if (!wait_loop) 1566 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); 1567 } 1568 1569 /** 1570 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1571 * @adapter: board private structure 1572 * 1573 * Configure the Tx unit of the MAC after a reset. 1574 **/ 1575 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1576 { 1577 u32 i; 1578 1579 /* Setup the HW Tx Head and Tail descriptor pointers */ 1580 for (i = 0; i < adapter->num_tx_queues; i++) 1581 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); 1582 } 1583 1584 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1585 1586 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1587 { 1588 struct ixgbe_hw *hw = &adapter->hw; 1589 u32 srrctl; 1590 1591 srrctl = IXGBE_SRRCTL_DROP_EN; 1592 1593 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; 1594 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1595 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1596 1597 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1598 } 1599 1600 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) 1601 { 1602 struct ixgbe_hw *hw = &adapter->hw; 1603 1604 /* PSRTYPE must be initialized in 82599 */ 1605 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 1606 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | 1607 IXGBE_PSRTYPE_L2HDR; 1608 1609 if (adapter->num_rx_queues > 1) 1610 psrtype |= BIT(29); 1611 1612 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1613 } 1614 1615 #define IXGBEVF_MAX_RX_DESC_POLL 10 1616 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, 1617 struct ixgbevf_ring *ring) 1618 { 1619 struct ixgbe_hw *hw = &adapter->hw; 1620 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1621 u32 rxdctl; 1622 u8 reg_idx = ring->reg_idx; 1623 1624 if (IXGBE_REMOVED(hw->hw_addr)) 1625 return; 1626 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1627 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1628 1629 /* write value back with RXDCTL.ENABLE bit cleared */ 1630 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1631 1632 /* the hardware may take up to 100us to really disable the Rx queue */ 1633 do { 1634 udelay(10); 1635 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1636 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); 1637 1638 if (!wait_loop) 1639 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n", 1640 reg_idx); 1641 } 1642 1643 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1644 struct ixgbevf_ring *ring) 1645 { 1646 struct ixgbe_hw *hw = &adapter->hw; 1647 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1648 u32 rxdctl; 1649 u8 reg_idx = ring->reg_idx; 1650 1651 if (IXGBE_REMOVED(hw->hw_addr)) 1652 return; 1653 do { 1654 usleep_range(1000, 2000); 1655 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1656 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); 1657 1658 if (!wait_loop) 1659 pr_err("RXDCTL.ENABLE queue %d not set while polling\n", 1660 reg_idx); 1661 } 1662 1663 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) 1664 { 1665 struct ixgbe_hw *hw = &adapter->hw; 1666 u32 vfmrqc = 0, vfreta = 0; 1667 u16 rss_i = adapter->num_rx_queues; 1668 u8 i, j; 1669 1670 /* Fill out hash function seeds */ 1671 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); 1672 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) 1673 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); 1674 1675 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { 1676 if (j == rss_i) 1677 j = 0; 1678 1679 adapter->rss_indir_tbl[i] = j; 1680 1681 vfreta |= j << (i & 0x3) * 8; 1682 if ((i & 3) == 3) { 1683 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); 1684 vfreta = 0; 1685 } 1686 } 1687 1688 /* Perform hash on these packet types */ 1689 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 | 1690 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP | 1691 IXGBE_VFMRQC_RSS_FIELD_IPV6 | 1692 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP; 1693 1694 vfmrqc |= IXGBE_VFMRQC_RSSEN; 1695 1696 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc); 1697 } 1698 1699 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, 1700 struct ixgbevf_ring *ring) 1701 { 1702 struct ixgbe_hw *hw = &adapter->hw; 1703 u64 rdba = ring->dma; 1704 u32 rxdctl; 1705 u8 reg_idx = ring->reg_idx; 1706 1707 /* disable queue to avoid issues while updating state */ 1708 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1709 ixgbevf_disable_rx_queue(adapter, ring); 1710 1711 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); 1712 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32); 1713 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx), 1714 ring->count * sizeof(union ixgbe_adv_rx_desc)); 1715 1716 #ifndef CONFIG_SPARC 1717 /* enable relaxed ordering */ 1718 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), 1719 IXGBE_DCA_RXCTRL_DESC_RRO_EN); 1720 #else 1721 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), 1722 IXGBE_DCA_RXCTRL_DESC_RRO_EN | 1723 IXGBE_DCA_RXCTRL_DATA_WRO_EN); 1724 #endif 1725 1726 /* reset head and tail pointers */ 1727 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); 1728 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); 1729 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); 1730 1731 /* reset ntu and ntc to place SW in sync with hardwdare */ 1732 ring->next_to_clean = 0; 1733 ring->next_to_use = 0; 1734 ring->next_to_alloc = 0; 1735 1736 ixgbevf_configure_srrctl(adapter, reg_idx); 1737 1738 /* allow any size packet since we can handle overflow */ 1739 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN; 1740 1741 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1742 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1743 1744 ixgbevf_rx_desc_queue_enable(adapter, ring); 1745 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); 1746 } 1747 1748 /** 1749 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1750 * @adapter: board private structure 1751 * 1752 * Configure the Rx unit of the MAC after a reset. 1753 **/ 1754 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1755 { 1756 struct ixgbe_hw *hw = &adapter->hw; 1757 struct net_device *netdev = adapter->netdev; 1758 int i, ret; 1759 1760 ixgbevf_setup_psrtype(adapter); 1761 if (hw->mac.type >= ixgbe_mac_X550_vf) 1762 ixgbevf_setup_vfmrqc(adapter); 1763 1764 spin_lock_bh(&adapter->mbx_lock); 1765 /* notify the PF of our intent to use this size of frame */ 1766 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); 1767 spin_unlock_bh(&adapter->mbx_lock); 1768 if (ret) 1769 dev_err(&adapter->pdev->dev, 1770 "Failed to set MTU at %d\n", netdev->mtu); 1771 1772 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1773 * the Base and Length of the Rx Descriptor Ring 1774 */ 1775 for (i = 0; i < adapter->num_rx_queues; i++) 1776 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); 1777 } 1778 1779 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, 1780 __be16 proto, u16 vid) 1781 { 1782 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1783 struct ixgbe_hw *hw = &adapter->hw; 1784 int err; 1785 1786 spin_lock_bh(&adapter->mbx_lock); 1787 1788 /* add VID to filter table */ 1789 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1790 1791 spin_unlock_bh(&adapter->mbx_lock); 1792 1793 /* translate error return types so error makes sense */ 1794 if (err == IXGBE_ERR_MBX) 1795 return -EIO; 1796 1797 if (err == IXGBE_ERR_INVALID_ARGUMENT) 1798 return -EACCES; 1799 1800 set_bit(vid, adapter->active_vlans); 1801 1802 return err; 1803 } 1804 1805 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, 1806 __be16 proto, u16 vid) 1807 { 1808 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1809 struct ixgbe_hw *hw = &adapter->hw; 1810 int err; 1811 1812 spin_lock_bh(&adapter->mbx_lock); 1813 1814 /* remove VID from filter table */ 1815 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1816 1817 spin_unlock_bh(&adapter->mbx_lock); 1818 1819 clear_bit(vid, adapter->active_vlans); 1820 1821 return err; 1822 } 1823 1824 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1825 { 1826 u16 vid; 1827 1828 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1829 ixgbevf_vlan_rx_add_vid(adapter->netdev, 1830 htons(ETH_P_8021Q), vid); 1831 } 1832 1833 static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1834 { 1835 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1836 struct ixgbe_hw *hw = &adapter->hw; 1837 int count = 0; 1838 1839 if ((netdev_uc_count(netdev)) > 10) { 1840 pr_err("Too many unicast filters - No Space\n"); 1841 return -ENOSPC; 1842 } 1843 1844 if (!netdev_uc_empty(netdev)) { 1845 struct netdev_hw_addr *ha; 1846 1847 netdev_for_each_uc_addr(ha, netdev) { 1848 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1849 udelay(200); 1850 } 1851 } else { 1852 /* If the list is empty then send message to PF driver to 1853 * clear all MAC VLANs on this VF. 1854 */ 1855 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1856 } 1857 1858 return count; 1859 } 1860 1861 /** 1862 * ixgbevf_set_rx_mode - Multicast and unicast set 1863 * @netdev: network interface device structure 1864 * 1865 * The set_rx_method entry point is called whenever the multicast address 1866 * list, unicast address list or the network interface flags are updated. 1867 * This routine is responsible for configuring the hardware for proper 1868 * multicast mode and configuring requested unicast filters. 1869 **/ 1870 static void ixgbevf_set_rx_mode(struct net_device *netdev) 1871 { 1872 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1873 struct ixgbe_hw *hw = &adapter->hw; 1874 unsigned int flags = netdev->flags; 1875 int xcast_mode; 1876 1877 xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : 1878 (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? 1879 IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; 1880 1881 /* request the most inclusive mode we need */ 1882 if (flags & IFF_PROMISC) 1883 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; 1884 else if (flags & IFF_ALLMULTI) 1885 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI; 1886 else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) 1887 xcast_mode = IXGBEVF_XCAST_MODE_MULTI; 1888 else 1889 xcast_mode = IXGBEVF_XCAST_MODE_NONE; 1890 1891 spin_lock_bh(&adapter->mbx_lock); 1892 1893 hw->mac.ops.update_xcast_mode(hw, xcast_mode); 1894 1895 /* reprogram multicast list */ 1896 hw->mac.ops.update_mc_addr_list(hw, netdev); 1897 1898 ixgbevf_write_uc_addr_list(netdev); 1899 1900 spin_unlock_bh(&adapter->mbx_lock); 1901 } 1902 1903 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1904 { 1905 int q_idx; 1906 struct ixgbevf_q_vector *q_vector; 1907 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1908 1909 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1910 q_vector = adapter->q_vector[q_idx]; 1911 napi_enable(&q_vector->napi); 1912 } 1913 } 1914 1915 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1916 { 1917 int q_idx; 1918 struct ixgbevf_q_vector *q_vector; 1919 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1920 1921 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1922 q_vector = adapter->q_vector[q_idx]; 1923 napi_disable(&q_vector->napi); 1924 } 1925 } 1926 1927 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) 1928 { 1929 struct ixgbe_hw *hw = &adapter->hw; 1930 unsigned int def_q = 0; 1931 unsigned int num_tcs = 0; 1932 unsigned int num_rx_queues = adapter->num_rx_queues; 1933 unsigned int num_tx_queues = adapter->num_tx_queues; 1934 int err; 1935 1936 spin_lock_bh(&adapter->mbx_lock); 1937 1938 /* fetch queue configuration from the PF */ 1939 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1940 1941 spin_unlock_bh(&adapter->mbx_lock); 1942 1943 if (err) 1944 return err; 1945 1946 if (num_tcs > 1) { 1947 /* we need only one Tx queue */ 1948 num_tx_queues = 1; 1949 1950 /* update default Tx ring register index */ 1951 adapter->tx_ring[0]->reg_idx = def_q; 1952 1953 /* we need as many queues as traffic classes */ 1954 num_rx_queues = num_tcs; 1955 } 1956 1957 /* if we have a bad config abort request queue reset */ 1958 if ((adapter->num_rx_queues != num_rx_queues) || 1959 (adapter->num_tx_queues != num_tx_queues)) { 1960 /* force mailbox timeout to prevent further messages */ 1961 hw->mbx.timeout = 0; 1962 1963 /* wait for watchdog to come around and bail us out */ 1964 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); 1965 } 1966 1967 return 0; 1968 } 1969 1970 static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1971 { 1972 ixgbevf_configure_dcb(adapter); 1973 1974 ixgbevf_set_rx_mode(adapter->netdev); 1975 1976 ixgbevf_restore_vlan(adapter); 1977 1978 ixgbevf_configure_tx(adapter); 1979 ixgbevf_configure_rx(adapter); 1980 } 1981 1982 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1983 { 1984 /* Only save pre-reset stats if there are some */ 1985 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1986 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1987 adapter->stats.base_vfgprc; 1988 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1989 adapter->stats.base_vfgptc; 1990 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1991 adapter->stats.base_vfgorc; 1992 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1993 adapter->stats.base_vfgotc; 1994 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1995 adapter->stats.base_vfmprc; 1996 } 1997 } 1998 1999 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 2000 { 2001 struct ixgbe_hw *hw = &adapter->hw; 2002 2003 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 2004 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 2005 adapter->stats.last_vfgorc |= 2006 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 2007 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 2008 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 2009 adapter->stats.last_vfgotc |= 2010 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 2011 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 2012 2013 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 2014 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 2015 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 2016 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 2017 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 2018 } 2019 2020 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 2021 { 2022 struct ixgbe_hw *hw = &adapter->hw; 2023 int api[] = { ixgbe_mbox_api_13, 2024 ixgbe_mbox_api_12, 2025 ixgbe_mbox_api_11, 2026 ixgbe_mbox_api_10, 2027 ixgbe_mbox_api_unknown }; 2028 int err, idx = 0; 2029 2030 spin_lock_bh(&adapter->mbx_lock); 2031 2032 while (api[idx] != ixgbe_mbox_api_unknown) { 2033 err = hw->mac.ops.negotiate_api_version(hw, api[idx]); 2034 if (!err) 2035 break; 2036 idx++; 2037 } 2038 2039 spin_unlock_bh(&adapter->mbx_lock); 2040 } 2041 2042 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 2043 { 2044 struct net_device *netdev = adapter->netdev; 2045 struct ixgbe_hw *hw = &adapter->hw; 2046 2047 ixgbevf_configure_msix(adapter); 2048 2049 spin_lock_bh(&adapter->mbx_lock); 2050 2051 if (is_valid_ether_addr(hw->mac.addr)) 2052 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 2053 else 2054 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 2055 2056 spin_unlock_bh(&adapter->mbx_lock); 2057 2058 smp_mb__before_atomic(); 2059 clear_bit(__IXGBEVF_DOWN, &adapter->state); 2060 ixgbevf_napi_enable_all(adapter); 2061 2062 /* clear any pending interrupts, may auto mask */ 2063 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2064 ixgbevf_irq_enable(adapter); 2065 2066 /* enable transmits */ 2067 netif_tx_start_all_queues(netdev); 2068 2069 ixgbevf_save_reset_stats(adapter); 2070 ixgbevf_init_last_counter_stats(adapter); 2071 2072 hw->mac.get_link_status = 1; 2073 mod_timer(&adapter->service_timer, jiffies); 2074 } 2075 2076 void ixgbevf_up(struct ixgbevf_adapter *adapter) 2077 { 2078 ixgbevf_configure(adapter); 2079 2080 ixgbevf_up_complete(adapter); 2081 } 2082 2083 /** 2084 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 2085 * @rx_ring: ring to free buffers from 2086 **/ 2087 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) 2088 { 2089 struct device *dev = rx_ring->dev; 2090 unsigned long size; 2091 unsigned int i; 2092 2093 /* Free Rx ring sk_buff */ 2094 if (rx_ring->skb) { 2095 dev_kfree_skb(rx_ring->skb); 2096 rx_ring->skb = NULL; 2097 } 2098 2099 /* ring already cleared, nothing to do */ 2100 if (!rx_ring->rx_buffer_info) 2101 return; 2102 2103 /* Free all the Rx ring pages */ 2104 for (i = 0; i < rx_ring->count; i++) { 2105 struct ixgbevf_rx_buffer *rx_buffer; 2106 2107 rx_buffer = &rx_ring->rx_buffer_info[i]; 2108 if (rx_buffer->dma) 2109 dma_unmap_page(dev, rx_buffer->dma, 2110 PAGE_SIZE, DMA_FROM_DEVICE); 2111 rx_buffer->dma = 0; 2112 if (rx_buffer->page) 2113 __free_page(rx_buffer->page); 2114 rx_buffer->page = NULL; 2115 } 2116 2117 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2118 memset(rx_ring->rx_buffer_info, 0, size); 2119 2120 /* Zero out the descriptor ring */ 2121 memset(rx_ring->desc, 0, rx_ring->size); 2122 } 2123 2124 /** 2125 * ixgbevf_clean_tx_ring - Free Tx Buffers 2126 * @tx_ring: ring to be cleaned 2127 **/ 2128 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) 2129 { 2130 struct ixgbevf_tx_buffer *tx_buffer_info; 2131 unsigned long size; 2132 unsigned int i; 2133 2134 if (!tx_ring->tx_buffer_info) 2135 return; 2136 2137 /* Free all the Tx ring sk_buffs */ 2138 for (i = 0; i < tx_ring->count; i++) { 2139 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2140 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 2141 } 2142 2143 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2144 memset(tx_ring->tx_buffer_info, 0, size); 2145 2146 memset(tx_ring->desc, 0, tx_ring->size); 2147 } 2148 2149 /** 2150 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 2151 * @adapter: board private structure 2152 **/ 2153 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 2154 { 2155 int i; 2156 2157 for (i = 0; i < adapter->num_rx_queues; i++) 2158 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); 2159 } 2160 2161 /** 2162 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 2163 * @adapter: board private structure 2164 **/ 2165 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 2166 { 2167 int i; 2168 2169 for (i = 0; i < adapter->num_tx_queues; i++) 2170 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); 2171 } 2172 2173 void ixgbevf_down(struct ixgbevf_adapter *adapter) 2174 { 2175 struct net_device *netdev = adapter->netdev; 2176 struct ixgbe_hw *hw = &adapter->hw; 2177 int i; 2178 2179 /* signal that we are down to the interrupt handler */ 2180 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) 2181 return; /* do nothing if already down */ 2182 2183 /* disable all enabled Rx queues */ 2184 for (i = 0; i < adapter->num_rx_queues; i++) 2185 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); 2186 2187 usleep_range(10000, 20000); 2188 2189 netif_tx_stop_all_queues(netdev); 2190 2191 /* call carrier off first to avoid false dev_watchdog timeouts */ 2192 netif_carrier_off(netdev); 2193 netif_tx_disable(netdev); 2194 2195 ixgbevf_irq_disable(adapter); 2196 2197 ixgbevf_napi_disable_all(adapter); 2198 2199 del_timer_sync(&adapter->service_timer); 2200 2201 /* disable transmits in the hardware now that interrupts are off */ 2202 for (i = 0; i < adapter->num_tx_queues; i++) { 2203 u8 reg_idx = adapter->tx_ring[i]->reg_idx; 2204 2205 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), 2206 IXGBE_TXDCTL_SWFLSH); 2207 } 2208 2209 if (!pci_channel_offline(adapter->pdev)) 2210 ixgbevf_reset(adapter); 2211 2212 ixgbevf_clean_all_tx_rings(adapter); 2213 ixgbevf_clean_all_rx_rings(adapter); 2214 } 2215 2216 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 2217 { 2218 WARN_ON(in_interrupt()); 2219 2220 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 2221 msleep(1); 2222 2223 ixgbevf_down(adapter); 2224 ixgbevf_up(adapter); 2225 2226 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 2227 } 2228 2229 void ixgbevf_reset(struct ixgbevf_adapter *adapter) 2230 { 2231 struct ixgbe_hw *hw = &adapter->hw; 2232 struct net_device *netdev = adapter->netdev; 2233 2234 if (hw->mac.ops.reset_hw(hw)) { 2235 hw_dbg(hw, "PF still resetting\n"); 2236 } else { 2237 hw->mac.ops.init_hw(hw); 2238 ixgbevf_negotiate_api(adapter); 2239 } 2240 2241 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 2242 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 2243 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 2244 } 2245 2246 adapter->last_reset = jiffies; 2247 } 2248 2249 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 2250 int vectors) 2251 { 2252 int vector_threshold; 2253 2254 /* We'll want at least 2 (vector_threshold): 2255 * 1) TxQ[0] + RxQ[0] handler 2256 * 2) Other (Link Status Change, etc.) 2257 */ 2258 vector_threshold = MIN_MSIX_COUNT; 2259 2260 /* The more we get, the more we will assign to Tx/Rx Cleanup 2261 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 2262 * Right now, we simply care about how many we'll get; we'll 2263 * set them up later while requesting irq's. 2264 */ 2265 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 2266 vector_threshold, vectors); 2267 2268 if (vectors < 0) { 2269 dev_err(&adapter->pdev->dev, 2270 "Unable to allocate MSI-X interrupts\n"); 2271 kfree(adapter->msix_entries); 2272 adapter->msix_entries = NULL; 2273 return vectors; 2274 } 2275 2276 /* Adjust for only the vectors we'll use, which is minimum 2277 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 2278 * vectors we were allocated. 2279 */ 2280 adapter->num_msix_vectors = vectors; 2281 2282 return 0; 2283 } 2284 2285 /** 2286 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 2287 * @adapter: board private structure to initialize 2288 * 2289 * This is the top level queue allocation routine. The order here is very 2290 * important, starting with the "most" number of features turned on at once, 2291 * and ending with the smallest set of features. This way large combinations 2292 * can be allocated if they're turned on, and smaller combinations are the 2293 * fallthrough conditions. 2294 * 2295 **/ 2296 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 2297 { 2298 struct ixgbe_hw *hw = &adapter->hw; 2299 unsigned int def_q = 0; 2300 unsigned int num_tcs = 0; 2301 int err; 2302 2303 /* Start with base case */ 2304 adapter->num_rx_queues = 1; 2305 adapter->num_tx_queues = 1; 2306 2307 spin_lock_bh(&adapter->mbx_lock); 2308 2309 /* fetch queue configuration from the PF */ 2310 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2311 2312 spin_unlock_bh(&adapter->mbx_lock); 2313 2314 if (err) 2315 return; 2316 2317 /* we need as many queues as traffic classes */ 2318 if (num_tcs > 1) { 2319 adapter->num_rx_queues = num_tcs; 2320 } else { 2321 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES); 2322 2323 switch (hw->api_version) { 2324 case ixgbe_mbox_api_11: 2325 case ixgbe_mbox_api_12: 2326 case ixgbe_mbox_api_13: 2327 adapter->num_rx_queues = rss; 2328 adapter->num_tx_queues = rss; 2329 default: 2330 break; 2331 } 2332 } 2333 } 2334 2335 /** 2336 * ixgbevf_alloc_queues - Allocate memory for all rings 2337 * @adapter: board private structure to initialize 2338 * 2339 * We allocate one ring per queue at run-time since we don't know the 2340 * number of queues at compile-time. The polling_netdev array is 2341 * intended for Multiqueue, but should work fine with a single queue. 2342 **/ 2343 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 2344 { 2345 struct ixgbevf_ring *ring; 2346 int rx = 0, tx = 0; 2347 2348 for (; tx < adapter->num_tx_queues; tx++) { 2349 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 2350 if (!ring) 2351 goto err_allocation; 2352 2353 ring->dev = &adapter->pdev->dev; 2354 ring->netdev = adapter->netdev; 2355 ring->count = adapter->tx_ring_count; 2356 ring->queue_index = tx; 2357 ring->reg_idx = tx; 2358 2359 adapter->tx_ring[tx] = ring; 2360 } 2361 2362 for (; rx < adapter->num_rx_queues; rx++) { 2363 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 2364 if (!ring) 2365 goto err_allocation; 2366 2367 ring->dev = &adapter->pdev->dev; 2368 ring->netdev = adapter->netdev; 2369 2370 ring->count = adapter->rx_ring_count; 2371 ring->queue_index = rx; 2372 ring->reg_idx = rx; 2373 2374 adapter->rx_ring[rx] = ring; 2375 } 2376 2377 return 0; 2378 2379 err_allocation: 2380 while (tx) { 2381 kfree(adapter->tx_ring[--tx]); 2382 adapter->tx_ring[tx] = NULL; 2383 } 2384 2385 while (rx) { 2386 kfree(adapter->rx_ring[--rx]); 2387 adapter->rx_ring[rx] = NULL; 2388 } 2389 return -ENOMEM; 2390 } 2391 2392 /** 2393 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 2394 * @adapter: board private structure to initialize 2395 * 2396 * Attempt to configure the interrupts using the best available 2397 * capabilities of the hardware and the kernel. 2398 **/ 2399 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 2400 { 2401 struct net_device *netdev = adapter->netdev; 2402 int err; 2403 int vector, v_budget; 2404 2405 /* It's easy to be greedy for MSI-X vectors, but it really 2406 * doesn't do us much good if we have a lot more vectors 2407 * than CPU's. So let's be conservative and only ask for 2408 * (roughly) the same number of vectors as there are CPU's. 2409 * The default is to use pairs of vectors. 2410 */ 2411 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 2412 v_budget = min_t(int, v_budget, num_online_cpus()); 2413 v_budget += NON_Q_VECTORS; 2414 2415 /* A failure in MSI-X entry allocation isn't fatal, but it does 2416 * mean we disable MSI-X capabilities of the adapter. 2417 */ 2418 adapter->msix_entries = kcalloc(v_budget, 2419 sizeof(struct msix_entry), GFP_KERNEL); 2420 if (!adapter->msix_entries) 2421 return -ENOMEM; 2422 2423 for (vector = 0; vector < v_budget; vector++) 2424 adapter->msix_entries[vector].entry = vector; 2425 2426 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 2427 if (err) 2428 return err; 2429 2430 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 2431 if (err) 2432 return err; 2433 2434 return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 2435 } 2436 2437 /** 2438 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 2439 * @adapter: board private structure to initialize 2440 * 2441 * We allocate one q_vector per queue interrupt. If allocation fails we 2442 * return -ENOMEM. 2443 **/ 2444 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2445 { 2446 int q_idx, num_q_vectors; 2447 struct ixgbevf_q_vector *q_vector; 2448 2449 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2450 2451 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2452 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 2453 if (!q_vector) 2454 goto err_out; 2455 q_vector->adapter = adapter; 2456 q_vector->v_idx = q_idx; 2457 netif_napi_add(adapter->netdev, &q_vector->napi, 2458 ixgbevf_poll, 64); 2459 adapter->q_vector[q_idx] = q_vector; 2460 } 2461 2462 return 0; 2463 2464 err_out: 2465 while (q_idx) { 2466 q_idx--; 2467 q_vector = adapter->q_vector[q_idx]; 2468 #ifdef CONFIG_NET_RX_BUSY_POLL 2469 napi_hash_del(&q_vector->napi); 2470 #endif 2471 netif_napi_del(&q_vector->napi); 2472 kfree(q_vector); 2473 adapter->q_vector[q_idx] = NULL; 2474 } 2475 return -ENOMEM; 2476 } 2477 2478 /** 2479 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2480 * @adapter: board private structure to initialize 2481 * 2482 * This function frees the memory allocated to the q_vectors. In addition if 2483 * NAPI is enabled it will delete any references to the NAPI struct prior 2484 * to freeing the q_vector. 2485 **/ 2486 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2487 { 2488 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2489 2490 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2491 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 2492 2493 adapter->q_vector[q_idx] = NULL; 2494 #ifdef CONFIG_NET_RX_BUSY_POLL 2495 napi_hash_del(&q_vector->napi); 2496 #endif 2497 netif_napi_del(&q_vector->napi); 2498 kfree(q_vector); 2499 } 2500 } 2501 2502 /** 2503 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2504 * @adapter: board private structure 2505 * 2506 **/ 2507 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2508 { 2509 if (!adapter->msix_entries) 2510 return; 2511 2512 pci_disable_msix(adapter->pdev); 2513 kfree(adapter->msix_entries); 2514 adapter->msix_entries = NULL; 2515 } 2516 2517 /** 2518 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2519 * @adapter: board private structure to initialize 2520 * 2521 **/ 2522 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2523 { 2524 int err; 2525 2526 /* Number of supported queues */ 2527 ixgbevf_set_num_queues(adapter); 2528 2529 err = ixgbevf_set_interrupt_capability(adapter); 2530 if (err) { 2531 hw_dbg(&adapter->hw, 2532 "Unable to setup interrupt capabilities\n"); 2533 goto err_set_interrupt; 2534 } 2535 2536 err = ixgbevf_alloc_q_vectors(adapter); 2537 if (err) { 2538 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); 2539 goto err_alloc_q_vectors; 2540 } 2541 2542 err = ixgbevf_alloc_queues(adapter); 2543 if (err) { 2544 pr_err("Unable to allocate memory for queues\n"); 2545 goto err_alloc_queues; 2546 } 2547 2548 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", 2549 (adapter->num_rx_queues > 1) ? "Enabled" : 2550 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2551 2552 set_bit(__IXGBEVF_DOWN, &adapter->state); 2553 2554 return 0; 2555 err_alloc_queues: 2556 ixgbevf_free_q_vectors(adapter); 2557 err_alloc_q_vectors: 2558 ixgbevf_reset_interrupt_capability(adapter); 2559 err_set_interrupt: 2560 return err; 2561 } 2562 2563 /** 2564 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2565 * @adapter: board private structure to clear interrupt scheme on 2566 * 2567 * We go through and clear interrupt specific resources and reset the structure 2568 * to pre-load conditions 2569 **/ 2570 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2571 { 2572 int i; 2573 2574 for (i = 0; i < adapter->num_tx_queues; i++) { 2575 kfree(adapter->tx_ring[i]); 2576 adapter->tx_ring[i] = NULL; 2577 } 2578 for (i = 0; i < adapter->num_rx_queues; i++) { 2579 kfree(adapter->rx_ring[i]); 2580 adapter->rx_ring[i] = NULL; 2581 } 2582 2583 adapter->num_tx_queues = 0; 2584 adapter->num_rx_queues = 0; 2585 2586 ixgbevf_free_q_vectors(adapter); 2587 ixgbevf_reset_interrupt_capability(adapter); 2588 } 2589 2590 /** 2591 * ixgbevf_sw_init - Initialize general software structures 2592 * @adapter: board private structure to initialize 2593 * 2594 * ixgbevf_sw_init initializes the Adapter private data structure. 2595 * Fields are initialized based on PCI device information and 2596 * OS network device settings (MTU size). 2597 **/ 2598 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2599 { 2600 struct ixgbe_hw *hw = &adapter->hw; 2601 struct pci_dev *pdev = adapter->pdev; 2602 struct net_device *netdev = adapter->netdev; 2603 int err; 2604 2605 /* PCI config space info */ 2606 hw->vendor_id = pdev->vendor; 2607 hw->device_id = pdev->device; 2608 hw->revision_id = pdev->revision; 2609 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2610 hw->subsystem_device_id = pdev->subsystem_device; 2611 2612 hw->mbx.ops.init_params(hw); 2613 2614 /* assume legacy case in which PF would only give VF 2 queues */ 2615 hw->mac.max_tx_queues = 2; 2616 hw->mac.max_rx_queues = 2; 2617 2618 /* lock to protect mailbox accesses */ 2619 spin_lock_init(&adapter->mbx_lock); 2620 2621 err = hw->mac.ops.reset_hw(hw); 2622 if (err) { 2623 dev_info(&pdev->dev, 2624 "PF still in reset state. Is the PF interface up?\n"); 2625 } else { 2626 err = hw->mac.ops.init_hw(hw); 2627 if (err) { 2628 pr_err("init_shared_code failed: %d\n", err); 2629 goto out; 2630 } 2631 ixgbevf_negotiate_api(adapter); 2632 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 2633 if (err) 2634 dev_info(&pdev->dev, "Error reading MAC address\n"); 2635 else if (is_zero_ether_addr(adapter->hw.mac.addr)) 2636 dev_info(&pdev->dev, 2637 "MAC address not assigned by administrator.\n"); 2638 ether_addr_copy(netdev->dev_addr, hw->mac.addr); 2639 } 2640 2641 if (!is_valid_ether_addr(netdev->dev_addr)) { 2642 dev_info(&pdev->dev, "Assigning random MAC address\n"); 2643 eth_hw_addr_random(netdev); 2644 ether_addr_copy(hw->mac.addr, netdev->dev_addr); 2645 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); 2646 } 2647 2648 /* Enable dynamic interrupt throttling rates */ 2649 adapter->rx_itr_setting = 1; 2650 adapter->tx_itr_setting = 1; 2651 2652 /* set default ring sizes */ 2653 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2654 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2655 2656 set_bit(__IXGBEVF_DOWN, &adapter->state); 2657 return 0; 2658 2659 out: 2660 return err; 2661 } 2662 2663 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2664 { \ 2665 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2666 if (current_counter < last_counter) \ 2667 counter += 0x100000000LL; \ 2668 last_counter = current_counter; \ 2669 counter &= 0xFFFFFFFF00000000LL; \ 2670 counter |= current_counter; \ 2671 } 2672 2673 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2674 { \ 2675 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2676 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2677 u64 current_counter = (current_counter_msb << 32) | \ 2678 current_counter_lsb; \ 2679 if (current_counter < last_counter) \ 2680 counter += 0x1000000000LL; \ 2681 last_counter = current_counter; \ 2682 counter &= 0xFFFFFFF000000000LL; \ 2683 counter |= current_counter; \ 2684 } 2685 /** 2686 * ixgbevf_update_stats - Update the board statistics counters. 2687 * @adapter: board private structure 2688 **/ 2689 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2690 { 2691 struct ixgbe_hw *hw = &adapter->hw; 2692 int i; 2693 2694 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2695 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2696 return; 2697 2698 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2699 adapter->stats.vfgprc); 2700 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2701 adapter->stats.vfgptc); 2702 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2703 adapter->stats.last_vfgorc, 2704 adapter->stats.vfgorc); 2705 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2706 adapter->stats.last_vfgotc, 2707 adapter->stats.vfgotc); 2708 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2709 adapter->stats.vfmprc); 2710 2711 for (i = 0; i < adapter->num_rx_queues; i++) { 2712 adapter->hw_csum_rx_error += 2713 adapter->rx_ring[i]->hw_csum_rx_error; 2714 adapter->rx_ring[i]->hw_csum_rx_error = 0; 2715 } 2716 } 2717 2718 /** 2719 * ixgbevf_service_timer - Timer Call-back 2720 * @data: pointer to adapter cast into an unsigned long 2721 **/ 2722 static void ixgbevf_service_timer(unsigned long data) 2723 { 2724 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2725 2726 /* Reset the timer */ 2727 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); 2728 2729 ixgbevf_service_event_schedule(adapter); 2730 } 2731 2732 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) 2733 { 2734 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) 2735 return; 2736 2737 /* If we're already down or resetting, just bail */ 2738 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2739 test_bit(__IXGBEVF_REMOVING, &adapter->state) || 2740 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2741 return; 2742 2743 adapter->tx_timeout_count++; 2744 2745 rtnl_lock(); 2746 ixgbevf_reinit_locked(adapter); 2747 rtnl_unlock(); 2748 } 2749 2750 /** 2751 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts 2752 * @adapter: pointer to the device adapter structure 2753 * 2754 * This function serves two purposes. First it strobes the interrupt lines 2755 * in order to make certain interrupts are occurring. Secondly it sets the 2756 * bits needed to check for TX hangs. As a result we should immediately 2757 * determine if a hang has occurred. 2758 **/ 2759 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) 2760 { 2761 struct ixgbe_hw *hw = &adapter->hw; 2762 u32 eics = 0; 2763 int i; 2764 2765 /* If we're down or resetting, just bail */ 2766 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2767 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2768 return; 2769 2770 /* Force detection of hung controller */ 2771 if (netif_carrier_ok(adapter->netdev)) { 2772 for (i = 0; i < adapter->num_tx_queues; i++) 2773 set_check_for_tx_hang(adapter->tx_ring[i]); 2774 } 2775 2776 /* get one bit for every active Tx/Rx interrupt vector */ 2777 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2778 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2779 2780 if (qv->rx.ring || qv->tx.ring) 2781 eics |= BIT(i); 2782 } 2783 2784 /* Cause software interrupt to ensure rings are cleaned */ 2785 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2786 } 2787 2788 /** 2789 * ixgbevf_watchdog_update_link - update the link status 2790 * @adapter: pointer to the device adapter structure 2791 **/ 2792 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) 2793 { 2794 struct ixgbe_hw *hw = &adapter->hw; 2795 u32 link_speed = adapter->link_speed; 2796 bool link_up = adapter->link_up; 2797 s32 err; 2798 2799 spin_lock_bh(&adapter->mbx_lock); 2800 2801 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2802 2803 spin_unlock_bh(&adapter->mbx_lock); 2804 2805 /* if check for link returns error we will need to reset */ 2806 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { 2807 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); 2808 link_up = false; 2809 } 2810 2811 adapter->link_up = link_up; 2812 adapter->link_speed = link_speed; 2813 } 2814 2815 /** 2816 * ixgbevf_watchdog_link_is_up - update netif_carrier status and 2817 * print link up message 2818 * @adapter: pointer to the device adapter structure 2819 **/ 2820 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) 2821 { 2822 struct net_device *netdev = adapter->netdev; 2823 2824 /* only continue if link was previously down */ 2825 if (netif_carrier_ok(netdev)) 2826 return; 2827 2828 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", 2829 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2830 "10 Gbps" : 2831 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? 2832 "1 Gbps" : 2833 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? 2834 "100 Mbps" : 2835 "unknown speed"); 2836 2837 netif_carrier_on(netdev); 2838 } 2839 2840 /** 2841 * ixgbevf_watchdog_link_is_down - update netif_carrier status and 2842 * print link down message 2843 * @adapter: pointer to the adapter structure 2844 **/ 2845 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) 2846 { 2847 struct net_device *netdev = adapter->netdev; 2848 2849 adapter->link_speed = 0; 2850 2851 /* only continue if link was up previously */ 2852 if (!netif_carrier_ok(netdev)) 2853 return; 2854 2855 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2856 2857 netif_carrier_off(netdev); 2858 } 2859 2860 /** 2861 * ixgbevf_watchdog_subtask - worker thread to bring link up 2862 * @work: pointer to work_struct containing our data 2863 **/ 2864 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) 2865 { 2866 /* if interface is down do nothing */ 2867 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2868 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2869 return; 2870 2871 ixgbevf_watchdog_update_link(adapter); 2872 2873 if (adapter->link_up) 2874 ixgbevf_watchdog_link_is_up(adapter); 2875 else 2876 ixgbevf_watchdog_link_is_down(adapter); 2877 2878 ixgbevf_update_stats(adapter); 2879 } 2880 2881 /** 2882 * ixgbevf_service_task - manages and runs subtasks 2883 * @work: pointer to work_struct containing our data 2884 **/ 2885 static void ixgbevf_service_task(struct work_struct *work) 2886 { 2887 struct ixgbevf_adapter *adapter = container_of(work, 2888 struct ixgbevf_adapter, 2889 service_task); 2890 struct ixgbe_hw *hw = &adapter->hw; 2891 2892 if (IXGBE_REMOVED(hw->hw_addr)) { 2893 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 2894 rtnl_lock(); 2895 ixgbevf_down(adapter); 2896 rtnl_unlock(); 2897 } 2898 return; 2899 } 2900 2901 ixgbevf_queue_reset_subtask(adapter); 2902 ixgbevf_reset_subtask(adapter); 2903 ixgbevf_watchdog_subtask(adapter); 2904 ixgbevf_check_hang_subtask(adapter); 2905 2906 ixgbevf_service_event_complete(adapter); 2907 } 2908 2909 /** 2910 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2911 * @tx_ring: Tx descriptor ring for a specific queue 2912 * 2913 * Free all transmit software resources 2914 **/ 2915 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) 2916 { 2917 ixgbevf_clean_tx_ring(tx_ring); 2918 2919 vfree(tx_ring->tx_buffer_info); 2920 tx_ring->tx_buffer_info = NULL; 2921 2922 /* if not set, then don't free */ 2923 if (!tx_ring->desc) 2924 return; 2925 2926 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, 2927 tx_ring->dma); 2928 2929 tx_ring->desc = NULL; 2930 } 2931 2932 /** 2933 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2934 * @adapter: board private structure 2935 * 2936 * Free all transmit software resources 2937 **/ 2938 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2939 { 2940 int i; 2941 2942 for (i = 0; i < adapter->num_tx_queues; i++) 2943 if (adapter->tx_ring[i]->desc) 2944 ixgbevf_free_tx_resources(adapter->tx_ring[i]); 2945 } 2946 2947 /** 2948 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2949 * @tx_ring: Tx descriptor ring (for a specific queue) to setup 2950 * 2951 * Return 0 on success, negative on failure 2952 **/ 2953 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) 2954 { 2955 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2956 int size; 2957 2958 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2959 tx_ring->tx_buffer_info = vzalloc(size); 2960 if (!tx_ring->tx_buffer_info) 2961 goto err; 2962 2963 /* round up to nearest 4K */ 2964 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2965 tx_ring->size = ALIGN(tx_ring->size, 4096); 2966 2967 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, 2968 &tx_ring->dma, GFP_KERNEL); 2969 if (!tx_ring->desc) 2970 goto err; 2971 2972 return 0; 2973 2974 err: 2975 vfree(tx_ring->tx_buffer_info); 2976 tx_ring->tx_buffer_info = NULL; 2977 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); 2978 return -ENOMEM; 2979 } 2980 2981 /** 2982 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2983 * @adapter: board private structure 2984 * 2985 * If this function returns with an error, then it's possible one or 2986 * more of the rings is populated (while the rest are not). It is the 2987 * callers duty to clean those orphaned rings. 2988 * 2989 * Return 0 on success, negative on failure 2990 **/ 2991 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2992 { 2993 int i, err = 0; 2994 2995 for (i = 0; i < adapter->num_tx_queues; i++) { 2996 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); 2997 if (!err) 2998 continue; 2999 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); 3000 break; 3001 } 3002 3003 return err; 3004 } 3005 3006 /** 3007 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 3008 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 3009 * 3010 * Returns 0 on success, negative on failure 3011 **/ 3012 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) 3013 { 3014 int size; 3015 3016 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 3017 rx_ring->rx_buffer_info = vzalloc(size); 3018 if (!rx_ring->rx_buffer_info) 3019 goto err; 3020 3021 /* Round up to nearest 4K */ 3022 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 3023 rx_ring->size = ALIGN(rx_ring->size, 4096); 3024 3025 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, 3026 &rx_ring->dma, GFP_KERNEL); 3027 3028 if (!rx_ring->desc) 3029 goto err; 3030 3031 return 0; 3032 err: 3033 vfree(rx_ring->rx_buffer_info); 3034 rx_ring->rx_buffer_info = NULL; 3035 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); 3036 return -ENOMEM; 3037 } 3038 3039 /** 3040 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 3041 * @adapter: board private structure 3042 * 3043 * If this function returns with an error, then it's possible one or 3044 * more of the rings is populated (while the rest are not). It is the 3045 * callers duty to clean those orphaned rings. 3046 * 3047 * Return 0 on success, negative on failure 3048 **/ 3049 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 3050 { 3051 int i, err = 0; 3052 3053 for (i = 0; i < adapter->num_rx_queues; i++) { 3054 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); 3055 if (!err) 3056 continue; 3057 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); 3058 break; 3059 } 3060 return err; 3061 } 3062 3063 /** 3064 * ixgbevf_free_rx_resources - Free Rx Resources 3065 * @rx_ring: ring to clean the resources from 3066 * 3067 * Free all receive software resources 3068 **/ 3069 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) 3070 { 3071 ixgbevf_clean_rx_ring(rx_ring); 3072 3073 vfree(rx_ring->rx_buffer_info); 3074 rx_ring->rx_buffer_info = NULL; 3075 3076 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, 3077 rx_ring->dma); 3078 3079 rx_ring->desc = NULL; 3080 } 3081 3082 /** 3083 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 3084 * @adapter: board private structure 3085 * 3086 * Free all receive software resources 3087 **/ 3088 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 3089 { 3090 int i; 3091 3092 for (i = 0; i < adapter->num_rx_queues; i++) 3093 if (adapter->rx_ring[i]->desc) 3094 ixgbevf_free_rx_resources(adapter->rx_ring[i]); 3095 } 3096 3097 /** 3098 * ixgbevf_open - Called when a network interface is made active 3099 * @netdev: network interface device structure 3100 * 3101 * Returns 0 on success, negative value on failure 3102 * 3103 * The open entry point is called when a network interface is made 3104 * active by the system (IFF_UP). At this point all resources needed 3105 * for transmit and receive operations are allocated, the interrupt 3106 * handler is registered with the OS, the watchdog timer is started, 3107 * and the stack is notified that the interface is ready. 3108 **/ 3109 int ixgbevf_open(struct net_device *netdev) 3110 { 3111 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3112 struct ixgbe_hw *hw = &adapter->hw; 3113 int err; 3114 3115 /* A previous failure to open the device because of a lack of 3116 * available MSIX vector resources may have reset the number 3117 * of msix vectors variable to zero. The only way to recover 3118 * is to unload/reload the driver and hope that the system has 3119 * been able to recover some MSIX vector resources. 3120 */ 3121 if (!adapter->num_msix_vectors) 3122 return -ENOMEM; 3123 3124 if (hw->adapter_stopped) { 3125 ixgbevf_reset(adapter); 3126 /* if adapter is still stopped then PF isn't up and 3127 * the VF can't start. 3128 */ 3129 if (hw->adapter_stopped) { 3130 err = IXGBE_ERR_MBX; 3131 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); 3132 goto err_setup_reset; 3133 } 3134 } 3135 3136 /* disallow open during test */ 3137 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 3138 return -EBUSY; 3139 3140 netif_carrier_off(netdev); 3141 3142 /* allocate transmit descriptors */ 3143 err = ixgbevf_setup_all_tx_resources(adapter); 3144 if (err) 3145 goto err_setup_tx; 3146 3147 /* allocate receive descriptors */ 3148 err = ixgbevf_setup_all_rx_resources(adapter); 3149 if (err) 3150 goto err_setup_rx; 3151 3152 ixgbevf_configure(adapter); 3153 3154 /* Map the Tx/Rx rings to the vectors we were allotted. 3155 * if request_irq will be called in this function map_rings 3156 * must be called *before* up_complete 3157 */ 3158 ixgbevf_map_rings_to_vectors(adapter); 3159 3160 err = ixgbevf_request_irq(adapter); 3161 if (err) 3162 goto err_req_irq; 3163 3164 ixgbevf_up_complete(adapter); 3165 3166 return 0; 3167 3168 err_req_irq: 3169 ixgbevf_down(adapter); 3170 err_setup_rx: 3171 ixgbevf_free_all_rx_resources(adapter); 3172 err_setup_tx: 3173 ixgbevf_free_all_tx_resources(adapter); 3174 ixgbevf_reset(adapter); 3175 3176 err_setup_reset: 3177 3178 return err; 3179 } 3180 3181 /** 3182 * ixgbevf_close_suspend - actions necessary to both suspend and close flows 3183 * @adapter: the private adapter struct 3184 * 3185 * This function should contain the necessary work common to both suspending 3186 * and closing of the device. 3187 */ 3188 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter) 3189 { 3190 ixgbevf_down(adapter); 3191 ixgbevf_free_irq(adapter); 3192 ixgbevf_free_all_tx_resources(adapter); 3193 ixgbevf_free_all_rx_resources(adapter); 3194 } 3195 3196 /** 3197 * ixgbevf_close - Disables a network interface 3198 * @netdev: network interface device structure 3199 * 3200 * Returns 0, this is not allowed to fail 3201 * 3202 * The close entry point is called when an interface is de-activated 3203 * by the OS. The hardware is still under the drivers control, but 3204 * needs to be disabled. A global MAC reset is issued to stop the 3205 * hardware, and all transmit and receive resources are freed. 3206 **/ 3207 int ixgbevf_close(struct net_device *netdev) 3208 { 3209 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3210 3211 if (netif_device_present(netdev)) 3212 ixgbevf_close_suspend(adapter); 3213 3214 return 0; 3215 } 3216 3217 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) 3218 { 3219 struct net_device *dev = adapter->netdev; 3220 3221 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, 3222 &adapter->state)) 3223 return; 3224 3225 /* if interface is down do nothing */ 3226 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 3227 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 3228 return; 3229 3230 /* Hardware has to reinitialize queues and interrupts to 3231 * match packet buffer alignment. Unfortunately, the 3232 * hardware is not flexible enough to do this dynamically. 3233 */ 3234 rtnl_lock(); 3235 3236 if (netif_running(dev)) 3237 ixgbevf_close(dev); 3238 3239 ixgbevf_clear_interrupt_scheme(adapter); 3240 ixgbevf_init_interrupt_scheme(adapter); 3241 3242 if (netif_running(dev)) 3243 ixgbevf_open(dev); 3244 3245 rtnl_unlock(); 3246 } 3247 3248 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 3249 u32 vlan_macip_lens, u32 type_tucmd, 3250 u32 mss_l4len_idx) 3251 { 3252 struct ixgbe_adv_tx_context_desc *context_desc; 3253 u16 i = tx_ring->next_to_use; 3254 3255 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 3256 3257 i++; 3258 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 3259 3260 /* set bits to identify this as an advanced context descriptor */ 3261 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 3262 3263 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 3264 context_desc->seqnum_seed = 0; 3265 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 3266 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3267 } 3268 3269 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 3270 struct ixgbevf_tx_buffer *first, 3271 u8 *hdr_len) 3272 { 3273 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 3274 struct sk_buff *skb = first->skb; 3275 union { 3276 struct iphdr *v4; 3277 struct ipv6hdr *v6; 3278 unsigned char *hdr; 3279 } ip; 3280 union { 3281 struct tcphdr *tcp; 3282 unsigned char *hdr; 3283 } l4; 3284 u32 paylen, l4_offset; 3285 int err; 3286 3287 if (skb->ip_summed != CHECKSUM_PARTIAL) 3288 return 0; 3289 3290 if (!skb_is_gso(skb)) 3291 return 0; 3292 3293 err = skb_cow_head(skb, 0); 3294 if (err < 0) 3295 return err; 3296 3297 ip.hdr = skb_network_header(skb); 3298 l4.hdr = skb_checksum_start(skb); 3299 3300 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3301 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3302 3303 /* initialize outer IP header fields */ 3304 if (ip.v4->version == 4) { 3305 unsigned char *csum_start = skb_checksum_start(skb); 3306 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 3307 3308 /* IP header will have to cancel out any data that 3309 * is not a part of the outer IP header 3310 */ 3311 ip.v4->check = csum_fold(csum_partial(trans_start, 3312 csum_start - trans_start, 3313 0)); 3314 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3315 3316 ip.v4->tot_len = 0; 3317 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 3318 IXGBE_TX_FLAGS_CSUM | 3319 IXGBE_TX_FLAGS_IPV4; 3320 } else { 3321 ip.v6->payload_len = 0; 3322 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 3323 IXGBE_TX_FLAGS_CSUM; 3324 } 3325 3326 /* determine offset of inner transport header */ 3327 l4_offset = l4.hdr - skb->data; 3328 3329 /* compute length of segmentation header */ 3330 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 3331 3332 /* remove payload length from inner checksum */ 3333 paylen = skb->len - l4_offset; 3334 csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); 3335 3336 /* update gso size and bytecount with header size */ 3337 first->gso_segs = skb_shinfo(skb)->gso_segs; 3338 first->bytecount += (first->gso_segs - 1) * *hdr_len; 3339 3340 /* mss_l4len_id: use 1 as index for TSO */ 3341 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; 3342 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 3343 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); 3344 3345 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 3346 vlan_macip_lens = l4.hdr - ip.hdr; 3347 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; 3348 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 3349 3350 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 3351 type_tucmd, mss_l4len_idx); 3352 3353 return 1; 3354 } 3355 3356 static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) 3357 { 3358 unsigned int offset = 0; 3359 3360 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); 3361 3362 return offset == skb_checksum_start_offset(skb); 3363 } 3364 3365 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 3366 struct ixgbevf_tx_buffer *first) 3367 { 3368 struct sk_buff *skb = first->skb; 3369 u32 vlan_macip_lens = 0; 3370 u32 type_tucmd = 0; 3371 3372 if (skb->ip_summed != CHECKSUM_PARTIAL) 3373 goto no_csum; 3374 3375 switch (skb->csum_offset) { 3376 case offsetof(struct tcphdr, check): 3377 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3378 /* fall through */ 3379 case offsetof(struct udphdr, check): 3380 break; 3381 case offsetof(struct sctphdr, checksum): 3382 /* validate that this is actually an SCTP request */ 3383 if (((first->protocol == htons(ETH_P_IP)) && 3384 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || 3385 ((first->protocol == htons(ETH_P_IPV6)) && 3386 ixgbevf_ipv6_csum_is_sctp(skb))) { 3387 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; 3388 break; 3389 } 3390 /* fall through */ 3391 default: 3392 skb_checksum_help(skb); 3393 goto no_csum; 3394 } 3395 /* update TX checksum flag */ 3396 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 3397 vlan_macip_lens = skb_checksum_start_offset(skb) - 3398 skb_network_offset(skb); 3399 no_csum: 3400 /* vlan_macip_lens: MACLEN, VLAN tag */ 3401 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 3402 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 3403 3404 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); 3405 } 3406 3407 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) 3408 { 3409 /* set type for advanced descriptor with frame checksum insertion */ 3410 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | 3411 IXGBE_ADVTXD_DCMD_IFCS | 3412 IXGBE_ADVTXD_DCMD_DEXT); 3413 3414 /* set HW VLAN bit if VLAN is present */ 3415 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3416 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); 3417 3418 /* set segmentation enable bits for TSO/FSO */ 3419 if (tx_flags & IXGBE_TX_FLAGS_TSO) 3420 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); 3421 3422 return cmd_type; 3423 } 3424 3425 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, 3426 u32 tx_flags, unsigned int paylen) 3427 { 3428 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); 3429 3430 /* enable L4 checksum for TSO and TX checksum offload */ 3431 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3432 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); 3433 3434 /* enble IPv4 checksum for TSO */ 3435 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3436 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); 3437 3438 /* use index 1 context for TSO/FSO/FCOE */ 3439 if (tx_flags & IXGBE_TX_FLAGS_TSO) 3440 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); 3441 3442 /* Check Context must be set if Tx switch is enabled, which it 3443 * always is for case where virtual functions are running 3444 */ 3445 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); 3446 3447 tx_desc->read.olinfo_status = olinfo_status; 3448 } 3449 3450 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 3451 struct ixgbevf_tx_buffer *first, 3452 const u8 hdr_len) 3453 { 3454 dma_addr_t dma; 3455 struct sk_buff *skb = first->skb; 3456 struct ixgbevf_tx_buffer *tx_buffer; 3457 union ixgbe_adv_tx_desc *tx_desc; 3458 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 3459 unsigned int data_len = skb->data_len; 3460 unsigned int size = skb_headlen(skb); 3461 unsigned int paylen = skb->len - hdr_len; 3462 u32 tx_flags = first->tx_flags; 3463 __le32 cmd_type; 3464 u16 i = tx_ring->next_to_use; 3465 3466 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 3467 3468 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen); 3469 cmd_type = ixgbevf_tx_cmd_type(tx_flags); 3470 3471 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 3472 if (dma_mapping_error(tx_ring->dev, dma)) 3473 goto dma_error; 3474 3475 /* record length, and DMA address */ 3476 dma_unmap_len_set(first, len, size); 3477 dma_unmap_addr_set(first, dma, dma); 3478 3479 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3480 3481 for (;;) { 3482 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { 3483 tx_desc->read.cmd_type_len = 3484 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); 3485 3486 i++; 3487 tx_desc++; 3488 if (i == tx_ring->count) { 3489 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 3490 i = 0; 3491 } 3492 3493 dma += IXGBE_MAX_DATA_PER_TXD; 3494 size -= IXGBE_MAX_DATA_PER_TXD; 3495 3496 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3497 tx_desc->read.olinfo_status = 0; 3498 } 3499 3500 if (likely(!data_len)) 3501 break; 3502 3503 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); 3504 3505 i++; 3506 tx_desc++; 3507 if (i == tx_ring->count) { 3508 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 3509 i = 0; 3510 } 3511 3512 size = skb_frag_size(frag); 3513 data_len -= size; 3514 3515 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 3516 DMA_TO_DEVICE); 3517 if (dma_mapping_error(tx_ring->dev, dma)) 3518 goto dma_error; 3519 3520 tx_buffer = &tx_ring->tx_buffer_info[i]; 3521 dma_unmap_len_set(tx_buffer, len, size); 3522 dma_unmap_addr_set(tx_buffer, dma, dma); 3523 3524 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3525 tx_desc->read.olinfo_status = 0; 3526 3527 frag++; 3528 } 3529 3530 /* write last descriptor with RS and EOP bits */ 3531 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); 3532 tx_desc->read.cmd_type_len = cmd_type; 3533 3534 /* set the timestamp */ 3535 first->time_stamp = jiffies; 3536 3537 /* Force memory writes to complete before letting h/w know there 3538 * are new descriptors to fetch. (Only applicable for weak-ordered 3539 * memory model archs, such as IA-64). 3540 * 3541 * We also need this memory barrier (wmb) to make certain all of the 3542 * status bits have been updated before next_to_watch is written. 3543 */ 3544 wmb(); 3545 3546 /* set next_to_watch value indicating a packet is present */ 3547 first->next_to_watch = tx_desc; 3548 3549 i++; 3550 if (i == tx_ring->count) 3551 i = 0; 3552 3553 tx_ring->next_to_use = i; 3554 3555 /* notify HW of packet */ 3556 ixgbevf_write_tail(tx_ring, i); 3557 3558 return; 3559 dma_error: 3560 dev_err(tx_ring->dev, "TX DMA map failed\n"); 3561 3562 /* clear dma mappings for failed tx_buffer_info map */ 3563 for (;;) { 3564 tx_buffer = &tx_ring->tx_buffer_info[i]; 3565 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); 3566 if (tx_buffer == first) 3567 break; 3568 if (i == 0) 3569 i = tx_ring->count; 3570 i--; 3571 } 3572 3573 tx_ring->next_to_use = i; 3574 } 3575 3576 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3577 { 3578 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3579 /* Herbert's original patch had: 3580 * smp_mb__after_netif_stop_queue(); 3581 * but since that doesn't exist yet, just open code it. 3582 */ 3583 smp_mb(); 3584 3585 /* We need to check again in a case another CPU has just 3586 * made room available. 3587 */ 3588 if (likely(ixgbevf_desc_unused(tx_ring) < size)) 3589 return -EBUSY; 3590 3591 /* A reprieve! - use start_queue because it doesn't call schedule */ 3592 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 3593 ++tx_ring->tx_stats.restart_queue; 3594 3595 return 0; 3596 } 3597 3598 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3599 { 3600 if (likely(ixgbevf_desc_unused(tx_ring) >= size)) 3601 return 0; 3602 return __ixgbevf_maybe_stop_tx(tx_ring, size); 3603 } 3604 3605 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3606 { 3607 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3608 struct ixgbevf_tx_buffer *first; 3609 struct ixgbevf_ring *tx_ring; 3610 int tso; 3611 u32 tx_flags = 0; 3612 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 3613 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3614 unsigned short f; 3615 #endif 3616 u8 hdr_len = 0; 3617 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 3618 3619 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 3620 dev_kfree_skb_any(skb); 3621 return NETDEV_TX_OK; 3622 } 3623 3624 tx_ring = adapter->tx_ring[skb->queue_mapping]; 3625 3626 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 3627 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 3628 * + 2 desc gap to keep tail from touching head, 3629 * + 1 desc for context descriptor, 3630 * otherwise try next time 3631 */ 3632 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3633 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3634 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3635 #else 3636 count += skb_shinfo(skb)->nr_frags; 3637 #endif 3638 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 3639 tx_ring->tx_stats.tx_busy++; 3640 return NETDEV_TX_BUSY; 3641 } 3642 3643 /* record the location of the first descriptor for this packet */ 3644 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 3645 first->skb = skb; 3646 first->bytecount = skb->len; 3647 first->gso_segs = 1; 3648 3649 if (skb_vlan_tag_present(skb)) { 3650 tx_flags |= skb_vlan_tag_get(skb); 3651 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3652 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3653 } 3654 3655 /* record initial flags and protocol */ 3656 first->tx_flags = tx_flags; 3657 first->protocol = vlan_get_protocol(skb); 3658 3659 tso = ixgbevf_tso(tx_ring, first, &hdr_len); 3660 if (tso < 0) 3661 goto out_drop; 3662 else if (!tso) 3663 ixgbevf_tx_csum(tx_ring, first); 3664 3665 ixgbevf_tx_map(tx_ring, first, hdr_len); 3666 3667 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 3668 3669 return NETDEV_TX_OK; 3670 3671 out_drop: 3672 dev_kfree_skb_any(first->skb); 3673 first->skb = NULL; 3674 3675 return NETDEV_TX_OK; 3676 } 3677 3678 /** 3679 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3680 * @netdev: network interface device structure 3681 * @p: pointer to an address structure 3682 * 3683 * Returns 0 on success, negative on failure 3684 **/ 3685 static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3686 { 3687 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3688 struct ixgbe_hw *hw = &adapter->hw; 3689 struct sockaddr *addr = p; 3690 int err; 3691 3692 if (!is_valid_ether_addr(addr->sa_data)) 3693 return -EADDRNOTAVAIL; 3694 3695 spin_lock_bh(&adapter->mbx_lock); 3696 3697 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); 3698 3699 spin_unlock_bh(&adapter->mbx_lock); 3700 3701 if (err) 3702 return -EPERM; 3703 3704 ether_addr_copy(hw->mac.addr, addr->sa_data); 3705 ether_addr_copy(netdev->dev_addr, addr->sa_data); 3706 3707 return 0; 3708 } 3709 3710 /** 3711 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3712 * @netdev: network interface device structure 3713 * @new_mtu: new value for maximum frame size 3714 * 3715 * Returns 0 on success, negative on failure 3716 **/ 3717 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3718 { 3719 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3720 struct ixgbe_hw *hw = &adapter->hw; 3721 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3722 int ret; 3723 3724 spin_lock_bh(&adapter->mbx_lock); 3725 /* notify the PF of our intent to use this size of frame */ 3726 ret = hw->mac.ops.set_rlpml(hw, max_frame); 3727 spin_unlock_bh(&adapter->mbx_lock); 3728 if (ret) 3729 return -EINVAL; 3730 3731 hw_dbg(hw, "changing MTU from %d to %d\n", 3732 netdev->mtu, new_mtu); 3733 3734 /* must set new MTU before calling down or up */ 3735 netdev->mtu = new_mtu; 3736 3737 return 0; 3738 } 3739 3740 #ifdef CONFIG_NET_POLL_CONTROLLER 3741 /* Polling 'interrupt' - used by things like netconsole to send skbs 3742 * without having to re-enable interrupts. It's not called while 3743 * the interrupt routine is executing. 3744 */ 3745 static void ixgbevf_netpoll(struct net_device *netdev) 3746 { 3747 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3748 int i; 3749 3750 /* if interface is down do nothing */ 3751 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 3752 return; 3753 for (i = 0; i < adapter->num_rx_queues; i++) 3754 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); 3755 } 3756 #endif /* CONFIG_NET_POLL_CONTROLLER */ 3757 3758 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 3759 { 3760 struct net_device *netdev = pci_get_drvdata(pdev); 3761 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3762 #ifdef CONFIG_PM 3763 int retval = 0; 3764 #endif 3765 3766 rtnl_lock(); 3767 netif_device_detach(netdev); 3768 3769 if (netif_running(netdev)) 3770 ixgbevf_close_suspend(adapter); 3771 3772 ixgbevf_clear_interrupt_scheme(adapter); 3773 rtnl_unlock(); 3774 3775 #ifdef CONFIG_PM 3776 retval = pci_save_state(pdev); 3777 if (retval) 3778 return retval; 3779 3780 #endif 3781 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) 3782 pci_disable_device(pdev); 3783 3784 return 0; 3785 } 3786 3787 #ifdef CONFIG_PM 3788 static int ixgbevf_resume(struct pci_dev *pdev) 3789 { 3790 struct net_device *netdev = pci_get_drvdata(pdev); 3791 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3792 u32 err; 3793 3794 pci_restore_state(pdev); 3795 /* pci_restore_state clears dev->state_saved so call 3796 * pci_save_state to restore it. 3797 */ 3798 pci_save_state(pdev); 3799 3800 err = pci_enable_device_mem(pdev); 3801 if (err) { 3802 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3803 return err; 3804 } 3805 3806 adapter->hw.hw_addr = adapter->io_addr; 3807 smp_mb__before_atomic(); 3808 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 3809 pci_set_master(pdev); 3810 3811 ixgbevf_reset(adapter); 3812 3813 rtnl_lock(); 3814 err = ixgbevf_init_interrupt_scheme(adapter); 3815 rtnl_unlock(); 3816 if (err) { 3817 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3818 return err; 3819 } 3820 3821 if (netif_running(netdev)) { 3822 err = ixgbevf_open(netdev); 3823 if (err) 3824 return err; 3825 } 3826 3827 netif_device_attach(netdev); 3828 3829 return err; 3830 } 3831 3832 #endif /* CONFIG_PM */ 3833 static void ixgbevf_shutdown(struct pci_dev *pdev) 3834 { 3835 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3836 } 3837 3838 static void ixgbevf_get_stats(struct net_device *netdev, 3839 struct rtnl_link_stats64 *stats) 3840 { 3841 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3842 unsigned int start; 3843 u64 bytes, packets; 3844 const struct ixgbevf_ring *ring; 3845 int i; 3846 3847 ixgbevf_update_stats(adapter); 3848 3849 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3850 3851 for (i = 0; i < adapter->num_rx_queues; i++) { 3852 ring = adapter->rx_ring[i]; 3853 do { 3854 start = u64_stats_fetch_begin_irq(&ring->syncp); 3855 bytes = ring->stats.bytes; 3856 packets = ring->stats.packets; 3857 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 3858 stats->rx_bytes += bytes; 3859 stats->rx_packets += packets; 3860 } 3861 3862 for (i = 0; i < adapter->num_tx_queues; i++) { 3863 ring = adapter->tx_ring[i]; 3864 do { 3865 start = u64_stats_fetch_begin_irq(&ring->syncp); 3866 bytes = ring->stats.bytes; 3867 packets = ring->stats.packets; 3868 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 3869 stats->tx_bytes += bytes; 3870 stats->tx_packets += packets; 3871 } 3872 } 3873 3874 #define IXGBEVF_MAX_MAC_HDR_LEN 127 3875 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511 3876 3877 static netdev_features_t 3878 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, 3879 netdev_features_t features) 3880 { 3881 unsigned int network_hdr_len, mac_hdr_len; 3882 3883 /* Make certain the headers can be described by a context descriptor */ 3884 mac_hdr_len = skb_network_header(skb) - skb->data; 3885 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN)) 3886 return features & ~(NETIF_F_HW_CSUM | 3887 NETIF_F_SCTP_CRC | 3888 NETIF_F_HW_VLAN_CTAG_TX | 3889 NETIF_F_TSO | 3890 NETIF_F_TSO6); 3891 3892 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 3893 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN)) 3894 return features & ~(NETIF_F_HW_CSUM | 3895 NETIF_F_SCTP_CRC | 3896 NETIF_F_TSO | 3897 NETIF_F_TSO6); 3898 3899 /* We can only support IPV4 TSO in tunnels if we can mangle the 3900 * inner IP ID field, so strip TSO if MANGLEID is not supported. 3901 */ 3902 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 3903 features &= ~NETIF_F_TSO; 3904 3905 return features; 3906 } 3907 3908 static const struct net_device_ops ixgbevf_netdev_ops = { 3909 .ndo_open = ixgbevf_open, 3910 .ndo_stop = ixgbevf_close, 3911 .ndo_start_xmit = ixgbevf_xmit_frame, 3912 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3913 .ndo_get_stats64 = ixgbevf_get_stats, 3914 .ndo_validate_addr = eth_validate_addr, 3915 .ndo_set_mac_address = ixgbevf_set_mac, 3916 .ndo_change_mtu = ixgbevf_change_mtu, 3917 .ndo_tx_timeout = ixgbevf_tx_timeout, 3918 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3919 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3920 #ifdef CONFIG_NET_POLL_CONTROLLER 3921 .ndo_poll_controller = ixgbevf_netpoll, 3922 #endif 3923 .ndo_features_check = ixgbevf_features_check, 3924 }; 3925 3926 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3927 { 3928 dev->netdev_ops = &ixgbevf_netdev_ops; 3929 ixgbevf_set_ethtool_ops(dev); 3930 dev->watchdog_timeo = 5 * HZ; 3931 } 3932 3933 /** 3934 * ixgbevf_probe - Device Initialization Routine 3935 * @pdev: PCI device information struct 3936 * @ent: entry in ixgbevf_pci_tbl 3937 * 3938 * Returns 0 on success, negative on failure 3939 * 3940 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3941 * The OS initialization, configuring of the adapter private structure, 3942 * and a hardware reset occur. 3943 **/ 3944 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3945 { 3946 struct net_device *netdev; 3947 struct ixgbevf_adapter *adapter = NULL; 3948 struct ixgbe_hw *hw = NULL; 3949 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3950 int err, pci_using_dac; 3951 bool disable_dev = false; 3952 3953 err = pci_enable_device(pdev); 3954 if (err) 3955 return err; 3956 3957 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 3958 pci_using_dac = 1; 3959 } else { 3960 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3961 if (err) { 3962 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 3963 goto err_dma; 3964 } 3965 pci_using_dac = 0; 3966 } 3967 3968 err = pci_request_regions(pdev, ixgbevf_driver_name); 3969 if (err) { 3970 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3971 goto err_pci_reg; 3972 } 3973 3974 pci_set_master(pdev); 3975 3976 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3977 MAX_TX_QUEUES); 3978 if (!netdev) { 3979 err = -ENOMEM; 3980 goto err_alloc_etherdev; 3981 } 3982 3983 SET_NETDEV_DEV(netdev, &pdev->dev); 3984 3985 adapter = netdev_priv(netdev); 3986 3987 adapter->netdev = netdev; 3988 adapter->pdev = pdev; 3989 hw = &adapter->hw; 3990 hw->back = adapter; 3991 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3992 3993 /* call save state here in standalone driver because it relies on 3994 * adapter struct to exist, and needs to call netdev_priv 3995 */ 3996 pci_save_state(pdev); 3997 3998 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3999 pci_resource_len(pdev, 0)); 4000 adapter->io_addr = hw->hw_addr; 4001 if (!hw->hw_addr) { 4002 err = -EIO; 4003 goto err_ioremap; 4004 } 4005 4006 ixgbevf_assign_netdev_ops(netdev); 4007 4008 /* Setup HW API */ 4009 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 4010 hw->mac.type = ii->mac; 4011 4012 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 4013 sizeof(struct ixgbe_mbx_operations)); 4014 4015 /* setup the private structure */ 4016 err = ixgbevf_sw_init(adapter); 4017 if (err) 4018 goto err_sw_init; 4019 4020 /* The HW MAC address was set and/or determined in sw_init */ 4021 if (!is_valid_ether_addr(netdev->dev_addr)) { 4022 pr_err("invalid MAC address\n"); 4023 err = -EIO; 4024 goto err_sw_init; 4025 } 4026 4027 netdev->hw_features = NETIF_F_SG | 4028 NETIF_F_TSO | 4029 NETIF_F_TSO6 | 4030 NETIF_F_RXCSUM | 4031 NETIF_F_HW_CSUM | 4032 NETIF_F_SCTP_CRC; 4033 4034 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 4035 NETIF_F_GSO_GRE_CSUM | \ 4036 NETIF_F_GSO_IPXIP4 | \ 4037 NETIF_F_GSO_IPXIP6 | \ 4038 NETIF_F_GSO_UDP_TUNNEL | \ 4039 NETIF_F_GSO_UDP_TUNNEL_CSUM) 4040 4041 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; 4042 netdev->hw_features |= NETIF_F_GSO_PARTIAL | 4043 IXGBEVF_GSO_PARTIAL_FEATURES; 4044 4045 netdev->features = netdev->hw_features; 4046 4047 if (pci_using_dac) 4048 netdev->features |= NETIF_F_HIGHDMA; 4049 4050 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 4051 netdev->mpls_features |= NETIF_F_HW_CSUM; 4052 netdev->hw_enc_features |= netdev->vlan_features; 4053 4054 /* set this bit last since it cannot be part of vlan_features */ 4055 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 4056 NETIF_F_HW_VLAN_CTAG_RX | 4057 NETIF_F_HW_VLAN_CTAG_TX; 4058 4059 netdev->priv_flags |= IFF_UNICAST_FLT; 4060 4061 /* MTU range: 68 - 1504 or 9710 */ 4062 netdev->min_mtu = ETH_MIN_MTU; 4063 switch (adapter->hw.api_version) { 4064 case ixgbe_mbox_api_11: 4065 case ixgbe_mbox_api_12: 4066 case ixgbe_mbox_api_13: 4067 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - 4068 (ETH_HLEN + ETH_FCS_LEN); 4069 break; 4070 default: 4071 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) 4072 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - 4073 (ETH_HLEN + ETH_FCS_LEN); 4074 else 4075 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; 4076 break; 4077 } 4078 4079 if (IXGBE_REMOVED(hw->hw_addr)) { 4080 err = -EIO; 4081 goto err_sw_init; 4082 } 4083 4084 setup_timer(&adapter->service_timer, &ixgbevf_service_timer, 4085 (unsigned long)adapter); 4086 4087 INIT_WORK(&adapter->service_task, ixgbevf_service_task); 4088 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); 4089 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); 4090 4091 err = ixgbevf_init_interrupt_scheme(adapter); 4092 if (err) 4093 goto err_sw_init; 4094 4095 strcpy(netdev->name, "eth%d"); 4096 4097 err = register_netdev(netdev); 4098 if (err) 4099 goto err_register; 4100 4101 pci_set_drvdata(pdev, netdev); 4102 netif_carrier_off(netdev); 4103 4104 ixgbevf_init_last_counter_stats(adapter); 4105 4106 /* print the VF info */ 4107 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); 4108 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); 4109 4110 switch (hw->mac.type) { 4111 case ixgbe_mac_X550_vf: 4112 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); 4113 break; 4114 case ixgbe_mac_X540_vf: 4115 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); 4116 break; 4117 case ixgbe_mac_82599_vf: 4118 default: 4119 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); 4120 break; 4121 } 4122 4123 return 0; 4124 4125 err_register: 4126 ixgbevf_clear_interrupt_scheme(adapter); 4127 err_sw_init: 4128 ixgbevf_reset_interrupt_capability(adapter); 4129 iounmap(adapter->io_addr); 4130 err_ioremap: 4131 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); 4132 free_netdev(netdev); 4133 err_alloc_etherdev: 4134 pci_release_regions(pdev); 4135 err_pci_reg: 4136 err_dma: 4137 if (!adapter || disable_dev) 4138 pci_disable_device(pdev); 4139 return err; 4140 } 4141 4142 /** 4143 * ixgbevf_remove - Device Removal Routine 4144 * @pdev: PCI device information struct 4145 * 4146 * ixgbevf_remove is called by the PCI subsystem to alert the driver 4147 * that it should release a PCI device. The could be caused by a 4148 * Hot-Plug event, or because the driver is going to be removed from 4149 * memory. 4150 **/ 4151 static void ixgbevf_remove(struct pci_dev *pdev) 4152 { 4153 struct net_device *netdev = pci_get_drvdata(pdev); 4154 struct ixgbevf_adapter *adapter; 4155 bool disable_dev; 4156 4157 if (!netdev) 4158 return; 4159 4160 adapter = netdev_priv(netdev); 4161 4162 set_bit(__IXGBEVF_REMOVING, &adapter->state); 4163 cancel_work_sync(&adapter->service_task); 4164 4165 if (netdev->reg_state == NETREG_REGISTERED) 4166 unregister_netdev(netdev); 4167 4168 ixgbevf_clear_interrupt_scheme(adapter); 4169 ixgbevf_reset_interrupt_capability(adapter); 4170 4171 iounmap(adapter->io_addr); 4172 pci_release_regions(pdev); 4173 4174 hw_dbg(&adapter->hw, "Remove complete\n"); 4175 4176 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); 4177 free_netdev(netdev); 4178 4179 if (disable_dev) 4180 pci_disable_device(pdev); 4181 } 4182 4183 /** 4184 * ixgbevf_io_error_detected - called when PCI error is detected 4185 * @pdev: Pointer to PCI device 4186 * @state: The current pci connection state 4187 * 4188 * This function is called after a PCI bus error affecting 4189 * this device has been detected. 4190 **/ 4191 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, 4192 pci_channel_state_t state) 4193 { 4194 struct net_device *netdev = pci_get_drvdata(pdev); 4195 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4196 4197 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) 4198 return PCI_ERS_RESULT_DISCONNECT; 4199 4200 rtnl_lock(); 4201 netif_device_detach(netdev); 4202 4203 if (state == pci_channel_io_perm_failure) { 4204 rtnl_unlock(); 4205 return PCI_ERS_RESULT_DISCONNECT; 4206 } 4207 4208 if (netif_running(netdev)) 4209 ixgbevf_close_suspend(adapter); 4210 4211 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) 4212 pci_disable_device(pdev); 4213 rtnl_unlock(); 4214 4215 /* Request a slot slot reset. */ 4216 return PCI_ERS_RESULT_NEED_RESET; 4217 } 4218 4219 /** 4220 * ixgbevf_io_slot_reset - called after the pci bus has been reset. 4221 * @pdev: Pointer to PCI device 4222 * 4223 * Restart the card from scratch, as if from a cold-boot. Implementation 4224 * resembles the first-half of the ixgbevf_resume routine. 4225 **/ 4226 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 4227 { 4228 struct net_device *netdev = pci_get_drvdata(pdev); 4229 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4230 4231 if (pci_enable_device_mem(pdev)) { 4232 dev_err(&pdev->dev, 4233 "Cannot re-enable PCI device after reset.\n"); 4234 return PCI_ERS_RESULT_DISCONNECT; 4235 } 4236 4237 adapter->hw.hw_addr = adapter->io_addr; 4238 smp_mb__before_atomic(); 4239 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 4240 pci_set_master(pdev); 4241 4242 ixgbevf_reset(adapter); 4243 4244 return PCI_ERS_RESULT_RECOVERED; 4245 } 4246 4247 /** 4248 * ixgbevf_io_resume - called when traffic can start flowing again. 4249 * @pdev: Pointer to PCI device 4250 * 4251 * This callback is called when the error recovery driver tells us that 4252 * its OK to resume normal operation. Implementation resembles the 4253 * second-half of the ixgbevf_resume routine. 4254 **/ 4255 static void ixgbevf_io_resume(struct pci_dev *pdev) 4256 { 4257 struct net_device *netdev = pci_get_drvdata(pdev); 4258 4259 rtnl_lock(); 4260 if (netif_running(netdev)) 4261 ixgbevf_open(netdev); 4262 4263 netif_device_attach(netdev); 4264 rtnl_unlock(); 4265 } 4266 4267 /* PCI Error Recovery (ERS) */ 4268 static const struct pci_error_handlers ixgbevf_err_handler = { 4269 .error_detected = ixgbevf_io_error_detected, 4270 .slot_reset = ixgbevf_io_slot_reset, 4271 .resume = ixgbevf_io_resume, 4272 }; 4273 4274 static struct pci_driver ixgbevf_driver = { 4275 .name = ixgbevf_driver_name, 4276 .id_table = ixgbevf_pci_tbl, 4277 .probe = ixgbevf_probe, 4278 .remove = ixgbevf_remove, 4279 #ifdef CONFIG_PM 4280 /* Power Management Hooks */ 4281 .suspend = ixgbevf_suspend, 4282 .resume = ixgbevf_resume, 4283 #endif 4284 .shutdown = ixgbevf_shutdown, 4285 .err_handler = &ixgbevf_err_handler 4286 }; 4287 4288 /** 4289 * ixgbevf_init_module - Driver Registration Routine 4290 * 4291 * ixgbevf_init_module is the first routine called when the driver is 4292 * loaded. All it does is register with the PCI subsystem. 4293 **/ 4294 static int __init ixgbevf_init_module(void) 4295 { 4296 pr_info("%s - version %s\n", ixgbevf_driver_string, 4297 ixgbevf_driver_version); 4298 4299 pr_info("%s\n", ixgbevf_copyright); 4300 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); 4301 if (!ixgbevf_wq) { 4302 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name); 4303 return -ENOMEM; 4304 } 4305 4306 return pci_register_driver(&ixgbevf_driver); 4307 } 4308 4309 module_init(ixgbevf_init_module); 4310 4311 /** 4312 * ixgbevf_exit_module - Driver Exit Cleanup Routine 4313 * 4314 * ixgbevf_exit_module is called just before the driver is removed 4315 * from memory. 4316 **/ 4317 static void __exit ixgbevf_exit_module(void) 4318 { 4319 pci_unregister_driver(&ixgbevf_driver); 4320 if (ixgbevf_wq) { 4321 destroy_workqueue(ixgbevf_wq); 4322 ixgbevf_wq = NULL; 4323 } 4324 } 4325 4326 #ifdef DEBUG 4327 /** 4328 * ixgbevf_get_hw_dev_name - return device name string 4329 * used by hardware layer to print debugging information 4330 **/ 4331 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 4332 { 4333 struct ixgbevf_adapter *adapter = hw->back; 4334 4335 return adapter->netdev->name; 4336 } 4337 4338 #endif 4339 module_exit(ixgbevf_exit_module); 4340 4341 /* ixgbevf_main.c */ 4342