1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 1999 - 2018 Intel Corporation. */ 3 4 /****************************************************************************** 5 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 6 ******************************************************************************/ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/types.h> 11 #include <linux/bitops.h> 12 #include <linux/module.h> 13 #include <linux/pci.h> 14 #include <linux/netdevice.h> 15 #include <linux/vmalloc.h> 16 #include <linux/string.h> 17 #include <linux/in.h> 18 #include <linux/ip.h> 19 #include <linux/tcp.h> 20 #include <linux/sctp.h> 21 #include <linux/ipv6.h> 22 #include <linux/slab.h> 23 #include <net/checksum.h> 24 #include <net/ip6_checksum.h> 25 #include <linux/ethtool.h> 26 #include <linux/if.h> 27 #include <linux/if_vlan.h> 28 #include <linux/prefetch.h> 29 #include <net/mpls.h> 30 #include <linux/bpf.h> 31 #include <linux/bpf_trace.h> 32 #include <linux/atomic.h> 33 #include <net/xfrm.h> 34 35 #include "ixgbevf.h" 36 37 const char ixgbevf_driver_name[] = "ixgbevf"; 38 static const char ixgbevf_driver_string[] = 39 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 40 41 static char ixgbevf_copyright[] = 42 "Copyright (c) 2009 - 2018 Intel Corporation."; 43 44 static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 45 [board_82599_vf] = &ixgbevf_82599_vf_info, 46 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info, 47 [board_X540_vf] = &ixgbevf_X540_vf_info, 48 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info, 49 [board_X550_vf] = &ixgbevf_X550_vf_info, 50 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, 51 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, 52 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, 53 [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, 54 }; 55 56 /* ixgbevf_pci_tbl - PCI Device ID Table 57 * 58 * Wildcard entries (PCI_ANY_ID) should come last 59 * Last entry must be all 0s 60 * 61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 62 * Class, Class Mask, private data (not used) } 63 */ 64 static const struct pci_device_id ixgbevf_pci_tbl[] = { 65 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, 66 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv }, 67 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, 68 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv }, 69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, 70 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, 71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, 72 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, 74 /* required last entry */ 75 {0, } 76 }; 77 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 78 79 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 80 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); 81 MODULE_LICENSE("GPL v2"); 82 83 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 84 static int debug = -1; 85 module_param(debug, int, 0); 86 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 87 88 static struct workqueue_struct *ixgbevf_wq; 89 90 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) 91 { 92 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && 93 !test_bit(__IXGBEVF_REMOVING, &adapter->state) && 94 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) 
95 queue_work(ixgbevf_wq, &adapter->service_task); 96 } 97 98 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) 99 { 100 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); 101 102 /* flush memory to make sure state is correct before next watchdog */ 103 smp_mb__before_atomic(); 104 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); 105 } 106 107 /* forward decls */ 108 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); 109 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 110 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 111 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer); 112 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, 113 struct ixgbevf_rx_buffer *old_buff); 114 115 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) 116 { 117 struct ixgbevf_adapter *adapter = hw->back; 118 119 if (!hw->hw_addr) 120 return; 121 hw->hw_addr = NULL; 122 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 123 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) 124 ixgbevf_service_event_schedule(adapter); 125 } 126 127 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) 128 { 129 u32 value; 130 131 /* The following check not only optimizes a bit by not 132 * performing a read on the status register when the 133 * register just read was a status register read that 134 * returned IXGBE_FAILED_READ_REG. It also blocks any 135 * potential recursion. 136 */ 137 if (reg == IXGBE_VFSTATUS) { 138 ixgbevf_remove_adapter(hw); 139 return; 140 } 141 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS); 142 if (value == IXGBE_FAILED_READ_REG) 143 ixgbevf_remove_adapter(hw); 144 } 145 146 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) 147 { 148 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); 149 u32 value; 150 151 if (IXGBE_REMOVED(reg_addr)) 152 return IXGBE_FAILED_READ_REG; 153 value = readl(reg_addr + reg); 154 if (unlikely(value == IXGBE_FAILED_READ_REG)) 155 ixgbevf_check_remove(hw, reg); 156 return value; 157 } 158 159 /** 160 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 161 * @adapter: pointer to adapter struct 162 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 163 * @queue: queue to map the corresponding interrupt to 164 * @msix_vector: the vector to map to the corresponding queue 165 **/ 166 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 167 u8 queue, u8 msix_vector) 168 { 169 u32 ivar, index; 170 struct ixgbe_hw *hw = &adapter->hw; 171 172 if (direction == -1) { 173 /* other causes */ 174 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 175 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 176 ivar &= ~0xFF; 177 ivar |= msix_vector; 178 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 179 } else { 180 /* Tx or Rx causes */ 181 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 182 index = ((16 * (queue & 1)) + (8 * direction)); 183 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 184 ivar &= ~(0xFF << index); 185 ivar |= (msix_vector << index); 186 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 187 } 188 } 189 190 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) 191 { 192 return ring->stats.packets; 193 } 194 195 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) 196 { 197 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); 198 struct ixgbe_hw *hw = &adapter->hw; 199 200 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); 201 u32 tail = IXGBE_READ_REG(hw, 
IXGBE_VFTDT(ring->reg_idx)); 202 203 if (head != tail) 204 return (head < tail) ? 205 tail - head : (tail + ring->count - head); 206 207 return 0; 208 } 209 210 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) 211 { 212 u32 tx_done = ixgbevf_get_tx_completed(tx_ring); 213 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 214 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); 215 216 clear_check_for_tx_hang(tx_ring); 217 218 /* Check for a hung queue, but be thorough. This verifies 219 * that a transmit has been completed since the previous 220 * check AND there is at least one packet pending. The 221 * ARMED bit is set to indicate a potential hang. 222 */ 223 if ((tx_done_old == tx_done) && tx_pending) { 224 /* make sure it is true for two checks in a row */ 225 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED, 226 &tx_ring->state); 227 } 228 /* reset the countdown */ 229 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); 230 231 /* update completed stats and continue */ 232 tx_ring->tx_stats.tx_done_old = tx_done; 233 234 return false; 235 } 236 237 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) 238 { 239 /* Do the reset outside of interrupt context */ 240 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 241 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); 242 ixgbevf_service_event_schedule(adapter); 243 } 244 } 245 246 /** 247 * ixgbevf_tx_timeout - Respond to a Tx Hang 248 * @netdev: network interface device structure 249 * @txqueue: transmit queue hanging (unused) 250 **/ 251 static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) 252 { 253 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 254 255 ixgbevf_tx_timeout_reset(adapter); 256 } 257 258 /** 259 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 260 * @q_vector: board private structure 261 * @tx_ring: tx ring to clean 262 * @napi_budget: Used to determine if we are in netpoll 263 **/ 264 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 265 struct ixgbevf_ring *tx_ring, int napi_budget) 266 { 267 struct ixgbevf_adapter *adapter = q_vector->adapter; 268 struct ixgbevf_tx_buffer *tx_buffer; 269 union ixgbe_adv_tx_desc *tx_desc; 270 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; 271 unsigned int budget = tx_ring->count / 2; 272 unsigned int i = tx_ring->next_to_clean; 273 274 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 275 return true; 276 277 tx_buffer = &tx_ring->tx_buffer_info[i]; 278 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 279 i -= tx_ring->count; 280 281 do { 282 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 283 284 /* if next_to_watch is not set then there is no work pending */ 285 if (!eop_desc) 286 break; 287 288 /* prevent any other reads prior to eop_desc */ 289 smp_rmb(); 290 291 /* if DD is not set pending work has not been completed */ 292 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 293 break; 294 295 /* clear next_to_watch to prevent false hangs */ 296 tx_buffer->next_to_watch = NULL; 297 298 /* update the statistics for this packet */ 299 total_bytes += tx_buffer->bytecount; 300 total_packets += tx_buffer->gso_segs; 301 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) 302 total_ipsec++; 303 304 /* free the skb */ 305 if (ring_is_xdp(tx_ring)) 306 page_frag_free(tx_buffer->data); 307 else 308 napi_consume_skb(tx_buffer->skb, napi_budget); 309 310 /* unmap skb header data */ 311 dma_unmap_single(tx_ring->dev, 312 
dma_unmap_addr(tx_buffer, dma), 313 dma_unmap_len(tx_buffer, len), 314 DMA_TO_DEVICE); 315 316 /* clear tx_buffer data */ 317 dma_unmap_len_set(tx_buffer, len, 0); 318 319 /* unmap remaining buffers */ 320 while (tx_desc != eop_desc) { 321 tx_buffer++; 322 tx_desc++; 323 i++; 324 if (unlikely(!i)) { 325 i -= tx_ring->count; 326 tx_buffer = tx_ring->tx_buffer_info; 327 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 328 } 329 330 /* unmap any remaining paged data */ 331 if (dma_unmap_len(tx_buffer, len)) { 332 dma_unmap_page(tx_ring->dev, 333 dma_unmap_addr(tx_buffer, dma), 334 dma_unmap_len(tx_buffer, len), 335 DMA_TO_DEVICE); 336 dma_unmap_len_set(tx_buffer, len, 0); 337 } 338 } 339 340 /* move us one more past the eop_desc for start of next pkt */ 341 tx_buffer++; 342 tx_desc++; 343 i++; 344 if (unlikely(!i)) { 345 i -= tx_ring->count; 346 tx_buffer = tx_ring->tx_buffer_info; 347 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 348 } 349 350 /* issue prefetch for next Tx descriptor */ 351 prefetch(tx_desc); 352 353 /* update budget accounting */ 354 budget--; 355 } while (likely(budget)); 356 357 i += tx_ring->count; 358 tx_ring->next_to_clean = i; 359 u64_stats_update_begin(&tx_ring->syncp); 360 tx_ring->stats.bytes += total_bytes; 361 tx_ring->stats.packets += total_packets; 362 u64_stats_update_end(&tx_ring->syncp); 363 q_vector->tx.total_bytes += total_bytes; 364 q_vector->tx.total_packets += total_packets; 365 adapter->tx_ipsec += total_ipsec; 366 367 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { 368 struct ixgbe_hw *hw = &adapter->hw; 369 union ixgbe_adv_tx_desc *eop_desc; 370 371 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; 372 373 pr_err("Detected Tx Unit Hang%s\n" 374 " Tx Queue <%d>\n" 375 " TDH, TDT <%x>, <%x>\n" 376 " next_to_use <%x>\n" 377 " next_to_clean <%x>\n" 378 "tx_buffer_info[next_to_clean]\n" 379 " next_to_watch <%p>\n" 380 " eop_desc->wb.status <%x>\n" 381 " time_stamp <%lx>\n" 382 " jiffies <%lx>\n", 383 ring_is_xdp(tx_ring) ? " XDP" : "", 384 tx_ring->queue_index, 385 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), 386 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), 387 tx_ring->next_to_use, i, 388 eop_desc, (eop_desc ? eop_desc->wb.status : 0), 389 tx_ring->tx_buffer_info[i].time_stamp, jiffies); 390 391 if (!ring_is_xdp(tx_ring)) 392 netif_stop_subqueue(tx_ring->netdev, 393 tx_ring->queue_index); 394 395 /* schedule immediate reset if we believe we hung */ 396 ixgbevf_tx_timeout_reset(adapter); 397 398 return true; 399 } 400 401 if (ring_is_xdp(tx_ring)) 402 return !!budget; 403 404 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 405 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 406 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 407 /* Make sure that anybody stopping the queue after this 408 * sees the new next_to_clean. 
409 */ 410 smp_mb(); 411 412 if (__netif_subqueue_stopped(tx_ring->netdev, 413 tx_ring->queue_index) && 414 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 415 netif_wake_subqueue(tx_ring->netdev, 416 tx_ring->queue_index); 417 ++tx_ring->tx_stats.restart_queue; 418 } 419 } 420 421 return !!budget; 422 } 423 424 /** 425 * ixgbevf_rx_skb - Helper function to determine proper Rx method 426 * @q_vector: structure containing interrupt and ring information 427 * @skb: packet to send up 428 **/ 429 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, 430 struct sk_buff *skb) 431 { 432 napi_gro_receive(&q_vector->napi, skb); 433 } 434 435 #define IXGBE_RSS_L4_TYPES_MASK \ 436 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ 437 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ 438 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ 439 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) 440 441 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, 442 union ixgbe_adv_rx_desc *rx_desc, 443 struct sk_buff *skb) 444 { 445 u16 rss_type; 446 447 if (!(ring->netdev->features & NETIF_F_RXHASH)) 448 return; 449 450 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & 451 IXGBE_RXDADV_RSSTYPE_MASK; 452 453 if (!rss_type) 454 return; 455 456 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 457 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 458 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); 459 } 460 461 /** 462 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 463 * @ring: structure containing ring-specific data 464 * @rx_desc: current Rx descriptor being processed 465 * @skb: skb currently being received and modified 466 **/ 467 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 468 union ixgbe_adv_rx_desc *rx_desc, 469 struct sk_buff *skb) 470 { 471 skb_checksum_none_assert(skb); 472 473 /* Rx csum disabled */ 474 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 475 return; 476 477 /* if IP and error */ 478 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && 479 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { 480 ring->rx_stats.csum_err++; 481 return; 482 } 483 484 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) 485 return; 486 487 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { 488 ring->rx_stats.csum_err++; 489 return; 490 } 491 492 /* It must be a TCP or UDP packet with a valid checksum */ 493 skb->ip_summed = CHECKSUM_UNNECESSARY; 494 } 495 496 /** 497 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor 498 * @rx_ring: rx descriptor ring packet is being transacted on 499 * @rx_desc: pointer to the EOP Rx descriptor 500 * @skb: pointer to current skb being populated 501 * 502 * This function checks the ring, descriptor, and packet information in 503 * order to populate the checksum, VLAN, protocol, and other fields within 504 * the skb.
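 *
 * A VLAN tag reported in the descriptor is only passed up the stack when
 * its VID is set in the adapter's active_vlans bitmap, and frames marked
 * with SECP are additionally handed to ixgbevf_ipsec_rx() for IPsec
 * offload processing.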
505 **/ 506 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, 507 union ixgbe_adv_rx_desc *rx_desc, 508 struct sk_buff *skb) 509 { 510 ixgbevf_rx_hash(rx_ring, rx_desc, skb); 511 ixgbevf_rx_checksum(rx_ring, rx_desc, skb); 512 513 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 514 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 515 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); 516 517 if (test_bit(vid & VLAN_VID_MASK, active_vlans)) 518 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 519 } 520 521 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) 522 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); 523 524 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 525 } 526 527 static 528 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, 529 const unsigned int size) 530 { 531 struct ixgbevf_rx_buffer *rx_buffer; 532 533 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 534 prefetchw(rx_buffer->page); 535 536 /* we are reusing so sync this buffer for CPU use */ 537 dma_sync_single_range_for_cpu(rx_ring->dev, 538 rx_buffer->dma, 539 rx_buffer->page_offset, 540 size, 541 DMA_FROM_DEVICE); 542 543 rx_buffer->pagecnt_bias--; 544 545 return rx_buffer; 546 } 547 548 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, 549 struct ixgbevf_rx_buffer *rx_buffer, 550 struct sk_buff *skb) 551 { 552 if (ixgbevf_can_reuse_rx_page(rx_buffer)) { 553 /* hand second half of page back to the ring */ 554 ixgbevf_reuse_rx_page(rx_ring, rx_buffer); 555 } else { 556 if (IS_ERR(skb)) 557 /* We are not reusing the buffer so unmap it and free 558 * any references we are holding to it 559 */ 560 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 561 ixgbevf_rx_pg_size(rx_ring), 562 DMA_FROM_DEVICE, 563 IXGBEVF_RX_DMA_ATTR); 564 __page_frag_cache_drain(rx_buffer->page, 565 rx_buffer->pagecnt_bias); 566 } 567 568 /* clear contents of rx_buffer */ 569 rx_buffer->page = NULL; 570 } 571 572 /** 573 * ixgbevf_is_non_eop - process handling of non-EOP buffers 574 * @rx_ring: Rx ring being processed 575 * @rx_desc: Rx descriptor for current buffer 576 * 577 * This function updates next to clean. If the buffer is an EOP buffer 578 * this function exits returning false, otherwise it will place the 579 * sk_buff in the next buffer to be chained and return true indicating 580 * that this is in fact a non-EOP buffer. 581 **/ 582 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, 583 union ixgbe_adv_rx_desc *rx_desc) 584 { 585 u32 ntc = rx_ring->next_to_clean + 1; 586 587 /* fetch, update, and store next to clean */ 588 ntc = (ntc < rx_ring->count) ? ntc : 0; 589 rx_ring->next_to_clean = ntc; 590 591 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); 592 593 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) 594 return false; 595 596 return true; 597 } 598 599 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring) 600 { 601 return ring_uses_build_skb(rx_ring) ? 
IXGBEVF_SKB_PAD : 0; 602 } 603 604 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, 605 struct ixgbevf_rx_buffer *bi) 606 { 607 struct page *page = bi->page; 608 dma_addr_t dma; 609 610 /* since we are recycling buffers we should seldom need to alloc */ 611 if (likely(page)) 612 return true; 613 614 /* alloc new page for storage */ 615 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring)); 616 if (unlikely(!page)) { 617 rx_ring->rx_stats.alloc_rx_page_failed++; 618 return false; 619 } 620 621 /* map page for use */ 622 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 623 ixgbevf_rx_pg_size(rx_ring), 624 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); 625 626 /* if mapping failed free memory back to system since 627 * there isn't much point in holding memory we can't use 628 */ 629 if (dma_mapping_error(rx_ring->dev, dma)) { 630 __free_pages(page, ixgbevf_rx_pg_order(rx_ring)); 631 632 rx_ring->rx_stats.alloc_rx_page_failed++; 633 return false; 634 } 635 636 bi->dma = dma; 637 bi->page = page; 638 bi->page_offset = ixgbevf_rx_offset(rx_ring); 639 bi->pagecnt_bias = 1; 640 rx_ring->rx_stats.alloc_rx_page++; 641 642 return true; 643 } 644 645 /** 646 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 647 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on 648 * @cleaned_count: number of buffers to replace 649 **/ 650 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, 651 u16 cleaned_count) 652 { 653 union ixgbe_adv_rx_desc *rx_desc; 654 struct ixgbevf_rx_buffer *bi; 655 unsigned int i = rx_ring->next_to_use; 656 657 /* nothing to do or no valid netdev defined */ 658 if (!cleaned_count || !rx_ring->netdev) 659 return; 660 661 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 662 bi = &rx_ring->rx_buffer_info[i]; 663 i -= rx_ring->count; 664 665 do { 666 if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) 667 break; 668 669 /* sync the buffer for use by the device */ 670 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 671 bi->page_offset, 672 ixgbevf_rx_bufsz(rx_ring), 673 DMA_FROM_DEVICE); 674 675 /* Refresh the desc even if pkt_addr didn't change 676 * because each write-back erases this info. 677 */ 678 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 679 680 rx_desc++; 681 bi++; 682 i++; 683 if (unlikely(!i)) { 684 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); 685 bi = rx_ring->rx_buffer_info; 686 i -= rx_ring->count; 687 } 688 689 /* clear the length for the next_to_use descriptor */ 690 rx_desc->wb.upper.length = 0; 691 692 cleaned_count--; 693 } while (cleaned_count); 694 695 i += rx_ring->count; 696 697 if (rx_ring->next_to_use != i) { 698 /* record the next descriptor to use */ 699 rx_ring->next_to_use = i; 700 701 /* update next to alloc since we have filled the ring */ 702 rx_ring->next_to_alloc = i; 703 704 /* Force memory writes to complete before letting h/w 705 * know there are new descriptors to fetch. (Only 706 * applicable for weak-ordered memory model archs, 707 * such as IA-64). 708 */ 709 wmb(); 710 ixgbevf_write_tail(rx_ring, i); 711 } 712 } 713 714 /** 715 * ixgbevf_cleanup_headers - Correct corrupted or empty headers 716 * @rx_ring: rx descriptor ring packet is being transacted on 717 * @rx_desc: pointer to the EOP Rx descriptor 718 * @skb: pointer to current skb being fixed 719 * 720 * Check for corrupted packet headers caused by senders on the local L2 721 * embedded NIC switch not setting up their Tx Descriptors right. These 722 * should be very rare. 
723 * 724 * Also address the case where we are pulling data in on pages only 725 * and as such no data is present in the skb header. 726 * 727 * In addition if skb is not at least 60 bytes we need to pad it so that 728 * it is large enough to qualify as a valid Ethernet frame. 729 * 730 * Returns true if an error was encountered and skb was freed. 731 **/ 732 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, 733 union ixgbe_adv_rx_desc *rx_desc, 734 struct sk_buff *skb) 735 { 736 /* XDP packets use error pointer so abort at this point */ 737 if (IS_ERR(skb)) 738 return true; 739 740 /* verify that the packet does not have any known errors */ 741 if (unlikely(ixgbevf_test_staterr(rx_desc, 742 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { 743 struct net_device *netdev = rx_ring->netdev; 744 745 if (!(netdev->features & NETIF_F_RXALL)) { 746 dev_kfree_skb_any(skb); 747 return true; 748 } 749 } 750 751 /* if eth_skb_pad returns an error the skb was freed */ 752 if (eth_skb_pad(skb)) 753 return true; 754 755 return false; 756 } 757 758 /** 759 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring 760 * @rx_ring: rx descriptor ring to store buffers on 761 * @old_buff: donor buffer to have page reused 762 * 763 * Synchronizes page for reuse by the adapter 764 **/ 765 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, 766 struct ixgbevf_rx_buffer *old_buff) 767 { 768 struct ixgbevf_rx_buffer *new_buff; 769 u16 nta = rx_ring->next_to_alloc; 770 771 new_buff = &rx_ring->rx_buffer_info[nta]; 772 773 /* update, and store next to alloc */ 774 nta++; 775 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 776 777 /* transfer page from old buffer to new buffer */ 778 new_buff->page = old_buff->page; 779 new_buff->dma = old_buff->dma; 780 new_buff->page_offset = old_buff->page_offset; 781 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 782 } 783 784 static inline bool ixgbevf_page_is_reserved(struct page *page) 785 { 786 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 787 } 788 789 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) 790 { 791 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 792 struct page *page = rx_buffer->page; 793 794 /* avoid re-using remote pages */ 795 if (unlikely(ixgbevf_page_is_reserved(page))) 796 return false; 797 798 #if (PAGE_SIZE < 8192) 799 /* if we are only owner of page we can reuse it */ 800 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) 801 return false; 802 #else 803 #define IXGBEVF_LAST_OFFSET \ 804 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048) 805 806 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET) 807 return false; 808 809 #endif 810 811 /* If we have drained the page fragment pool we need to update 812 * the pagecnt_bias and page count so that we fully restock the 813 * number of references the driver holds. 814 */ 815 if (unlikely(!pagecnt_bias)) { 816 page_ref_add(page, USHRT_MAX); 817 rx_buffer->pagecnt_bias = USHRT_MAX; 818 } 819 820 return true; 821 } 822 823 /** 824 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff 825 * @rx_ring: rx descriptor ring to transact packets on 826 * @rx_buffer: buffer containing page to add 827 * @skb: sk_buff to place the data into 828 * @size: size of buffer to be added 829 * 830 * This function will add the data contained in rx_buffer->page to the skb. 
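 *
 * The buffer's page_offset is then either flipped to the other half of the
 * page (PAGE_SIZE < 8192) or advanced by the consumed truesize so the rest
 * of the page can be reused for a later frame.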
831 **/ 832 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, 833 struct ixgbevf_rx_buffer *rx_buffer, 834 struct sk_buff *skb, 835 unsigned int size) 836 { 837 #if (PAGE_SIZE < 8192) 838 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; 839 #else 840 unsigned int truesize = ring_uses_build_skb(rx_ring) ? 841 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) : 842 SKB_DATA_ALIGN(size); 843 #endif 844 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 845 rx_buffer->page_offset, size, truesize); 846 #if (PAGE_SIZE < 8192) 847 rx_buffer->page_offset ^= truesize; 848 #else 849 rx_buffer->page_offset += truesize; 850 #endif 851 } 852 853 static 854 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring, 855 struct ixgbevf_rx_buffer *rx_buffer, 856 struct xdp_buff *xdp, 857 union ixgbe_adv_rx_desc *rx_desc) 858 { 859 unsigned int size = xdp->data_end - xdp->data; 860 #if (PAGE_SIZE < 8192) 861 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; 862 #else 863 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - 864 xdp->data_hard_start); 865 #endif 866 unsigned int headlen; 867 struct sk_buff *skb; 868 869 /* prefetch first cache line of first page */ 870 net_prefetch(xdp->data); 871 872 /* Note, we get here by enabling legacy-rx via: 873 * 874 * ethtool --set-priv-flags <dev> legacy-rx on 875 * 876 * In this mode, we currently get 0 extra XDP headroom as 877 * opposed to having legacy-rx off, where we process XDP 878 * packets going to stack via ixgbevf_build_skb(). 879 * 880 * For ixgbevf_construct_skb() mode it means that the 881 * xdp->data_meta will always point to xdp->data, since 882 * the helper cannot expand the head. Should this ever 883 * change in future for legacy-rx mode on, then let's also 884 * add xdp->data_meta handling here. 885 */ 886 887 /* allocate a skb to store the frags */ 888 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); 889 if (unlikely(!skb)) 890 return NULL; 891 892 /* Determine available headroom for copy */ 893 headlen = size; 894 if (headlen > IXGBEVF_RX_HDR_SIZE) 895 headlen = eth_get_headlen(skb->dev, xdp->data, 896 IXGBEVF_RX_HDR_SIZE); 897 898 /* align pull length to size of long to optimize memcpy performance */ 899 memcpy(__skb_put(skb, headlen), xdp->data, 900 ALIGN(headlen, sizeof(long))); 901 902 /* update all of the pointers */ 903 size -= headlen; 904 if (size) { 905 skb_add_rx_frag(skb, 0, rx_buffer->page, 906 (xdp->data + headlen) - 907 page_address(rx_buffer->page), 908 size, truesize); 909 #if (PAGE_SIZE < 8192) 910 rx_buffer->page_offset ^= truesize; 911 #else 912 rx_buffer->page_offset += truesize; 913 #endif 914 } else { 915 rx_buffer->pagecnt_bias++; 916 } 917 918 return skb; 919 } 920 921 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 922 u32 qmask) 923 { 924 struct ixgbe_hw *hw = &adapter->hw; 925 926 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 927 } 928 929 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, 930 struct ixgbevf_rx_buffer *rx_buffer, 931 struct xdp_buff *xdp, 932 union ixgbe_adv_rx_desc *rx_desc) 933 { 934 unsigned int metasize = xdp->data - xdp->data_meta; 935 #if (PAGE_SIZE < 8192) 936 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; 937 #else 938 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 939 SKB_DATA_ALIGN(xdp->data_end - 940 xdp->data_hard_start); 941 #endif 942 struct sk_buff *skb; 943 944 /* Prefetch first cache line of first page.
If xdp->data_meta 945 * is unused, this points to xdp->data, otherwise, we likely 946 * have a consumer accessing first few bytes of meta data, 947 * and then actual data. 948 */ 949 net_prefetch(xdp->data_meta); 950 951 /* build an skb around the page buffer */ 952 skb = build_skb(xdp->data_hard_start, truesize); 953 if (unlikely(!skb)) 954 return NULL; 955 956 /* update pointers within the skb to store the data */ 957 skb_reserve(skb, xdp->data - xdp->data_hard_start); 958 __skb_put(skb, xdp->data_end - xdp->data); 959 if (metasize) 960 skb_metadata_set(skb, metasize); 961 962 /* update buffer offset */ 963 #if (PAGE_SIZE < 8192) 964 rx_buffer->page_offset ^= truesize; 965 #else 966 rx_buffer->page_offset += truesize; 967 #endif 968 969 return skb; 970 } 971 972 #define IXGBEVF_XDP_PASS 0 973 #define IXGBEVF_XDP_CONSUMED 1 974 #define IXGBEVF_XDP_TX 2 975 976 static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, 977 struct xdp_buff *xdp) 978 { 979 struct ixgbevf_tx_buffer *tx_buffer; 980 union ixgbe_adv_tx_desc *tx_desc; 981 u32 len, cmd_type; 982 dma_addr_t dma; 983 u16 i; 984 985 len = xdp->data_end - xdp->data; 986 987 if (unlikely(!ixgbevf_desc_unused(ring))) 988 return IXGBEVF_XDP_CONSUMED; 989 990 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); 991 if (dma_mapping_error(ring->dev, dma)) 992 return IXGBEVF_XDP_CONSUMED; 993 994 /* record the location of the first descriptor for this packet */ 995 i = ring->next_to_use; 996 tx_buffer = &ring->tx_buffer_info[i]; 997 998 dma_unmap_len_set(tx_buffer, len, len); 999 dma_unmap_addr_set(tx_buffer, dma, dma); 1000 tx_buffer->data = xdp->data; 1001 tx_buffer->bytecount = len; 1002 tx_buffer->gso_segs = 1; 1003 tx_buffer->protocol = 0; 1004 1005 /* Populate minimal context descriptor that will provide for the 1006 * fact that we are expected to process Ethernet frames. 
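 * The context descriptor is only written once per XDP ring, guarded by
 * the __IXGBEVF_TX_XDP_RING_PRIMED flag below; it lives at index 0 and
 * the data descriptors reference it via the IXGBE_ADVTXD_CC bit.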
1007 */ 1008 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { 1009 struct ixgbe_adv_tx_context_desc *context_desc; 1010 1011 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); 1012 1013 context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); 1014 context_desc->vlan_macip_lens = 1015 cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); 1016 context_desc->fceof_saidx = 0; 1017 context_desc->type_tucmd_mlhl = 1018 cpu_to_le32(IXGBE_TXD_CMD_DEXT | 1019 IXGBE_ADVTXD_DTYP_CTXT); 1020 context_desc->mss_l4len_idx = 0; 1021 1022 i = 1; 1023 } 1024 1025 /* put descriptor type bits */ 1026 cmd_type = IXGBE_ADVTXD_DTYP_DATA | 1027 IXGBE_ADVTXD_DCMD_DEXT | 1028 IXGBE_ADVTXD_DCMD_IFCS; 1029 cmd_type |= len | IXGBE_TXD_CMD; 1030 1031 tx_desc = IXGBEVF_TX_DESC(ring, i); 1032 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1033 1034 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 1035 tx_desc->read.olinfo_status = 1036 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) | 1037 IXGBE_ADVTXD_CC); 1038 1039 /* Avoid any potential race with cleanup */ 1040 smp_wmb(); 1041 1042 /* set next_to_watch value indicating a packet is present */ 1043 i++; 1044 if (i == ring->count) 1045 i = 0; 1046 1047 tx_buffer->next_to_watch = tx_desc; 1048 ring->next_to_use = i; 1049 1050 return IXGBEVF_XDP_TX; 1051 } 1052 1053 static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, 1054 struct ixgbevf_ring *rx_ring, 1055 struct xdp_buff *xdp) 1056 { 1057 int result = IXGBEVF_XDP_PASS; 1058 struct ixgbevf_ring *xdp_ring; 1059 struct bpf_prog *xdp_prog; 1060 u32 act; 1061 1062 rcu_read_lock(); 1063 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1064 1065 if (!xdp_prog) 1066 goto xdp_out; 1067 1068 act = bpf_prog_run_xdp(xdp_prog, xdp); 1069 switch (act) { 1070 case XDP_PASS: 1071 break; 1072 case XDP_TX: 1073 xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; 1074 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp); 1075 break; 1076 default: 1077 bpf_warn_invalid_xdp_action(act); 1078 fallthrough; 1079 case XDP_ABORTED: 1080 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 1081 fallthrough; /* handle aborts by dropping packet */ 1082 case XDP_DROP: 1083 result = IXGBEVF_XDP_CONSUMED; 1084 break; 1085 } 1086 xdp_out: 1087 rcu_read_unlock(); 1088 return ERR_PTR(-result); 1089 } 1090 1091 static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring, 1092 unsigned int size) 1093 { 1094 unsigned int truesize; 1095 1096 #if (PAGE_SIZE < 8192) 1097 truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ 1098 #else 1099 truesize = ring_uses_build_skb(rx_ring) ? 
1100 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) + 1101 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 1102 SKB_DATA_ALIGN(size); 1103 #endif 1104 return truesize; 1105 } 1106 1107 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring, 1108 struct ixgbevf_rx_buffer *rx_buffer, 1109 unsigned int size) 1110 { 1111 unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size); 1112 1113 #if (PAGE_SIZE < 8192) 1114 rx_buffer->page_offset ^= truesize; 1115 #else 1116 rx_buffer->page_offset += truesize; 1117 #endif 1118 } 1119 1120 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 1121 struct ixgbevf_ring *rx_ring, 1122 int budget) 1123 { 1124 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0; 1125 struct ixgbevf_adapter *adapter = q_vector->adapter; 1126 u16 cleaned_count = ixgbevf_desc_unused(rx_ring); 1127 struct sk_buff *skb = rx_ring->skb; 1128 bool xdp_xmit = false; 1129 struct xdp_buff xdp; 1130 1131 /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1132 #if (PAGE_SIZE < 8192) 1133 frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0); 1134 #endif 1135 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 1136 1137 while (likely(total_rx_packets < budget)) { 1138 struct ixgbevf_rx_buffer *rx_buffer; 1139 union ixgbe_adv_rx_desc *rx_desc; 1140 unsigned int size; 1141 1142 /* return some buffers to hardware, one at a time is too slow */ 1143 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 1144 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); 1145 cleaned_count = 0; 1146 } 1147 1148 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); 1149 size = le16_to_cpu(rx_desc->wb.upper.length); 1150 if (!size) 1151 break; 1152 1153 /* This memory barrier is needed to keep us from reading 1154 * any other fields out of the rx_desc until we know the 1155 * RXD_STAT_DD bit is set 1156 */ 1157 rmb(); 1158 1159 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size); 1160 1161 /* retrieve a buffer from the ring */ 1162 if (!skb) { 1163 xdp.data = page_address(rx_buffer->page) + 1164 rx_buffer->page_offset; 1165 xdp.data_meta = xdp.data; 1166 xdp.data_hard_start = xdp.data - 1167 ixgbevf_rx_offset(rx_ring); 1168 xdp.data_end = xdp.data + size; 1169 #if (PAGE_SIZE > 4096) 1170 /* At larger PAGE_SIZE, frame_sz depend on len size */ 1171 xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size); 1172 #endif 1173 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); 1174 } 1175 1176 if (IS_ERR(skb)) { 1177 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) { 1178 xdp_xmit = true; 1179 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, 1180 size); 1181 } else { 1182 rx_buffer->pagecnt_bias++; 1183 } 1184 total_rx_packets++; 1185 total_rx_bytes += size; 1186 } else if (skb) { 1187 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size); 1188 } else if (ring_uses_build_skb(rx_ring)) { 1189 skb = ixgbevf_build_skb(rx_ring, rx_buffer, 1190 &xdp, rx_desc); 1191 } else { 1192 skb = ixgbevf_construct_skb(rx_ring, rx_buffer, 1193 &xdp, rx_desc); 1194 } 1195 1196 /* exit if we failed to retrieve a buffer */ 1197 if (!skb) { 1198 rx_ring->rx_stats.alloc_rx_buff_failed++; 1199 rx_buffer->pagecnt_bias++; 1200 break; 1201 } 1202 1203 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb); 1204 cleaned_count++; 1205 1206 /* fetch next buffer in frame if non-eop */ 1207 if (ixgbevf_is_non_eop(rx_ring, rx_desc)) 1208 continue; 1209 1210 /* verify the packet layout is correct */ 1211 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { 1212 skb = NULL; 1213 continue; 1214 } 1215 1216 /* probably a little skewed due to 
removing CRC */ 1217 total_rx_bytes += skb->len; 1218 1219 /* Workaround hardware that can't do proper VEPA multicast 1220 * source pruning. 1221 */ 1222 if ((skb->pkt_type == PACKET_BROADCAST || 1223 skb->pkt_type == PACKET_MULTICAST) && 1224 ether_addr_equal(rx_ring->netdev->dev_addr, 1225 eth_hdr(skb)->h_source)) { 1226 dev_kfree_skb_irq(skb); 1227 continue; 1228 } 1229 1230 /* populate checksum, VLAN, and protocol */ 1231 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); 1232 1233 ixgbevf_rx_skb(q_vector, skb); 1234 1235 /* reset skb pointer */ 1236 skb = NULL; 1237 1238 /* update budget accounting */ 1239 total_rx_packets++; 1240 } 1241 1242 /* place incomplete frames back on ring for completion */ 1243 rx_ring->skb = skb; 1244 1245 if (xdp_xmit) { 1246 struct ixgbevf_ring *xdp_ring = 1247 adapter->xdp_ring[rx_ring->queue_index]; 1248 1249 /* Force memory writes to complete before letting h/w 1250 * know there are new descriptors to fetch. 1251 */ 1252 wmb(); 1253 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use); 1254 } 1255 1256 u64_stats_update_begin(&rx_ring->syncp); 1257 rx_ring->stats.packets += total_rx_packets; 1258 rx_ring->stats.bytes += total_rx_bytes; 1259 u64_stats_update_end(&rx_ring->syncp); 1260 q_vector->rx.total_packets += total_rx_packets; 1261 q_vector->rx.total_bytes += total_rx_bytes; 1262 1263 return total_rx_packets; 1264 } 1265 1266 /** 1267 * ixgbevf_poll - NAPI polling callback 1268 * @napi: napi struct with our device's info in it 1269 * @budget: amount of work driver is allowed to do this pass, in packets 1270 * 1271 * This function will clean all rings associated with a 1272 * q_vector. 1273 **/ 1274 static int ixgbevf_poll(struct napi_struct *napi, int budget) 1275 { 1276 struct ixgbevf_q_vector *q_vector = 1277 container_of(napi, struct ixgbevf_q_vector, napi); 1278 struct ixgbevf_adapter *adapter = q_vector->adapter; 1279 struct ixgbevf_ring *ring; 1280 int per_ring_budget, work_done = 0; 1281 bool clean_complete = true; 1282 1283 ixgbevf_for_each_ring(ring, q_vector->tx) { 1284 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget)) 1285 clean_complete = false; 1286 } 1287 1288 if (budget <= 0) 1289 return budget; 1290 1291 /* attempt to distribute budget to each queue fairly, but don't allow 1292 * the budget to go below 1 because we'll exit polling 1293 */ 1294 if (q_vector->rx.count > 1) 1295 per_ring_budget = max(budget/q_vector->rx.count, 1); 1296 else 1297 per_ring_budget = budget; 1298 1299 ixgbevf_for_each_ring(ring, q_vector->rx) { 1300 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, 1301 per_ring_budget); 1302 work_done += cleaned; 1303 if (cleaned >= per_ring_budget) 1304 clean_complete = false; 1305 } 1306 1307 /* If all work not completed, return budget and keep polling */ 1308 if (!clean_complete) 1309 return budget; 1310 1311 /* Exit the polling mode, but don't re-enable interrupts if stack might 1312 * poll us due to busy-polling 1313 */ 1314 if (likely(napi_complete_done(napi, work_done))) { 1315 if (adapter->rx_itr_setting == 1) 1316 ixgbevf_set_itr(q_vector); 1317 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && 1318 !test_bit(__IXGBEVF_REMOVING, &adapter->state)) 1319 ixgbevf_irq_enable_queues(adapter, 1320 BIT(q_vector->v_idx)); 1321 } 1322 1323 return min(work_done, budget - 1); 1324 } 1325 1326 /** 1327 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 1328 * @q_vector: structure containing interrupt and ring information 1329 **/ 1330 void ixgbevf_write_eitr(struct ixgbevf_q_vector
*q_vector) 1331 { 1332 struct ixgbevf_adapter *adapter = q_vector->adapter; 1333 struct ixgbe_hw *hw = &adapter->hw; 1334 int v_idx = q_vector->v_idx; 1335 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 1336 1337 /* set the WDIS bit to not clear the timer bits and cause an 1338 * immediate assertion of the interrupt 1339 */ 1340 itr_reg |= IXGBE_EITR_CNT_WDIS; 1341 1342 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 1343 } 1344 1345 /** 1346 * ixgbevf_configure_msix - Configure MSI-X hardware 1347 * @adapter: board private structure 1348 * 1349 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 1350 * interrupts. 1351 **/ 1352 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 1353 { 1354 struct ixgbevf_q_vector *q_vector; 1355 int q_vectors, v_idx; 1356 1357 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1358 adapter->eims_enable_mask = 0; 1359 1360 /* Populate the IVAR table and set the ITR values to the 1361 * corresponding register. 1362 */ 1363 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 1364 struct ixgbevf_ring *ring; 1365 1366 q_vector = adapter->q_vector[v_idx]; 1367 1368 ixgbevf_for_each_ring(ring, q_vector->rx) 1369 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 1370 1371 ixgbevf_for_each_ring(ring, q_vector->tx) 1372 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 1373 1374 if (q_vector->tx.ring && !q_vector->rx.ring) { 1375 /* Tx only vector */ 1376 if (adapter->tx_itr_setting == 1) 1377 q_vector->itr = IXGBE_12K_ITR; 1378 else 1379 q_vector->itr = adapter->tx_itr_setting; 1380 } else { 1381 /* Rx or Rx/Tx vector */ 1382 if (adapter->rx_itr_setting == 1) 1383 q_vector->itr = IXGBE_20K_ITR; 1384 else 1385 q_vector->itr = adapter->rx_itr_setting; 1386 } 1387 1388 /* add q_vector eims value to global eims_enable_mask */ 1389 adapter->eims_enable_mask |= BIT(v_idx); 1390 1391 ixgbevf_write_eitr(q_vector); 1392 } 1393 1394 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 1395 /* setup eims_other and add value to global eims_enable_mask */ 1396 adapter->eims_other = BIT(v_idx); 1397 adapter->eims_enable_mask |= adapter->eims_other; 1398 } 1399 1400 enum latency_range { 1401 lowest_latency = 0, 1402 low_latency = 1, 1403 bulk_latency = 2, 1404 latency_invalid = 255 1405 }; 1406 1407 /** 1408 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 1409 * @q_vector: structure containing interrupt and ring information 1410 * @ring_container: structure containing ring performance data 1411 * 1412 * Stores a new ITR value based on packets and byte 1413 * counts during the last interrupt. The advantage of per interrupt 1414 * computation is faster updates and more accurate ITR for the current 1415 * traffic pattern. Constants in this function were computed 1416 * based on theoretical maximum wire speed and thresholds were set based 1417 * on testing data as well as attempting to minimize response time 1418 * while increasing bulk throughput. 1419 **/ 1420 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 1421 struct ixgbevf_ring_container *ring_container) 1422 { 1423 int bytes = ring_container->total_bytes; 1424 int packets = ring_container->total_packets; 1425 u32 timepassed_us; 1426 u64 bytes_perint; 1427 u8 itr_setting = ring_container->itr; 1428 1429 if (packets == 0) 1430 return; 1431 1432 /* simple throttle rate management 1433 * 0-20MB/s lowest (100000 ints/s) 1434 * 20-100MB/s low (20000 ints/s) 1435 * 100-1249MB/s bulk (12000 ints/s) 1436 */ 1437 /* what was last interrupt timeslice? 
*/ 1438 timepassed_us = q_vector->itr >> 2; 1439 if (timepassed_us == 0) 1440 return; 1441 1442 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 1443 1444 switch (itr_setting) { 1445 case lowest_latency: 1446 if (bytes_perint > 10) 1447 itr_setting = low_latency; 1448 break; 1449 case low_latency: 1450 if (bytes_perint > 20) 1451 itr_setting = bulk_latency; 1452 else if (bytes_perint <= 10) 1453 itr_setting = lowest_latency; 1454 break; 1455 case bulk_latency: 1456 if (bytes_perint <= 20) 1457 itr_setting = low_latency; 1458 break; 1459 } 1460 1461 /* clear work counters since we have the values we need */ 1462 ring_container->total_bytes = 0; 1463 ring_container->total_packets = 0; 1464 1465 /* write updated itr to ring container */ 1466 ring_container->itr = itr_setting; 1467 } 1468 1469 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 1470 { 1471 u32 new_itr = q_vector->itr; 1472 u8 current_itr; 1473 1474 ixgbevf_update_itr(q_vector, &q_vector->tx); 1475 ixgbevf_update_itr(q_vector, &q_vector->rx); 1476 1477 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 1478 1479 switch (current_itr) { 1480 /* counts and packets in update_itr are dependent on these numbers */ 1481 case lowest_latency: 1482 new_itr = IXGBE_100K_ITR; 1483 break; 1484 case low_latency: 1485 new_itr = IXGBE_20K_ITR; 1486 break; 1487 case bulk_latency: 1488 new_itr = IXGBE_12K_ITR; 1489 break; 1490 default: 1491 break; 1492 } 1493 1494 if (new_itr != q_vector->itr) { 1495 /* do an exponential smoothing */ 1496 new_itr = (10 * new_itr * q_vector->itr) / 1497 ((9 * new_itr) + q_vector->itr); 1498 1499 /* save the algorithm value here */ 1500 q_vector->itr = new_itr; 1501 1502 ixgbevf_write_eitr(q_vector); 1503 } 1504 } 1505 1506 static irqreturn_t ixgbevf_msix_other(int irq, void *data) 1507 { 1508 struct ixgbevf_adapter *adapter = data; 1509 struct ixgbe_hw *hw = &adapter->hw; 1510 1511 hw->mac.get_link_status = 1; 1512 1513 ixgbevf_service_event_schedule(adapter); 1514 1515 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 1516 1517 return IRQ_HANDLED; 1518 } 1519 1520 /** 1521 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 1522 * @irq: unused 1523 * @data: pointer to our q_vector struct for this interrupt vector 1524 **/ 1525 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 1526 { 1527 struct ixgbevf_q_vector *q_vector = data; 1528 1529 /* EIAM disabled interrupts (on this vector) for us */ 1530 if (q_vector->rx.ring || q_vector->tx.ring) 1531 napi_schedule_irqoff(&q_vector->napi); 1532 1533 return IRQ_HANDLED; 1534 } 1535 1536 /** 1537 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 1538 * @adapter: board private structure 1539 * 1540 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 1541 * interrupts from the kernel. 
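 *
 * One IRQ is requested per queue vector, named after the netdev and the
 * Tx/Rx queues it serves, plus a final "other" vector that services
 * link and mailbox events via ixgbevf_msix_other().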
1542 **/ 1543 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 1544 { 1545 struct net_device *netdev = adapter->netdev; 1546 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1547 unsigned int ri = 0, ti = 0; 1548 int vector, err; 1549 1550 for (vector = 0; vector < q_vectors; vector++) { 1551 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 1552 struct msix_entry *entry = &adapter->msix_entries[vector]; 1553 1554 if (q_vector->tx.ring && q_vector->rx.ring) { 1555 snprintf(q_vector->name, sizeof(q_vector->name), 1556 "%s-TxRx-%u", netdev->name, ri++); 1557 ti++; 1558 } else if (q_vector->rx.ring) { 1559 snprintf(q_vector->name, sizeof(q_vector->name), 1560 "%s-rx-%u", netdev->name, ri++); 1561 } else if (q_vector->tx.ring) { 1562 snprintf(q_vector->name, sizeof(q_vector->name), 1563 "%s-tx-%u", netdev->name, ti++); 1564 } else { 1565 /* skip this unused q_vector */ 1566 continue; 1567 } 1568 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 1569 q_vector->name, q_vector); 1570 if (err) { 1571 hw_dbg(&adapter->hw, 1572 "request_irq failed for MSIX interrupt Error: %d\n", 1573 err); 1574 goto free_queue_irqs; 1575 } 1576 } 1577 1578 err = request_irq(adapter->msix_entries[vector].vector, 1579 &ixgbevf_msix_other, 0, netdev->name, adapter); 1580 if (err) { 1581 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", 1582 err); 1583 goto free_queue_irqs; 1584 } 1585 1586 return 0; 1587 1588 free_queue_irqs: 1589 while (vector) { 1590 vector--; 1591 free_irq(adapter->msix_entries[vector].vector, 1592 adapter->q_vector[vector]); 1593 } 1594 /* This failure is non-recoverable - it indicates the system is 1595 * out of MSIX vector resources and the VF driver cannot run 1596 * without them. Set the number of msix vectors to zero 1597 * indicating that not enough can be allocated. The error 1598 * will be returned to the user indicating device open failed. 1599 * Any further attempts to force the driver to open will also 1600 * fail. The only way to recover is to unload the driver and 1601 * reload it again. If the system has recovered some MSIX 1602 * vectors then it may succeed. 1603 */ 1604 adapter->num_msix_vectors = 0; 1605 return err; 1606 } 1607 1608 /** 1609 * ixgbevf_request_irq - initialize interrupts 1610 * @adapter: board private structure 1611 * 1612 * Attempts to configure interrupts using the best available 1613 * capabilities of the hardware and kernel. 
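 *
 * The VF driver only uses MSI-X, so this simply delegates to
 * ixgbevf_request_msix_irqs().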
1614 **/ 1615 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 1616 { 1617 int err = ixgbevf_request_msix_irqs(adapter); 1618 1619 if (err) 1620 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); 1621 1622 return err; 1623 } 1624 1625 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 1626 { 1627 int i, q_vectors; 1628 1629 if (!adapter->msix_entries) 1630 return; 1631 1632 q_vectors = adapter->num_msix_vectors; 1633 i = q_vectors - 1; 1634 1635 free_irq(adapter->msix_entries[i].vector, adapter); 1636 i--; 1637 1638 for (; i >= 0; i--) { 1639 /* free only the irqs that were actually requested */ 1640 if (!adapter->q_vector[i]->rx.ring && 1641 !adapter->q_vector[i]->tx.ring) 1642 continue; 1643 1644 free_irq(adapter->msix_entries[i].vector, 1645 adapter->q_vector[i]); 1646 } 1647 } 1648 1649 /** 1650 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 1651 * @adapter: board private structure 1652 **/ 1653 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 1654 { 1655 struct ixgbe_hw *hw = &adapter->hw; 1656 int i; 1657 1658 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 1659 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1660 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 1661 1662 IXGBE_WRITE_FLUSH(hw); 1663 1664 for (i = 0; i < adapter->num_msix_vectors; i++) 1665 synchronize_irq(adapter->msix_entries[i].vector); 1666 } 1667 1668 /** 1669 * ixgbevf_irq_enable - Enable default interrupt generation settings 1670 * @adapter: board private structure 1671 **/ 1672 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1673 { 1674 struct ixgbe_hw *hw = &adapter->hw; 1675 1676 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1677 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1678 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1679 } 1680 1681 /** 1682 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset 1683 * @adapter: board private structure 1684 * @ring: structure containing ring specific data 1685 * 1686 * Configure the Tx descriptor ring after a reset. 
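 *
 * This programs the descriptor base address and length, disables head
 * writeback, resets the head/tail pointers, sets the prefetch/host/
 * write-back thresholds, and then polls until the queue reports itself
 * enabled.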
1687 **/ 1688 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, 1689 struct ixgbevf_ring *ring) 1690 { 1691 struct ixgbe_hw *hw = &adapter->hw; 1692 u64 tdba = ring->dma; 1693 int wait_loop = 10; 1694 u32 txdctl = IXGBE_TXDCTL_ENABLE; 1695 u8 reg_idx = ring->reg_idx; 1696 1697 /* disable queue to avoid issues while updating state */ 1698 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 1699 IXGBE_WRITE_FLUSH(hw); 1700 1701 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); 1702 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32); 1703 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx), 1704 ring->count * sizeof(union ixgbe_adv_tx_desc)); 1705 1706 /* disable head writeback */ 1707 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0); 1708 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0); 1709 1710 /* enable relaxed ordering */ 1711 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx), 1712 (IXGBE_DCA_TXCTRL_DESC_RRO_EN | 1713 IXGBE_DCA_TXCTRL_DATA_RRO_EN)); 1714 1715 /* reset head and tail pointers */ 1716 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0); 1717 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0); 1718 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); 1719 1720 /* reset ntu and ntc to place SW in sync with hardware */ 1721 ring->next_to_clean = 0; 1722 ring->next_to_use = 0; 1723 1724 /* In order to avoid issues, WTHRESH + PTHRESH should always be equal 1725 * to or less than the number of on-chip descriptors, which is 1726 * currently 40. 1727 */ 1728 txdctl |= (8 << 16); /* WTHRESH = 8 */ 1729 1730 /* Setting PTHRESH to 32 improves performance */ 1731 txdctl |= (1u << 8) | /* HTHRESH = 1 */ 1732 32; /* PTHRESH = 32 */ 1733 1734 /* reinitialize tx_buffer_info */ 1735 memset(ring->tx_buffer_info, 0, 1736 sizeof(struct ixgbevf_tx_buffer) * ring->count); 1737 1738 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); 1739 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); 1740 1741 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); 1742 1743 /* poll to verify queue is enabled */ 1744 do { 1745 usleep_range(1000, 2000); 1746 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); 1747 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); 1748 if (!wait_loop) 1749 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); 1750 } 1751 1752 /** 1753 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1754 * @adapter: board private structure 1755 * 1756 * Configure the Tx unit of the MAC after a reset.
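 *
 * Both the regular Tx rings and any XDP Tx rings are configured here.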
1757 **/ 1758 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1759 { 1760 u32 i; 1761 1762 /* Setup the HW Tx Head and Tail descriptor pointers */ 1763 for (i = 0; i < adapter->num_tx_queues; i++) 1764 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); 1765 for (i = 0; i < adapter->num_xdp_queues; i++) 1766 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]); 1767 } 1768 1769 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1770 1771 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, 1772 struct ixgbevf_ring *ring, int index) 1773 { 1774 struct ixgbe_hw *hw = &adapter->hw; 1775 u32 srrctl; 1776 1777 srrctl = IXGBE_SRRCTL_DROP_EN; 1778 1779 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; 1780 if (ring_uses_large_buffer(ring)) 1781 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1782 else 1783 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1784 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1785 1786 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1787 } 1788 1789 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) 1790 { 1791 struct ixgbe_hw *hw = &adapter->hw; 1792 1793 /* PSRTYPE must be initialized in 82599 */ 1794 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 1795 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | 1796 IXGBE_PSRTYPE_L2HDR; 1797 1798 if (adapter->num_rx_queues > 1) 1799 psrtype |= BIT(29); 1800 1801 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1802 } 1803 1804 #define IXGBEVF_MAX_RX_DESC_POLL 10 1805 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, 1806 struct ixgbevf_ring *ring) 1807 { 1808 struct ixgbe_hw *hw = &adapter->hw; 1809 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1810 u32 rxdctl; 1811 u8 reg_idx = ring->reg_idx; 1812 1813 if (IXGBE_REMOVED(hw->hw_addr)) 1814 return; 1815 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1816 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1817 1818 /* write value back with RXDCTL.ENABLE bit cleared */ 1819 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1820 1821 /* the hardware may take up to 100us to really disable the Rx queue */ 1822 do { 1823 udelay(10); 1824 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1825 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); 1826 1827 if (!wait_loop) 1828 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n", 1829 reg_idx); 1830 } 1831 1832 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1833 struct ixgbevf_ring *ring) 1834 { 1835 struct ixgbe_hw *hw = &adapter->hw; 1836 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1837 u32 rxdctl; 1838 u8 reg_idx = ring->reg_idx; 1839 1840 if (IXGBE_REMOVED(hw->hw_addr)) 1841 return; 1842 do { 1843 usleep_range(1000, 2000); 1844 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1845 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); 1846 1847 if (!wait_loop) 1848 pr_err("RXDCTL.ENABLE queue %d not set while polling\n", 1849 reg_idx); 1850 } 1851 1852 /** 1853 * ixgbevf_init_rss_key - Initialize adapter RSS key 1854 * @adapter: device handle 1855 * 1856 * Allocates and initializes the RSS key if it is not allocated. 
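 *
 * Returns 0 on success or -ENOMEM if the key could not be allocated.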
1857 **/ 1858 static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter) 1859 { 1860 u32 *rss_key; 1861 1862 if (!adapter->rss_key) { 1863 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL); 1864 if (unlikely(!rss_key)) 1865 return -ENOMEM; 1866 1867 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE); 1868 adapter->rss_key = rss_key; 1869 } 1870 1871 return 0; 1872 } 1873 1874 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) 1875 { 1876 struct ixgbe_hw *hw = &adapter->hw; 1877 u32 vfmrqc = 0, vfreta = 0; 1878 u16 rss_i = adapter->num_rx_queues; 1879 u8 i, j; 1880 1881 /* Fill out hash function seeds */ 1882 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) 1883 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); 1884 1885 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { 1886 if (j == rss_i) 1887 j = 0; 1888 1889 adapter->rss_indir_tbl[i] = j; 1890 1891 vfreta |= j << (i & 0x3) * 8; 1892 if ((i & 3) == 3) { 1893 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); 1894 vfreta = 0; 1895 } 1896 } 1897 1898 /* Perform hash on these packet types */ 1899 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 | 1900 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP | 1901 IXGBE_VFMRQC_RSS_FIELD_IPV6 | 1902 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP; 1903 1904 vfmrqc |= IXGBE_VFMRQC_RSSEN; 1905 1906 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc); 1907 } 1908 1909 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, 1910 struct ixgbevf_ring *ring) 1911 { 1912 struct ixgbe_hw *hw = &adapter->hw; 1913 union ixgbe_adv_rx_desc *rx_desc; 1914 u64 rdba = ring->dma; 1915 u32 rxdctl; 1916 u8 reg_idx = ring->reg_idx; 1917 1918 /* disable queue to avoid issues while updating state */ 1919 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1920 ixgbevf_disable_rx_queue(adapter, ring); 1921 1922 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); 1923 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32); 1924 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx), 1925 ring->count * sizeof(union ixgbe_adv_rx_desc)); 1926 1927 #ifndef CONFIG_SPARC 1928 /* enable relaxed ordering */ 1929 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), 1930 IXGBE_DCA_RXCTRL_DESC_RRO_EN); 1931 #else 1932 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), 1933 IXGBE_DCA_RXCTRL_DESC_RRO_EN | 1934 IXGBE_DCA_RXCTRL_DATA_WRO_EN); 1935 #endif 1936 1937 /* reset head and tail pointers */ 1938 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); 1939 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); 1940 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); 1941 1942 /* initialize rx_buffer_info */ 1943 memset(ring->rx_buffer_info, 0, 1944 sizeof(struct ixgbevf_rx_buffer) * ring->count); 1945 1946 /* initialize Rx descriptor 0 */ 1947 rx_desc = IXGBEVF_RX_DESC(ring, 0); 1948 rx_desc->wb.upper.length = 0; 1949 1950 /* reset ntu and ntc to place SW in sync with hardwdare */ 1951 ring->next_to_clean = 0; 1952 ring->next_to_use = 0; 1953 ring->next_to_alloc = 0; 1954 1955 ixgbevf_configure_srrctl(adapter, ring, reg_idx); 1956 1957 /* RXDCTL.RLPML does not work on 82599 */ 1958 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) { 1959 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | 1960 IXGBE_RXDCTL_RLPML_EN); 1961 1962 #if (PAGE_SIZE < 8192) 1963 /* Limit the maximum frame size so we don't overrun the skb */ 1964 if (ring_uses_build_skb(ring) && 1965 !ring_uses_large_buffer(ring)) 1966 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB | 1967 IXGBE_RXDCTL_RLPML_EN; 1968 #endif 1969 } 1970 1971 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 
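	/* write RXDCTL back to re-enable the ring; the VME bit requests
	 * VLAN tag stripping for this queue
	 */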
1972 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1973 1974 ixgbevf_rx_desc_queue_enable(adapter, ring); 1975 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); 1976 } 1977 1978 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, 1979 struct ixgbevf_ring *rx_ring) 1980 { 1981 struct net_device *netdev = adapter->netdev; 1982 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1983 1984 /* set build_skb and buffer size flags */ 1985 clear_ring_build_skb_enabled(rx_ring); 1986 clear_ring_uses_large_buffer(rx_ring); 1987 1988 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) 1989 return; 1990 1991 set_ring_build_skb_enabled(rx_ring); 1992 1993 if (PAGE_SIZE < 8192) { 1994 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB) 1995 return; 1996 1997 set_ring_uses_large_buffer(rx_ring); 1998 } 1999 } 2000 2001 /** 2002 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 2003 * @adapter: board private structure 2004 * 2005 * Configure the Rx unit of the MAC after a reset. 2006 **/ 2007 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 2008 { 2009 struct ixgbe_hw *hw = &adapter->hw; 2010 struct net_device *netdev = adapter->netdev; 2011 int i, ret; 2012 2013 ixgbevf_setup_psrtype(adapter); 2014 if (hw->mac.type >= ixgbe_mac_X550_vf) 2015 ixgbevf_setup_vfmrqc(adapter); 2016 2017 spin_lock_bh(&adapter->mbx_lock); 2018 /* notify the PF of our intent to use this size of frame */ 2019 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); 2020 spin_unlock_bh(&adapter->mbx_lock); 2021 if (ret) 2022 dev_err(&adapter->pdev->dev, 2023 "Failed to set MTU at %d\n", netdev->mtu); 2024 2025 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2026 * the Base and Length of the Rx Descriptor Ring 2027 */ 2028 for (i = 0; i < adapter->num_rx_queues; i++) { 2029 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; 2030 2031 ixgbevf_set_rx_buffer_len(adapter, rx_ring); 2032 ixgbevf_configure_rx_ring(adapter, rx_ring); 2033 } 2034 } 2035 2036 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, 2037 __be16 proto, u16 vid) 2038 { 2039 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2040 struct ixgbe_hw *hw = &adapter->hw; 2041 int err; 2042 2043 spin_lock_bh(&adapter->mbx_lock); 2044 2045 /* add VID to filter table */ 2046 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 2047 2048 spin_unlock_bh(&adapter->mbx_lock); 2049 2050 /* translate error return types so error makes sense */ 2051 if (err == IXGBE_ERR_MBX) 2052 return -EIO; 2053 2054 if (err == IXGBE_ERR_INVALID_ARGUMENT) 2055 return -EACCES; 2056 2057 set_bit(vid, adapter->active_vlans); 2058 2059 return err; 2060 } 2061 2062 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, 2063 __be16 proto, u16 vid) 2064 { 2065 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2066 struct ixgbe_hw *hw = &adapter->hw; 2067 int err; 2068 2069 spin_lock_bh(&adapter->mbx_lock); 2070 2071 /* remove VID from filter table */ 2072 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 2073 2074 spin_unlock_bh(&adapter->mbx_lock); 2075 2076 clear_bit(vid, adapter->active_vlans); 2077 2078 return err; 2079 } 2080 2081 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 2082 { 2083 u16 vid; 2084 2085 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2086 ixgbevf_vlan_rx_add_vid(adapter->netdev, 2087 htons(ETH_P_8021Q), vid); 2088 } 2089 2090 static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 2091 { 2092 struct ixgbevf_adapter *adapter = 
netdev_priv(netdev); 2093 struct ixgbe_hw *hw = &adapter->hw; 2094 int count = 0; 2095 2096 if (!netdev_uc_empty(netdev)) { 2097 struct netdev_hw_addr *ha; 2098 2099 netdev_for_each_uc_addr(ha, netdev) { 2100 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 2101 udelay(200); 2102 } 2103 } else { 2104 /* If the list is empty then send message to PF driver to 2105 * clear all MAC VLANs on this VF. 2106 */ 2107 hw->mac.ops.set_uc_addr(hw, 0, NULL); 2108 } 2109 2110 return count; 2111 } 2112 2113 /** 2114 * ixgbevf_set_rx_mode - Multicast and unicast set 2115 * @netdev: network interface device structure 2116 * 2117 * The set_rx_method entry point is called whenever the multicast address 2118 * list, unicast address list or the network interface flags are updated. 2119 * This routine is responsible for configuring the hardware for proper 2120 * multicast mode and configuring requested unicast filters. 2121 **/ 2122 static void ixgbevf_set_rx_mode(struct net_device *netdev) 2123 { 2124 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2125 struct ixgbe_hw *hw = &adapter->hw; 2126 unsigned int flags = netdev->flags; 2127 int xcast_mode; 2128 2129 /* request the most inclusive mode we need */ 2130 if (flags & IFF_PROMISC) 2131 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; 2132 else if (flags & IFF_ALLMULTI) 2133 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI; 2134 else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) 2135 xcast_mode = IXGBEVF_XCAST_MODE_MULTI; 2136 else 2137 xcast_mode = IXGBEVF_XCAST_MODE_NONE; 2138 2139 spin_lock_bh(&adapter->mbx_lock); 2140 2141 hw->mac.ops.update_xcast_mode(hw, xcast_mode); 2142 2143 /* reprogram multicast list */ 2144 hw->mac.ops.update_mc_addr_list(hw, netdev); 2145 2146 ixgbevf_write_uc_addr_list(netdev); 2147 2148 spin_unlock_bh(&adapter->mbx_lock); 2149 } 2150 2151 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 2152 { 2153 int q_idx; 2154 struct ixgbevf_q_vector *q_vector; 2155 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2156 2157 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2158 q_vector = adapter->q_vector[q_idx]; 2159 napi_enable(&q_vector->napi); 2160 } 2161 } 2162 2163 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 2164 { 2165 int q_idx; 2166 struct ixgbevf_q_vector *q_vector; 2167 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2168 2169 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2170 q_vector = adapter->q_vector[q_idx]; 2171 napi_disable(&q_vector->napi); 2172 } 2173 } 2174 2175 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) 2176 { 2177 struct ixgbe_hw *hw = &adapter->hw; 2178 unsigned int def_q = 0; 2179 unsigned int num_tcs = 0; 2180 unsigned int num_rx_queues = adapter->num_rx_queues; 2181 unsigned int num_tx_queues = adapter->num_tx_queues; 2182 int err; 2183 2184 spin_lock_bh(&adapter->mbx_lock); 2185 2186 /* fetch queue configuration from the PF */ 2187 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2188 2189 spin_unlock_bh(&adapter->mbx_lock); 2190 2191 if (err) 2192 return err; 2193 2194 if (num_tcs > 1) { 2195 /* we need only one Tx queue */ 2196 num_tx_queues = 1; 2197 2198 /* update default Tx ring register index */ 2199 adapter->tx_ring[0]->reg_idx = def_q; 2200 2201 /* we need as many queues as traffic classes */ 2202 num_rx_queues = num_tcs; 2203 } 2204 2205 /* if we have a bad config abort request queue reset */ 2206 if ((adapter->num_rx_queues != num_rx_queues) || 2207 (adapter->num_tx_queues != num_tx_queues)) { 2208 /* force mailbox timeout to 
prevent further messages */ 2209 hw->mbx.timeout = 0; 2210 2211 /* wait for watchdog to come around and bail us out */ 2212 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); 2213 } 2214 2215 return 0; 2216 } 2217 2218 static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 2219 { 2220 ixgbevf_configure_dcb(adapter); 2221 2222 ixgbevf_set_rx_mode(adapter->netdev); 2223 2224 ixgbevf_restore_vlan(adapter); 2225 ixgbevf_ipsec_restore(adapter); 2226 2227 ixgbevf_configure_tx(adapter); 2228 ixgbevf_configure_rx(adapter); 2229 } 2230 2231 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 2232 { 2233 /* Only save pre-reset stats if there are some */ 2234 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 2235 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 2236 adapter->stats.base_vfgprc; 2237 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 2238 adapter->stats.base_vfgptc; 2239 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 2240 adapter->stats.base_vfgorc; 2241 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 2242 adapter->stats.base_vfgotc; 2243 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 2244 adapter->stats.base_vfmprc; 2245 } 2246 } 2247 2248 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 2249 { 2250 struct ixgbe_hw *hw = &adapter->hw; 2251 2252 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 2253 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 2254 adapter->stats.last_vfgorc |= 2255 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 2256 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 2257 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 2258 adapter->stats.last_vfgotc |= 2259 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 2260 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 2261 2262 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 2263 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 2264 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 2265 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 2266 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 2267 } 2268 2269 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 2270 { 2271 struct ixgbe_hw *hw = &adapter->hw; 2272 static const int api[] = { 2273 ixgbe_mbox_api_14, 2274 ixgbe_mbox_api_13, 2275 ixgbe_mbox_api_12, 2276 ixgbe_mbox_api_11, 2277 ixgbe_mbox_api_10, 2278 ixgbe_mbox_api_unknown 2279 }; 2280 int err, idx = 0; 2281 2282 spin_lock_bh(&adapter->mbx_lock); 2283 2284 while (api[idx] != ixgbe_mbox_api_unknown) { 2285 err = hw->mac.ops.negotiate_api_version(hw, api[idx]); 2286 if (!err) 2287 break; 2288 idx++; 2289 } 2290 2291 spin_unlock_bh(&adapter->mbx_lock); 2292 } 2293 2294 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 2295 { 2296 struct net_device *netdev = adapter->netdev; 2297 struct ixgbe_hw *hw = &adapter->hw; 2298 2299 ixgbevf_configure_msix(adapter); 2300 2301 spin_lock_bh(&adapter->mbx_lock); 2302 2303 if (is_valid_ether_addr(hw->mac.addr)) 2304 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 2305 else 2306 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 2307 2308 spin_unlock_bh(&adapter->mbx_lock); 2309 2310 smp_mb__before_atomic(); 2311 clear_bit(__IXGBEVF_DOWN, &adapter->state); 2312 ixgbevf_napi_enable_all(adapter); 2313 2314 /* clear any pending interrupts, may auto mask */ 2315 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2316 
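	/* re-arm device interrupts now that the rings and vectors are configured */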
ixgbevf_irq_enable(adapter); 2317 2318 /* enable transmits */ 2319 netif_tx_start_all_queues(netdev); 2320 2321 ixgbevf_save_reset_stats(adapter); 2322 ixgbevf_init_last_counter_stats(adapter); 2323 2324 hw->mac.get_link_status = 1; 2325 mod_timer(&adapter->service_timer, jiffies); 2326 } 2327 2328 void ixgbevf_up(struct ixgbevf_adapter *adapter) 2329 { 2330 ixgbevf_configure(adapter); 2331 2332 ixgbevf_up_complete(adapter); 2333 } 2334 2335 /** 2336 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 2337 * @rx_ring: ring to free buffers from 2338 **/ 2339 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) 2340 { 2341 u16 i = rx_ring->next_to_clean; 2342 2343 /* Free Rx ring sk_buff */ 2344 if (rx_ring->skb) { 2345 dev_kfree_skb(rx_ring->skb); 2346 rx_ring->skb = NULL; 2347 } 2348 2349 /* Free all the Rx ring pages */ 2350 while (i != rx_ring->next_to_alloc) { 2351 struct ixgbevf_rx_buffer *rx_buffer; 2352 2353 rx_buffer = &rx_ring->rx_buffer_info[i]; 2354 2355 /* Invalidate cache lines that may have been written to by 2356 * device so that we avoid corrupting memory. 2357 */ 2358 dma_sync_single_range_for_cpu(rx_ring->dev, 2359 rx_buffer->dma, 2360 rx_buffer->page_offset, 2361 ixgbevf_rx_bufsz(rx_ring), 2362 DMA_FROM_DEVICE); 2363 2364 /* free resources associated with mapping */ 2365 dma_unmap_page_attrs(rx_ring->dev, 2366 rx_buffer->dma, 2367 ixgbevf_rx_pg_size(rx_ring), 2368 DMA_FROM_DEVICE, 2369 IXGBEVF_RX_DMA_ATTR); 2370 2371 __page_frag_cache_drain(rx_buffer->page, 2372 rx_buffer->pagecnt_bias); 2373 2374 i++; 2375 if (i == rx_ring->count) 2376 i = 0; 2377 } 2378 2379 rx_ring->next_to_alloc = 0; 2380 rx_ring->next_to_clean = 0; 2381 rx_ring->next_to_use = 0; 2382 } 2383 2384 /** 2385 * ixgbevf_clean_tx_ring - Free Tx Buffers 2386 * @tx_ring: ring to be cleaned 2387 **/ 2388 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) 2389 { 2390 u16 i = tx_ring->next_to_clean; 2391 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 2392 2393 while (i != tx_ring->next_to_use) { 2394 union ixgbe_adv_tx_desc *eop_desc, *tx_desc; 2395 2396 /* Free all the Tx ring sk_buffs */ 2397 if (ring_is_xdp(tx_ring)) 2398 page_frag_free(tx_buffer->data); 2399 else 2400 dev_kfree_skb_any(tx_buffer->skb); 2401 2402 /* unmap skb header data */ 2403 dma_unmap_single(tx_ring->dev, 2404 dma_unmap_addr(tx_buffer, dma), 2405 dma_unmap_len(tx_buffer, len), 2406 DMA_TO_DEVICE); 2407 2408 /* check for eop_desc to determine the end of the packet */ 2409 eop_desc = tx_buffer->next_to_watch; 2410 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2411 2412 /* unmap remaining buffers */ 2413 while (tx_desc != eop_desc) { 2414 tx_buffer++; 2415 tx_desc++; 2416 i++; 2417 if (unlikely(i == tx_ring->count)) { 2418 i = 0; 2419 tx_buffer = tx_ring->tx_buffer_info; 2420 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 2421 } 2422 2423 /* unmap any remaining paged data */ 2424 if (dma_unmap_len(tx_buffer, len)) 2425 dma_unmap_page(tx_ring->dev, 2426 dma_unmap_addr(tx_buffer, dma), 2427 dma_unmap_len(tx_buffer, len), 2428 DMA_TO_DEVICE); 2429 } 2430 2431 /* move us one more past the eop_desc for start of next pkt */ 2432 tx_buffer++; 2433 i++; 2434 if (unlikely(i == tx_ring->count)) { 2435 i = 0; 2436 tx_buffer = tx_ring->tx_buffer_info; 2437 } 2438 } 2439 2440 /* reset next_to_use and next_to_clean */ 2441 tx_ring->next_to_use = 0; 2442 tx_ring->next_to_clean = 0; 2443 2444 } 2445 2446 /** 2447 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 2448 * @adapter: board private structure 2449 **/ 2450 
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 2451 { 2452 int i; 2453 2454 for (i = 0; i < adapter->num_rx_queues; i++) 2455 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); 2456 } 2457 2458 /** 2459 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 2460 * @adapter: board private structure 2461 **/ 2462 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 2463 { 2464 int i; 2465 2466 for (i = 0; i < adapter->num_tx_queues; i++) 2467 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); 2468 for (i = 0; i < adapter->num_xdp_queues; i++) 2469 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]); 2470 } 2471 2472 void ixgbevf_down(struct ixgbevf_adapter *adapter) 2473 { 2474 struct net_device *netdev = adapter->netdev; 2475 struct ixgbe_hw *hw = &adapter->hw; 2476 int i; 2477 2478 /* signal that we are down to the interrupt handler */ 2479 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) 2480 return; /* do nothing if already down */ 2481 2482 /* disable all enabled Rx queues */ 2483 for (i = 0; i < adapter->num_rx_queues; i++) 2484 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); 2485 2486 usleep_range(10000, 20000); 2487 2488 netif_tx_stop_all_queues(netdev); 2489 2490 /* call carrier off first to avoid false dev_watchdog timeouts */ 2491 netif_carrier_off(netdev); 2492 netif_tx_disable(netdev); 2493 2494 ixgbevf_irq_disable(adapter); 2495 2496 ixgbevf_napi_disable_all(adapter); 2497 2498 del_timer_sync(&adapter->service_timer); 2499 2500 /* disable transmits in the hardware now that interrupts are off */ 2501 for (i = 0; i < adapter->num_tx_queues; i++) { 2502 u8 reg_idx = adapter->tx_ring[i]->reg_idx; 2503 2504 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), 2505 IXGBE_TXDCTL_SWFLSH); 2506 } 2507 2508 for (i = 0; i < adapter->num_xdp_queues; i++) { 2509 u8 reg_idx = adapter->xdp_ring[i]->reg_idx; 2510 2511 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), 2512 IXGBE_TXDCTL_SWFLSH); 2513 } 2514 2515 if (!pci_channel_offline(adapter->pdev)) 2516 ixgbevf_reset(adapter); 2517 2518 ixgbevf_clean_all_tx_rings(adapter); 2519 ixgbevf_clean_all_rx_rings(adapter); 2520 } 2521 2522 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 2523 { 2524 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 2525 msleep(1); 2526 2527 ixgbevf_down(adapter); 2528 pci_set_master(adapter->pdev); 2529 ixgbevf_up(adapter); 2530 2531 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 2532 } 2533 2534 void ixgbevf_reset(struct ixgbevf_adapter *adapter) 2535 { 2536 struct ixgbe_hw *hw = &adapter->hw; 2537 struct net_device *netdev = adapter->netdev; 2538 2539 if (hw->mac.ops.reset_hw(hw)) { 2540 hw_dbg(hw, "PF still resetting\n"); 2541 } else { 2542 hw->mac.ops.init_hw(hw); 2543 ixgbevf_negotiate_api(adapter); 2544 } 2545 2546 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 2547 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 2548 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 2549 } 2550 2551 adapter->last_reset = jiffies; 2552 } 2553 2554 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 2555 int vectors) 2556 { 2557 int vector_threshold; 2558 2559 /* We'll want at least 2 (vector_threshold): 2560 * 1) TxQ[0] + RxQ[0] handler 2561 * 2) Other (Link Status Change, etc.) 2562 */ 2563 vector_threshold = MIN_MSIX_COUNT; 2564 2565 /* The more we get, the more we will assign to Tx/Rx Cleanup 2566 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 
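	 * Note that pci_enable_msix_range() may grant anywhere between
	 * vector_threshold and the number of vectors requested.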
2567 * Right now, we simply care about how many we'll get; we'll 2568 * set them up later while requesting irq's. 2569 */ 2570 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 2571 vector_threshold, vectors); 2572 2573 if (vectors < 0) { 2574 dev_err(&adapter->pdev->dev, 2575 "Unable to allocate MSI-X interrupts\n"); 2576 kfree(adapter->msix_entries); 2577 adapter->msix_entries = NULL; 2578 return vectors; 2579 } 2580 2581 /* Adjust for only the vectors we'll use, which is minimum 2582 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 2583 * vectors we were allocated. 2584 */ 2585 adapter->num_msix_vectors = vectors; 2586 2587 return 0; 2588 } 2589 2590 /** 2591 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 2592 * @adapter: board private structure to initialize 2593 * 2594 * This is the top level queue allocation routine. The order here is very 2595 * important, starting with the "most" number of features turned on at once, 2596 * and ending with the smallest set of features. This way large combinations 2597 * can be allocated if they're turned on, and smaller combinations are the 2598 * fall through conditions. 2599 * 2600 **/ 2601 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 2602 { 2603 struct ixgbe_hw *hw = &adapter->hw; 2604 unsigned int def_q = 0; 2605 unsigned int num_tcs = 0; 2606 int err; 2607 2608 /* Start with base case */ 2609 adapter->num_rx_queues = 1; 2610 adapter->num_tx_queues = 1; 2611 adapter->num_xdp_queues = 0; 2612 2613 spin_lock_bh(&adapter->mbx_lock); 2614 2615 /* fetch queue configuration from the PF */ 2616 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2617 2618 spin_unlock_bh(&adapter->mbx_lock); 2619 2620 if (err) 2621 return; 2622 2623 /* we need as many queues as traffic classes */ 2624 if (num_tcs > 1) { 2625 adapter->num_rx_queues = num_tcs; 2626 } else { 2627 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES); 2628 2629 switch (hw->api_version) { 2630 case ixgbe_mbox_api_11: 2631 case ixgbe_mbox_api_12: 2632 case ixgbe_mbox_api_13: 2633 case ixgbe_mbox_api_14: 2634 if (adapter->xdp_prog && 2635 hw->mac.max_tx_queues == rss) 2636 rss = rss > 3 ? 2 : 1; 2637 2638 adapter->num_rx_queues = rss; 2639 adapter->num_tx_queues = rss; 2640 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0; 2641 default: 2642 break; 2643 } 2644 } 2645 } 2646 2647 /** 2648 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 2649 * @adapter: board private structure to initialize 2650 * 2651 * Attempt to configure the interrupts using the best available 2652 * capabilities of the hardware and the kernel. 2653 **/ 2654 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 2655 { 2656 int vector, v_budget; 2657 2658 /* It's easy to be greedy for MSI-X vectors, but it really 2659 * doesn't do us much good if we have a lot more vectors 2660 * than CPU's. So let's be conservative and only ask for 2661 * (roughly) the same number of vectors as there are CPU's. 2662 * The default is to use pairs of vectors. 
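	 * For example, with four Tx and four Rx queues on an eight-CPU system
	 * the budget is max(4, 4) = 4 queue vectors plus NON_Q_VECTORS for the
	 * mailbox/link interrupt.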
2663 */ 2664 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 2665 v_budget = min_t(int, v_budget, num_online_cpus()); 2666 v_budget += NON_Q_VECTORS; 2667 2668 adapter->msix_entries = kcalloc(v_budget, 2669 sizeof(struct msix_entry), GFP_KERNEL); 2670 if (!adapter->msix_entries) 2671 return -ENOMEM; 2672 2673 for (vector = 0; vector < v_budget; vector++) 2674 adapter->msix_entries[vector].entry = vector; 2675 2676 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver 2677 * does not support any other modes, so we will simply fail here. Note 2678 * that we clean up the msix_entries pointer else-where. 2679 */ 2680 return ixgbevf_acquire_msix_vectors(adapter, v_budget); 2681 } 2682 2683 static void ixgbevf_add_ring(struct ixgbevf_ring *ring, 2684 struct ixgbevf_ring_container *head) 2685 { 2686 ring->next = head->ring; 2687 head->ring = ring; 2688 head->count++; 2689 } 2690 2691 /** 2692 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector 2693 * @adapter: board private structure to initialize 2694 * @v_idx: index of vector in adapter struct 2695 * @txr_count: number of Tx rings for q vector 2696 * @txr_idx: index of first Tx ring to assign 2697 * @xdp_count: total number of XDP rings to allocate 2698 * @xdp_idx: index of first XDP ring to allocate 2699 * @rxr_count: number of Rx rings for q vector 2700 * @rxr_idx: index of first Rx ring to assign 2701 * 2702 * We allocate one q_vector. If allocation fails we return -ENOMEM. 2703 **/ 2704 static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx, 2705 int txr_count, int txr_idx, 2706 int xdp_count, int xdp_idx, 2707 int rxr_count, int rxr_idx) 2708 { 2709 struct ixgbevf_q_vector *q_vector; 2710 int reg_idx = txr_idx + xdp_idx; 2711 struct ixgbevf_ring *ring; 2712 int ring_count, size; 2713 2714 ring_count = txr_count + xdp_count + rxr_count; 2715 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count); 2716 2717 /* allocate q_vector and rings */ 2718 q_vector = kzalloc(size, GFP_KERNEL); 2719 if (!q_vector) 2720 return -ENOMEM; 2721 2722 /* initialize NAPI */ 2723 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); 2724 2725 /* tie q_vector and adapter together */ 2726 adapter->q_vector[v_idx] = q_vector; 2727 q_vector->adapter = adapter; 2728 q_vector->v_idx = v_idx; 2729 2730 /* initialize pointer to rings */ 2731 ring = q_vector->ring; 2732 2733 while (txr_count) { 2734 /* assign generic ring traits */ 2735 ring->dev = &adapter->pdev->dev; 2736 ring->netdev = adapter->netdev; 2737 2738 /* configure backlink on ring */ 2739 ring->q_vector = q_vector; 2740 2741 /* update q_vector Tx values */ 2742 ixgbevf_add_ring(ring, &q_vector->tx); 2743 2744 /* apply Tx specific ring traits */ 2745 ring->count = adapter->tx_ring_count; 2746 ring->queue_index = txr_idx; 2747 ring->reg_idx = reg_idx; 2748 2749 /* assign ring to adapter */ 2750 adapter->tx_ring[txr_idx] = ring; 2751 2752 /* update count and index */ 2753 txr_count--; 2754 txr_idx++; 2755 reg_idx++; 2756 2757 /* push pointer to next ring */ 2758 ring++; 2759 } 2760 2761 while (xdp_count) { 2762 /* assign generic ring traits */ 2763 ring->dev = &adapter->pdev->dev; 2764 ring->netdev = adapter->netdev; 2765 2766 /* configure backlink on ring */ 2767 ring->q_vector = q_vector; 2768 2769 /* update q_vector Tx values */ 2770 ixgbevf_add_ring(ring, &q_vector->tx); 2771 2772 /* apply Tx specific ring traits */ 2773 ring->count = adapter->tx_ring_count; 2774 ring->queue_index = xdp_idx; 2775 ring->reg_idx = 
reg_idx; 2776 set_ring_xdp(ring); 2777 2778 /* assign ring to adapter */ 2779 adapter->xdp_ring[xdp_idx] = ring; 2780 2781 /* update count and index */ 2782 xdp_count--; 2783 xdp_idx++; 2784 reg_idx++; 2785 2786 /* push pointer to next ring */ 2787 ring++; 2788 } 2789 2790 while (rxr_count) { 2791 /* assign generic ring traits */ 2792 ring->dev = &adapter->pdev->dev; 2793 ring->netdev = adapter->netdev; 2794 2795 /* configure backlink on ring */ 2796 ring->q_vector = q_vector; 2797 2798 /* update q_vector Rx values */ 2799 ixgbevf_add_ring(ring, &q_vector->rx); 2800 2801 /* apply Rx specific ring traits */ 2802 ring->count = adapter->rx_ring_count; 2803 ring->queue_index = rxr_idx; 2804 ring->reg_idx = rxr_idx; 2805 2806 /* assign ring to adapter */ 2807 adapter->rx_ring[rxr_idx] = ring; 2808 2809 /* update count and index */ 2810 rxr_count--; 2811 rxr_idx++; 2812 2813 /* push pointer to next ring */ 2814 ring++; 2815 } 2816 2817 return 0; 2818 } 2819 2820 /** 2821 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector 2822 * @adapter: board private structure to initialize 2823 * @v_idx: index of vector in adapter struct 2824 * 2825 * This function frees the memory allocated to the q_vector. In addition if 2826 * NAPI is enabled it will delete any references to the NAPI struct prior 2827 * to freeing the q_vector. 2828 **/ 2829 static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx) 2830 { 2831 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx]; 2832 struct ixgbevf_ring *ring; 2833 2834 ixgbevf_for_each_ring(ring, q_vector->tx) { 2835 if (ring_is_xdp(ring)) 2836 adapter->xdp_ring[ring->queue_index] = NULL; 2837 else 2838 adapter->tx_ring[ring->queue_index] = NULL; 2839 } 2840 2841 ixgbevf_for_each_ring(ring, q_vector->rx) 2842 adapter->rx_ring[ring->queue_index] = NULL; 2843 2844 adapter->q_vector[v_idx] = NULL; 2845 netif_napi_del(&q_vector->napi); 2846 2847 /* ixgbevf_get_stats() might access the rings on this vector, 2848 * we must wait a grace period before freeing it. 2849 */ 2850 kfree_rcu(q_vector, rcu); 2851 } 2852 2853 /** 2854 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 2855 * @adapter: board private structure to initialize 2856 * 2857 * We allocate one q_vector per queue interrupt. If allocation fails we 2858 * return -ENOMEM. 
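 * When enough vectors are available, Rx rings are given dedicated vectors
 * first and the remaining vectors service the Tx and XDP rings; otherwise
 * all ring types are distributed across the vectors that were granted.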
2859 **/ 2860 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2861 { 2862 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2863 int rxr_remaining = adapter->num_rx_queues; 2864 int txr_remaining = adapter->num_tx_queues; 2865 int xdp_remaining = adapter->num_xdp_queues; 2866 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; 2867 int err; 2868 2869 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { 2870 for (; rxr_remaining; v_idx++, q_vectors--) { 2871 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); 2872 2873 err = ixgbevf_alloc_q_vector(adapter, v_idx, 2874 0, 0, 0, 0, rqpv, rxr_idx); 2875 if (err) 2876 goto err_out; 2877 2878 /* update counts and index */ 2879 rxr_remaining -= rqpv; 2880 rxr_idx += rqpv; 2881 } 2882 } 2883 2884 for (; q_vectors; v_idx++, q_vectors--) { 2885 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); 2886 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); 2887 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors); 2888 2889 err = ixgbevf_alloc_q_vector(adapter, v_idx, 2890 tqpv, txr_idx, 2891 xqpv, xdp_idx, 2892 rqpv, rxr_idx); 2893 2894 if (err) 2895 goto err_out; 2896 2897 /* update counts and index */ 2898 rxr_remaining -= rqpv; 2899 rxr_idx += rqpv; 2900 txr_remaining -= tqpv; 2901 txr_idx += tqpv; 2902 xdp_remaining -= xqpv; 2903 xdp_idx += xqpv; 2904 } 2905 2906 return 0; 2907 2908 err_out: 2909 while (v_idx) { 2910 v_idx--; 2911 ixgbevf_free_q_vector(adapter, v_idx); 2912 } 2913 2914 return -ENOMEM; 2915 } 2916 2917 /** 2918 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2919 * @adapter: board private structure to initialize 2920 * 2921 * This function frees the memory allocated to the q_vectors. In addition if 2922 * NAPI is enabled it will delete any references to the NAPI struct prior 2923 * to freeing the q_vector. 2924 **/ 2925 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2926 { 2927 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2928 2929 while (q_vectors) { 2930 q_vectors--; 2931 ixgbevf_free_q_vector(adapter, q_vectors); 2932 } 2933 } 2934 2935 /** 2936 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2937 * @adapter: board private structure 2938 * 2939 **/ 2940 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2941 { 2942 if (!adapter->msix_entries) 2943 return; 2944 2945 pci_disable_msix(adapter->pdev); 2946 kfree(adapter->msix_entries); 2947 adapter->msix_entries = NULL; 2948 } 2949 2950 /** 2951 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2952 * @adapter: board private structure to initialize 2953 * 2954 **/ 2955 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2956 { 2957 int err; 2958 2959 /* Number of supported queues */ 2960 ixgbevf_set_num_queues(adapter); 2961 2962 err = ixgbevf_set_interrupt_capability(adapter); 2963 if (err) { 2964 hw_dbg(&adapter->hw, 2965 "Unable to setup interrupt capabilities\n"); 2966 goto err_set_interrupt; 2967 } 2968 2969 err = ixgbevf_alloc_q_vectors(adapter); 2970 if (err) { 2971 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); 2972 goto err_alloc_q_vectors; 2973 } 2974 2975 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n", 2976 (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", 2977 adapter->num_rx_queues, adapter->num_tx_queues, 2978 adapter->num_xdp_queues); 2979 2980 set_bit(__IXGBEVF_DOWN, &adapter->state); 2981 2982 return 0; 2983 err_alloc_q_vectors: 2984 ixgbevf_reset_interrupt_capability(adapter); 2985 err_set_interrupt: 2986 return err; 2987 } 2988 2989 /** 2990 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2991 * @adapter: board private structure to clear interrupt scheme on 2992 * 2993 * We go through and clear interrupt specific resources and reset the structure 2994 * to pre-load conditions 2995 **/ 2996 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2997 { 2998 adapter->num_tx_queues = 0; 2999 adapter->num_xdp_queues = 0; 3000 adapter->num_rx_queues = 0; 3001 3002 ixgbevf_free_q_vectors(adapter); 3003 ixgbevf_reset_interrupt_capability(adapter); 3004 } 3005 3006 /** 3007 * ixgbevf_sw_init - Initialize general software structures 3008 * @adapter: board private structure to initialize 3009 * 3010 * ixgbevf_sw_init initializes the Adapter private data structure. 3011 * Fields are initialized based on PCI device information and 3012 * OS network device settings (MTU size). 3013 **/ 3014 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 3015 { 3016 struct ixgbe_hw *hw = &adapter->hw; 3017 struct pci_dev *pdev = adapter->pdev; 3018 struct net_device *netdev = adapter->netdev; 3019 int err; 3020 3021 /* PCI config space info */ 3022 hw->vendor_id = pdev->vendor; 3023 hw->device_id = pdev->device; 3024 hw->revision_id = pdev->revision; 3025 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3026 hw->subsystem_device_id = pdev->subsystem_device; 3027 3028 hw->mbx.ops.init_params(hw); 3029 3030 if (hw->mac.type >= ixgbe_mac_X550_vf) { 3031 err = ixgbevf_init_rss_key(adapter); 3032 if (err) 3033 goto out; 3034 } 3035 3036 /* assume legacy case in which PF would only give VF 2 queues */ 3037 hw->mac.max_tx_queues = 2; 3038 hw->mac.max_rx_queues = 2; 3039 3040 /* lock to protect mailbox accesses */ 3041 spin_lock_init(&adapter->mbx_lock); 3042 3043 err = hw->mac.ops.reset_hw(hw); 3044 if (err) { 3045 dev_info(&pdev->dev, 3046 "PF still in reset state. 
Is the PF interface up?\n"); 3047 } else { 3048 err = hw->mac.ops.init_hw(hw); 3049 if (err) { 3050 pr_err("init_shared_code failed: %d\n", err); 3051 goto out; 3052 } 3053 ixgbevf_negotiate_api(adapter); 3054 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 3055 if (err) 3056 dev_info(&pdev->dev, "Error reading MAC address\n"); 3057 else if (is_zero_ether_addr(adapter->hw.mac.addr)) 3058 dev_info(&pdev->dev, 3059 "MAC address not assigned by administrator.\n"); 3060 ether_addr_copy(netdev->dev_addr, hw->mac.addr); 3061 } 3062 3063 if (!is_valid_ether_addr(netdev->dev_addr)) { 3064 dev_info(&pdev->dev, "Assigning random MAC address\n"); 3065 eth_hw_addr_random(netdev); 3066 ether_addr_copy(hw->mac.addr, netdev->dev_addr); 3067 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); 3068 } 3069 3070 /* Enable dynamic interrupt throttling rates */ 3071 adapter->rx_itr_setting = 1; 3072 adapter->tx_itr_setting = 1; 3073 3074 /* set default ring sizes */ 3075 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 3076 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 3077 3078 set_bit(__IXGBEVF_DOWN, &adapter->state); 3079 return 0; 3080 3081 out: 3082 return err; 3083 } 3084 3085 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 3086 { \ 3087 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 3088 if (current_counter < last_counter) \ 3089 counter += 0x100000000LL; \ 3090 last_counter = current_counter; \ 3091 counter &= 0xFFFFFFFF00000000LL; \ 3092 counter |= current_counter; \ 3093 } 3094 3095 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 3096 { \ 3097 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 3098 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 3099 u64 current_counter = (current_counter_msb << 32) | \ 3100 current_counter_lsb; \ 3101 if (current_counter < last_counter) \ 3102 counter += 0x1000000000LL; \ 3103 last_counter = current_counter; \ 3104 counter &= 0xFFFFFFF000000000LL; \ 3105 counter |= current_counter; \ 3106 } 3107 /** 3108 * ixgbevf_update_stats - Update the board statistics counters. 
3109 * @adapter: board private structure 3110 **/ 3111 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 3112 { 3113 struct ixgbe_hw *hw = &adapter->hw; 3114 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; 3115 u64 alloc_rx_page = 0, hw_csum_rx_error = 0; 3116 int i; 3117 3118 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 3119 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 3120 return; 3121 3122 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 3123 adapter->stats.vfgprc); 3124 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 3125 adapter->stats.vfgptc); 3126 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3127 adapter->stats.last_vfgorc, 3128 adapter->stats.vfgorc); 3129 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3130 adapter->stats.last_vfgotc, 3131 adapter->stats.vfgotc); 3132 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 3133 adapter->stats.vfmprc); 3134 3135 for (i = 0; i < adapter->num_rx_queues; i++) { 3136 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; 3137 3138 hw_csum_rx_error += rx_ring->rx_stats.csum_err; 3139 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; 3140 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; 3141 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; 3142 } 3143 3144 adapter->hw_csum_rx_error = hw_csum_rx_error; 3145 adapter->alloc_rx_page_failed = alloc_rx_page_failed; 3146 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; 3147 adapter->alloc_rx_page = alloc_rx_page; 3148 } 3149 3150 /** 3151 * ixgbevf_service_timer - Timer Call-back 3152 * @t: pointer to timer_list struct 3153 **/ 3154 static void ixgbevf_service_timer(struct timer_list *t) 3155 { 3156 struct ixgbevf_adapter *adapter = from_timer(adapter, t, 3157 service_timer); 3158 3159 /* Reset the timer */ 3160 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); 3161 3162 ixgbevf_service_event_schedule(adapter); 3163 } 3164 3165 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) 3166 { 3167 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) 3168 return; 3169 3170 rtnl_lock(); 3171 /* If we're already down or resetting, just bail */ 3172 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 3173 test_bit(__IXGBEVF_REMOVING, &adapter->state) || 3174 test_bit(__IXGBEVF_RESETTING, &adapter->state)) { 3175 rtnl_unlock(); 3176 return; 3177 } 3178 3179 adapter->tx_timeout_count++; 3180 3181 ixgbevf_reinit_locked(adapter); 3182 rtnl_unlock(); 3183 } 3184 3185 /** 3186 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts 3187 * @adapter: pointer to the device adapter structure 3188 * 3189 * This function serves two purposes. First it strobes the interrupt lines 3190 * in order to make certain interrupts are occurring. Secondly it sets the 3191 * bits needed to check for TX hangs. As a result we should immediately 3192 * determine if a hang has occurred. 
3193 **/ 3194 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) 3195 { 3196 struct ixgbe_hw *hw = &adapter->hw; 3197 u32 eics = 0; 3198 int i; 3199 3200 /* If we're down or resetting, just bail */ 3201 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 3202 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 3203 return; 3204 3205 /* Force detection of hung controller */ 3206 if (netif_carrier_ok(adapter->netdev)) { 3207 for (i = 0; i < adapter->num_tx_queues; i++) 3208 set_check_for_tx_hang(adapter->tx_ring[i]); 3209 for (i = 0; i < adapter->num_xdp_queues; i++) 3210 set_check_for_tx_hang(adapter->xdp_ring[i]); 3211 } 3212 3213 /* get one bit for every active Tx/Rx interrupt vector */ 3214 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 3215 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 3216 3217 if (qv->rx.ring || qv->tx.ring) 3218 eics |= BIT(i); 3219 } 3220 3221 /* Cause software interrupt to ensure rings are cleaned */ 3222 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 3223 } 3224 3225 /** 3226 * ixgbevf_watchdog_update_link - update the link status 3227 * @adapter: pointer to the device adapter structure 3228 **/ 3229 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) 3230 { 3231 struct ixgbe_hw *hw = &adapter->hw; 3232 u32 link_speed = adapter->link_speed; 3233 bool link_up = adapter->link_up; 3234 s32 err; 3235 3236 spin_lock_bh(&adapter->mbx_lock); 3237 3238 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 3239 3240 spin_unlock_bh(&adapter->mbx_lock); 3241 3242 /* if check for link returns error we will need to reset */ 3243 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { 3244 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); 3245 link_up = false; 3246 } 3247 3248 adapter->link_up = link_up; 3249 adapter->link_speed = link_speed; 3250 } 3251 3252 /** 3253 * ixgbevf_watchdog_link_is_up - update netif_carrier status and 3254 * print link up message 3255 * @adapter: pointer to the device adapter structure 3256 **/ 3257 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) 3258 { 3259 struct net_device *netdev = adapter->netdev; 3260 3261 /* only continue if link was previously down */ 3262 if (netif_carrier_ok(netdev)) 3263 return; 3264 3265 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", 3266 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 3267 "10 Gbps" : 3268 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? 3269 "1 Gbps" : 3270 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? 
3271 "100 Mbps" : 3272 "unknown speed"); 3273 3274 netif_carrier_on(netdev); 3275 } 3276 3277 /** 3278 * ixgbevf_watchdog_link_is_down - update netif_carrier status and 3279 * print link down message 3280 * @adapter: pointer to the adapter structure 3281 **/ 3282 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) 3283 { 3284 struct net_device *netdev = adapter->netdev; 3285 3286 adapter->link_speed = 0; 3287 3288 /* only continue if link was up previously */ 3289 if (!netif_carrier_ok(netdev)) 3290 return; 3291 3292 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 3293 3294 netif_carrier_off(netdev); 3295 } 3296 3297 /** 3298 * ixgbevf_watchdog_subtask - worker thread to bring link up 3299 * @adapter: board private structure 3300 **/ 3301 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) 3302 { 3303 /* if interface is down do nothing */ 3304 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 3305 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 3306 return; 3307 3308 ixgbevf_watchdog_update_link(adapter); 3309 3310 if (adapter->link_up) 3311 ixgbevf_watchdog_link_is_up(adapter); 3312 else 3313 ixgbevf_watchdog_link_is_down(adapter); 3314 3315 ixgbevf_update_stats(adapter); 3316 } 3317 3318 /** 3319 * ixgbevf_service_task - manages and runs subtasks 3320 * @work: pointer to work_struct containing our data 3321 **/ 3322 static void ixgbevf_service_task(struct work_struct *work) 3323 { 3324 struct ixgbevf_adapter *adapter = container_of(work, 3325 struct ixgbevf_adapter, 3326 service_task); 3327 struct ixgbe_hw *hw = &adapter->hw; 3328 3329 if (IXGBE_REMOVED(hw->hw_addr)) { 3330 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 3331 rtnl_lock(); 3332 ixgbevf_down(adapter); 3333 rtnl_unlock(); 3334 } 3335 return; 3336 } 3337 3338 ixgbevf_queue_reset_subtask(adapter); 3339 ixgbevf_reset_subtask(adapter); 3340 ixgbevf_watchdog_subtask(adapter); 3341 ixgbevf_check_hang_subtask(adapter); 3342 3343 ixgbevf_service_event_complete(adapter); 3344 } 3345 3346 /** 3347 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 3348 * @tx_ring: Tx descriptor ring for a specific queue 3349 * 3350 * Free all transmit software resources 3351 **/ 3352 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) 3353 { 3354 ixgbevf_clean_tx_ring(tx_ring); 3355 3356 vfree(tx_ring->tx_buffer_info); 3357 tx_ring->tx_buffer_info = NULL; 3358 3359 /* if not set, then don't free */ 3360 if (!tx_ring->desc) 3361 return; 3362 3363 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, 3364 tx_ring->dma); 3365 3366 tx_ring->desc = NULL; 3367 } 3368 3369 /** 3370 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 3371 * @adapter: board private structure 3372 * 3373 * Free all transmit software resources 3374 **/ 3375 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 3376 { 3377 int i; 3378 3379 for (i = 0; i < adapter->num_tx_queues; i++) 3380 if (adapter->tx_ring[i]->desc) 3381 ixgbevf_free_tx_resources(adapter->tx_ring[i]); 3382 for (i = 0; i < adapter->num_xdp_queues; i++) 3383 if (adapter->xdp_ring[i]->desc) 3384 ixgbevf_free_tx_resources(adapter->xdp_ring[i]); 3385 } 3386 3387 /** 3388 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 3389 * @tx_ring: Tx descriptor ring (for a specific queue) to setup 3390 * 3391 * Return 0 on success, negative on failure 3392 **/ 3393 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) 3394 { 3395 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 
3396 int size; 3397 3398 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 3399 tx_ring->tx_buffer_info = vmalloc(size); 3400 if (!tx_ring->tx_buffer_info) 3401 goto err; 3402 3403 u64_stats_init(&tx_ring->syncp); 3404 3405 /* round up to nearest 4K */ 3406 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 3407 tx_ring->size = ALIGN(tx_ring->size, 4096); 3408 3409 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, 3410 &tx_ring->dma, GFP_KERNEL); 3411 if (!tx_ring->desc) 3412 goto err; 3413 3414 return 0; 3415 3416 err: 3417 vfree(tx_ring->tx_buffer_info); 3418 tx_ring->tx_buffer_info = NULL; 3419 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); 3420 return -ENOMEM; 3421 } 3422 3423 /** 3424 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 3425 * @adapter: board private structure 3426 * 3427 * If this function returns with an error, then it's possible one or 3428 * more of the rings is populated (while the rest are not). It is the 3429 * callers duty to clean those orphaned rings. 3430 * 3431 * Return 0 on success, negative on failure 3432 **/ 3433 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 3434 { 3435 int i, j = 0, err = 0; 3436 3437 for (i = 0; i < adapter->num_tx_queues; i++) { 3438 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); 3439 if (!err) 3440 continue; 3441 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); 3442 goto err_setup_tx; 3443 } 3444 3445 for (j = 0; j < adapter->num_xdp_queues; j++) { 3446 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]); 3447 if (!err) 3448 continue; 3449 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); 3450 goto err_setup_tx; 3451 } 3452 3453 return 0; 3454 err_setup_tx: 3455 /* rewind the index freeing the rings as we go */ 3456 while (j--) 3457 ixgbevf_free_tx_resources(adapter->xdp_ring[j]); 3458 while (i--) 3459 ixgbevf_free_tx_resources(adapter->tx_ring[i]); 3460 3461 return err; 3462 } 3463 3464 /** 3465 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 3466 * @adapter: board private structure 3467 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 3468 * 3469 * Returns 0 on success, negative on failure 3470 **/ 3471 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 3472 struct ixgbevf_ring *rx_ring) 3473 { 3474 int size; 3475 3476 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 3477 rx_ring->rx_buffer_info = vmalloc(size); 3478 if (!rx_ring->rx_buffer_info) 3479 goto err; 3480 3481 u64_stats_init(&rx_ring->syncp); 3482 3483 /* Round up to nearest 4K */ 3484 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 3485 rx_ring->size = ALIGN(rx_ring->size, 4096); 3486 3487 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, 3488 &rx_ring->dma, GFP_KERNEL); 3489 3490 if (!rx_ring->desc) 3491 goto err; 3492 3493 /* XDP RX-queue info */ 3494 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, 3495 rx_ring->queue_index, 0) < 0) 3496 goto err; 3497 3498 rx_ring->xdp_prog = adapter->xdp_prog; 3499 3500 return 0; 3501 err: 3502 vfree(rx_ring->rx_buffer_info); 3503 rx_ring->rx_buffer_info = NULL; 3504 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); 3505 return -ENOMEM; 3506 } 3507 3508 /** 3509 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 3510 * @adapter: board private structure 3511 * 3512 * If this function returns with an error, then it's possible one or 
3513 * more of the rings is populated (while the rest are not). It is the 3514 * callers duty to clean those orphaned rings. 3515 * 3516 * Return 0 on success, negative on failure 3517 **/ 3518 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 3519 { 3520 int i, err = 0; 3521 3522 for (i = 0; i < adapter->num_rx_queues; i++) { 3523 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); 3524 if (!err) 3525 continue; 3526 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); 3527 goto err_setup_rx; 3528 } 3529 3530 return 0; 3531 err_setup_rx: 3532 /* rewind the index freeing the rings as we go */ 3533 while (i--) 3534 ixgbevf_free_rx_resources(adapter->rx_ring[i]); 3535 return err; 3536 } 3537 3538 /** 3539 * ixgbevf_free_rx_resources - Free Rx Resources 3540 * @rx_ring: ring to clean the resources from 3541 * 3542 * Free all receive software resources 3543 **/ 3544 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) 3545 { 3546 ixgbevf_clean_rx_ring(rx_ring); 3547 3548 rx_ring->xdp_prog = NULL; 3549 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 3550 vfree(rx_ring->rx_buffer_info); 3551 rx_ring->rx_buffer_info = NULL; 3552 3553 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, 3554 rx_ring->dma); 3555 3556 rx_ring->desc = NULL; 3557 } 3558 3559 /** 3560 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 3561 * @adapter: board private structure 3562 * 3563 * Free all receive software resources 3564 **/ 3565 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 3566 { 3567 int i; 3568 3569 for (i = 0; i < adapter->num_rx_queues; i++) 3570 if (adapter->rx_ring[i]->desc) 3571 ixgbevf_free_rx_resources(adapter->rx_ring[i]); 3572 } 3573 3574 /** 3575 * ixgbevf_open - Called when a network interface is made active 3576 * @netdev: network interface device structure 3577 * 3578 * Returns 0 on success, negative value on failure 3579 * 3580 * The open entry point is called when a network interface is made 3581 * active by the system (IFF_UP). At this point all resources needed 3582 * for transmit and receive operations are allocated, the interrupt 3583 * handler is registered with the OS, the watchdog timer is started, 3584 * and the stack is notified that the interface is ready. 3585 **/ 3586 int ixgbevf_open(struct net_device *netdev) 3587 { 3588 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3589 struct ixgbe_hw *hw = &adapter->hw; 3590 int err; 3591 3592 /* A previous failure to open the device because of a lack of 3593 * available MSIX vector resources may have reset the number 3594 * of msix vectors variable to zero. The only way to recover 3595 * is to unload/reload the driver and hope that the system has 3596 * been able to recover some MSIX vector resources. 3597 */ 3598 if (!adapter->num_msix_vectors) 3599 return -ENOMEM; 3600 3601 if (hw->adapter_stopped) { 3602 ixgbevf_reset(adapter); 3603 /* if adapter is still stopped then PF isn't up and 3604 * the VF can't start. 
3605 */ 3606 if (hw->adapter_stopped) { 3607 err = IXGBE_ERR_MBX; 3608 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); 3609 goto err_setup_reset; 3610 } 3611 } 3612 3613 /* disallow open during test */ 3614 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 3615 return -EBUSY; 3616 3617 netif_carrier_off(netdev); 3618 3619 /* allocate transmit descriptors */ 3620 err = ixgbevf_setup_all_tx_resources(adapter); 3621 if (err) 3622 goto err_setup_tx; 3623 3624 /* allocate receive descriptors */ 3625 err = ixgbevf_setup_all_rx_resources(adapter); 3626 if (err) 3627 goto err_setup_rx; 3628 3629 ixgbevf_configure(adapter); 3630 3631 err = ixgbevf_request_irq(adapter); 3632 if (err) 3633 goto err_req_irq; 3634 3635 /* Notify the stack of the actual queue counts. */ 3636 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 3637 if (err) 3638 goto err_set_queues; 3639 3640 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 3641 if (err) 3642 goto err_set_queues; 3643 3644 ixgbevf_up_complete(adapter); 3645 3646 return 0; 3647 3648 err_set_queues: 3649 ixgbevf_free_irq(adapter); 3650 err_req_irq: 3651 ixgbevf_free_all_rx_resources(adapter); 3652 err_setup_rx: 3653 ixgbevf_free_all_tx_resources(adapter); 3654 err_setup_tx: 3655 ixgbevf_reset(adapter); 3656 err_setup_reset: 3657 3658 return err; 3659 } 3660 3661 /** 3662 * ixgbevf_close_suspend - actions necessary to both suspend and close flows 3663 * @adapter: the private adapter struct 3664 * 3665 * This function should contain the necessary work common to both suspending 3666 * and closing of the device. 3667 */ 3668 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter) 3669 { 3670 ixgbevf_down(adapter); 3671 ixgbevf_free_irq(adapter); 3672 ixgbevf_free_all_tx_resources(adapter); 3673 ixgbevf_free_all_rx_resources(adapter); 3674 } 3675 3676 /** 3677 * ixgbevf_close - Disables a network interface 3678 * @netdev: network interface device structure 3679 * 3680 * Returns 0, this is not allowed to fail 3681 * 3682 * The close entry point is called when an interface is de-activated 3683 * by the OS. The hardware is still under the drivers control, but 3684 * needs to be disabled. A global MAC reset is issued to stop the 3685 * hardware, and all transmit and receive resources are freed. 3686 **/ 3687 int ixgbevf_close(struct net_device *netdev) 3688 { 3689 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3690 3691 if (netif_device_present(netdev)) 3692 ixgbevf_close_suspend(adapter); 3693 3694 return 0; 3695 } 3696 3697 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) 3698 { 3699 struct net_device *dev = adapter->netdev; 3700 3701 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, 3702 &adapter->state)) 3703 return; 3704 3705 /* if interface is down do nothing */ 3706 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 3707 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 3708 return; 3709 3710 /* Hardware has to reinitialize queues and interrupts to 3711 * match packet buffer alignment. Unfortunately, the 3712 * hardware is not flexible enough to do this dynamically. 
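	 * Instead the interface is closed, the interrupt scheme is rebuilt
	 * and the interface is reopened, all under the rtnl lock.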
3713 */ 3714 rtnl_lock(); 3715 3716 if (netif_running(dev)) 3717 ixgbevf_close(dev); 3718 3719 ixgbevf_clear_interrupt_scheme(adapter); 3720 ixgbevf_init_interrupt_scheme(adapter); 3721 3722 if (netif_running(dev)) 3723 ixgbevf_open(dev); 3724 3725 rtnl_unlock(); 3726 } 3727 3728 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 3729 u32 vlan_macip_lens, u32 fceof_saidx, 3730 u32 type_tucmd, u32 mss_l4len_idx) 3731 { 3732 struct ixgbe_adv_tx_context_desc *context_desc; 3733 u16 i = tx_ring->next_to_use; 3734 3735 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 3736 3737 i++; 3738 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 3739 3740 /* set bits to identify this as an advanced context descriptor */ 3741 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 3742 3743 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 3744 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); 3745 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 3746 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3747 } 3748 3749 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 3750 struct ixgbevf_tx_buffer *first, 3751 u8 *hdr_len, 3752 struct ixgbevf_ipsec_tx_data *itd) 3753 { 3754 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 3755 struct sk_buff *skb = first->skb; 3756 union { 3757 struct iphdr *v4; 3758 struct ipv6hdr *v6; 3759 unsigned char *hdr; 3760 } ip; 3761 union { 3762 struct tcphdr *tcp; 3763 unsigned char *hdr; 3764 } l4; 3765 u32 paylen, l4_offset; 3766 u32 fceof_saidx = 0; 3767 int err; 3768 3769 if (skb->ip_summed != CHECKSUM_PARTIAL) 3770 return 0; 3771 3772 if (!skb_is_gso(skb)) 3773 return 0; 3774 3775 err = skb_cow_head(skb, 0); 3776 if (err < 0) 3777 return err; 3778 3779 if (eth_p_mpls(first->protocol)) 3780 ip.hdr = skb_inner_network_header(skb); 3781 else 3782 ip.hdr = skb_network_header(skb); 3783 l4.hdr = skb_checksum_start(skb); 3784 3785 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3786 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3787 3788 /* initialize outer IP header fields */ 3789 if (ip.v4->version == 4) { 3790 unsigned char *csum_start = skb_checksum_start(skb); 3791 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 3792 int len = csum_start - trans_start; 3793 3794 /* IP header will have to cancel out any data that 3795 * is not a part of the outer IP header, so set to 3796 * a reverse csum if needed, else init check to 0. 3797 */ 3798 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
3799 csum_fold(csum_partial(trans_start, 3800 len, 0)) : 0; 3801 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3802 3803 ip.v4->tot_len = 0; 3804 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 3805 IXGBE_TX_FLAGS_CSUM | 3806 IXGBE_TX_FLAGS_IPV4; 3807 } else { 3808 ip.v6->payload_len = 0; 3809 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 3810 IXGBE_TX_FLAGS_CSUM; 3811 } 3812 3813 /* determine offset of inner transport header */ 3814 l4_offset = l4.hdr - skb->data; 3815 3816 /* compute length of segmentation header */ 3817 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 3818 3819 /* remove payload length from inner checksum */ 3820 paylen = skb->len - l4_offset; 3821 csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); 3822 3823 /* update gso size and bytecount with header size */ 3824 first->gso_segs = skb_shinfo(skb)->gso_segs; 3825 first->bytecount += (first->gso_segs - 1) * *hdr_len; 3826 3827 /* mss_l4len_id: use 1 as index for TSO */ 3828 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; 3829 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 3830 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); 3831 3832 fceof_saidx |= itd->pfsa; 3833 type_tucmd |= itd->flags | itd->trailer_len; 3834 3835 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 3836 vlan_macip_lens = l4.hdr - ip.hdr; 3837 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; 3838 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 3839 3840 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 3841 mss_l4len_idx); 3842 3843 return 1; 3844 } 3845 3846 static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) 3847 { 3848 unsigned int offset = 0; 3849 3850 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); 3851 3852 return offset == skb_checksum_start_offset(skb); 3853 } 3854 3855 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 3856 struct ixgbevf_tx_buffer *first, 3857 struct ixgbevf_ipsec_tx_data *itd) 3858 { 3859 struct sk_buff *skb = first->skb; 3860 u32 vlan_macip_lens = 0; 3861 u32 fceof_saidx = 0; 3862 u32 type_tucmd = 0; 3863 3864 if (skb->ip_summed != CHECKSUM_PARTIAL) 3865 goto no_csum; 3866 3867 switch (skb->csum_offset) { 3868 case offsetof(struct tcphdr, check): 3869 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 3870 fallthrough; 3871 case offsetof(struct udphdr, check): 3872 break; 3873 case offsetof(struct sctphdr, checksum): 3874 /* validate that this is actually an SCTP request */ 3875 if (((first->protocol == htons(ETH_P_IP)) && 3876 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || 3877 ((first->protocol == htons(ETH_P_IPV6)) && 3878 ixgbevf_ipv6_csum_is_sctp(skb))) { 3879 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; 3880 break; 3881 } 3882 fallthrough; 3883 default: 3884 skb_checksum_help(skb); 3885 goto no_csum; 3886 } 3887 3888 if (first->protocol == htons(ETH_P_IP)) 3889 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3890 3891 /* update TX checksum flag */ 3892 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 3893 vlan_macip_lens = skb_checksum_start_offset(skb) - 3894 skb_network_offset(skb); 3895 no_csum: 3896 /* vlan_macip_lens: MACLEN, VLAN tag */ 3897 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 3898 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 3899 3900 fceof_saidx |= itd->pfsa; 3901 type_tucmd |= itd->flags | itd->trailer_len; 3902 3903 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 3904 fceof_saidx, type_tucmd, 0); 3905 } 3906 3907 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) 3908 { 3909 /* set type for 
advanced descriptor with frame checksum insertion */ 3910 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | 3911 IXGBE_ADVTXD_DCMD_IFCS | 3912 IXGBE_ADVTXD_DCMD_DEXT); 3913 3914 /* set HW VLAN bit if VLAN is present */ 3915 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3916 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); 3917 3918 /* set segmentation enable bits for TSO/FSO */ 3919 if (tx_flags & IXGBE_TX_FLAGS_TSO) 3920 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); 3921 3922 return cmd_type; 3923 } 3924 3925 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, 3926 u32 tx_flags, unsigned int paylen) 3927 { 3928 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); 3929 3930 /* enable L4 checksum for TSO and TX checksum offload */ 3931 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3932 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); 3933 3934 /* enable IPv4 checksum for TSO */ 3935 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3936 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); 3937 3938 /* enable IPsec */ 3939 if (tx_flags & IXGBE_TX_FLAGS_IPSEC) 3940 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); 3941 3942 /* use index 1 context for TSO/FSO/FCOE/IPSEC */ 3943 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) 3944 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); 3945 3946 /* Check Context must be set if Tx switch is enabled, which it 3947 * always is for the case where virtual functions are running 3948 */ 3949 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); 3950 3951 tx_desc->read.olinfo_status = olinfo_status; 3952 } 3953 3954 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 3955 struct ixgbevf_tx_buffer *first, 3956 const u8 hdr_len) 3957 { 3958 struct sk_buff *skb = first->skb; 3959 struct ixgbevf_tx_buffer *tx_buffer; 3960 union ixgbe_adv_tx_desc *tx_desc; 3961 skb_frag_t *frag; 3962 dma_addr_t dma; 3963 unsigned int data_len, size; 3964 u32 tx_flags = first->tx_flags; 3965 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags); 3966 u16 i = tx_ring->next_to_use; 3967 3968 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 3969 3970 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); 3971 3972 size = skb_headlen(skb); 3973 data_len = skb->data_len; 3974 3975 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 3976 3977 tx_buffer = first; 3978 3979 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 3980 if (dma_mapping_error(tx_ring->dev, dma)) 3981 goto dma_error; 3982 3983 /* record length, and DMA address */ 3984 dma_unmap_len_set(tx_buffer, len, size); 3985 dma_unmap_addr_set(tx_buffer, dma, dma); 3986 3987 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3988 3989 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { 3990 tx_desc->read.cmd_type_len = 3991 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); 3992 3993 i++; 3994 tx_desc++; 3995 if (i == tx_ring->count) { 3996 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 3997 i = 0; 3998 } 3999 tx_desc->read.olinfo_status = 0; 4000 4001 dma += IXGBE_MAX_DATA_PER_TXD; 4002 size -= IXGBE_MAX_DATA_PER_TXD; 4003 4004 tx_desc->read.buffer_addr = cpu_to_le64(dma); 4005 } 4006 4007 if (likely(!data_len)) 4008 break; 4009 4010 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); 4011 4012 i++; 4013 tx_desc++; 4014 if (i == tx_ring->count) { 4015 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 4016 i = 0; 4017 } 4018 tx_desc->read.olinfo_status = 0; 4019 4020 size = skb_frag_size(frag); 4021 data_len -= size; 4022 4023 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 4024 DMA_TO_DEVICE);
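/* advance to the buffer_info slot for this fragment; its length and DMA address are recorded at the top of the next loop iteration */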
4025 4026 tx_buffer = &tx_ring->tx_buffer_info[i]; 4027 } 4028 4029 /* write last descriptor with RS and EOP bits */ 4030 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); 4031 tx_desc->read.cmd_type_len = cmd_type; 4032 4033 /* set the timestamp */ 4034 first->time_stamp = jiffies; 4035 4036 skb_tx_timestamp(skb); 4037 4038 /* Force memory writes to complete before letting h/w know there 4039 * are new descriptors to fetch. (Only applicable for weak-ordered 4040 * memory model archs, such as IA-64). 4041 * 4042 * We also need this memory barrier (wmb) to make certain all of the 4043 * status bits have been updated before next_to_watch is written. 4044 */ 4045 wmb(); 4046 4047 /* set next_to_watch value indicating a packet is present */ 4048 first->next_to_watch = tx_desc; 4049 4050 i++; 4051 if (i == tx_ring->count) 4052 i = 0; 4053 4054 tx_ring->next_to_use = i; 4055 4056 /* notify HW of packet */ 4057 ixgbevf_write_tail(tx_ring, i); 4058 4059 return; 4060 dma_error: 4061 dev_err(tx_ring->dev, "TX DMA map failed\n"); 4062 tx_buffer = &tx_ring->tx_buffer_info[i]; 4063 4064 /* clear dma mappings for failed tx_buffer_info map */ 4065 while (tx_buffer != first) { 4066 if (dma_unmap_len(tx_buffer, len)) 4067 dma_unmap_page(tx_ring->dev, 4068 dma_unmap_addr(tx_buffer, dma), 4069 dma_unmap_len(tx_buffer, len), 4070 DMA_TO_DEVICE); 4071 dma_unmap_len_set(tx_buffer, len, 0); 4072 4073 if (i-- == 0) 4074 i += tx_ring->count; 4075 tx_buffer = &tx_ring->tx_buffer_info[i]; 4076 } 4077 4078 if (dma_unmap_len(tx_buffer, len)) 4079 dma_unmap_single(tx_ring->dev, 4080 dma_unmap_addr(tx_buffer, dma), 4081 dma_unmap_len(tx_buffer, len), 4082 DMA_TO_DEVICE); 4083 dma_unmap_len_set(tx_buffer, len, 0); 4084 4085 dev_kfree_skb_any(tx_buffer->skb); 4086 tx_buffer->skb = NULL; 4087 4088 tx_ring->next_to_use = i; 4089 } 4090 4091 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 4092 { 4093 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 4094 /* Herbert's original patch had: 4095 * smp_mb__after_netif_stop_queue(); 4096 * but since that doesn't exist yet, just open code it. 4097 */ 4098 smp_mb(); 4099 4100 /* We need to check again in case another CPU has just 4101 * made room available. 4102 */ 4103 if (likely(ixgbevf_desc_unused(tx_ring) < size)) 4104 return -EBUSY; 4105 4106 /* A reprieve!
- use start_queue because it doesn't call schedule */ 4107 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 4108 ++tx_ring->tx_stats.restart_queue; 4109 4110 return 0; 4111 } 4112 4113 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 4114 { 4115 if (likely(ixgbevf_desc_unused(tx_ring) >= size)) 4116 return 0; 4117 return __ixgbevf_maybe_stop_tx(tx_ring, size); 4118 } 4119 4120 static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, 4121 struct ixgbevf_ring *tx_ring) 4122 { 4123 struct ixgbevf_tx_buffer *first; 4124 int tso; 4125 u32 tx_flags = 0; 4126 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 4127 struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; 4128 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 4129 unsigned short f; 4130 #endif 4131 u8 hdr_len = 0; 4132 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 4133 4134 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 4135 dev_kfree_skb_any(skb); 4136 return NETDEV_TX_OK; 4137 } 4138 4139 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 4140 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 4141 * + 2 desc gap to keep tail from touching head, 4142 * + 1 desc for context descriptor, 4143 * otherwise try next time 4144 */ 4145 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 4146 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 4147 skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 4148 4149 count += TXD_USE_COUNT(skb_frag_size(frag)); 4150 } 4151 #else 4152 count += skb_shinfo(skb)->nr_frags; 4153 #endif 4154 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 4155 tx_ring->tx_stats.tx_busy++; 4156 return NETDEV_TX_BUSY; 4157 } 4158 4159 /* record the location of the first descriptor for this packet */ 4160 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 4161 first->skb = skb; 4162 first->bytecount = skb->len; 4163 first->gso_segs = 1; 4164 4165 if (skb_vlan_tag_present(skb)) { 4166 tx_flags |= skb_vlan_tag_get(skb); 4167 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 4168 tx_flags |= IXGBE_TX_FLAGS_VLAN; 4169 } 4170 4171 /* record initial flags and protocol */ 4172 first->tx_flags = tx_flags; 4173 first->protocol = vlan_get_protocol(skb); 4174 4175 #ifdef CONFIG_IXGBEVF_IPSEC 4176 if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) 4177 goto out_drop; 4178 #endif 4179 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); 4180 if (tso < 0) 4181 goto out_drop; 4182 else if (!tso) 4183 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx); 4184 4185 ixgbevf_tx_map(tx_ring, first, hdr_len); 4186 4187 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 4188 4189 return NETDEV_TX_OK; 4190 4191 out_drop: 4192 dev_kfree_skb_any(first->skb); 4193 first->skb = NULL; 4194 4195 return NETDEV_TX_OK; 4196 } 4197 4198 static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 4199 { 4200 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4201 struct ixgbevf_ring *tx_ring; 4202 4203 if (skb->len <= 0) { 4204 dev_kfree_skb_any(skb); 4205 return NETDEV_TX_OK; 4206 } 4207 4208 /* The minimum packet size for olinfo paylen is 17 so pad the skb 4209 * in order to meet this minimum size requirement. 
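 * skb_padto() zero-pads the buffer and frees the skb itself on allocation failure, so returning without a free below is safe.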
4210 */ 4211 if (skb->len < 17) { 4212 if (skb_padto(skb, 17)) 4213 return NETDEV_TX_OK; 4214 skb->len = 17; 4215 } 4216 4217 tx_ring = adapter->tx_ring[skb->queue_mapping]; 4218 return ixgbevf_xmit_frame_ring(skb, tx_ring); 4219 } 4220 4221 /** 4222 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 4223 * @netdev: network interface device structure 4224 * @p: pointer to an address structure 4225 * 4226 * Returns 0 on success, negative on failure 4227 **/ 4228 static int ixgbevf_set_mac(struct net_device *netdev, void *p) 4229 { 4230 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4231 struct ixgbe_hw *hw = &adapter->hw; 4232 struct sockaddr *addr = p; 4233 int err; 4234 4235 if (!is_valid_ether_addr(addr->sa_data)) 4236 return -EADDRNOTAVAIL; 4237 4238 spin_lock_bh(&adapter->mbx_lock); 4239 4240 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); 4241 4242 spin_unlock_bh(&adapter->mbx_lock); 4243 4244 if (err) 4245 return -EPERM; 4246 4247 ether_addr_copy(hw->mac.addr, addr->sa_data); 4248 ether_addr_copy(hw->mac.perm_addr, addr->sa_data); 4249 ether_addr_copy(netdev->dev_addr, addr->sa_data); 4250 4251 return 0; 4252 } 4253 4254 /** 4255 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 4256 * @netdev: network interface device structure 4257 * @new_mtu: new value for maximum frame size 4258 * 4259 * Returns 0 on success, negative on failure 4260 **/ 4261 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 4262 { 4263 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4264 struct ixgbe_hw *hw = &adapter->hw; 4265 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 4266 int ret; 4267 4268 /* prevent MTU being changed to a size unsupported by XDP */ 4269 if (adapter->xdp_prog) { 4270 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n"); 4271 return -EPERM; 4272 } 4273 4274 spin_lock_bh(&adapter->mbx_lock); 4275 /* notify the PF of our intent to use this size of frame */ 4276 ret = hw->mac.ops.set_rlpml(hw, max_frame); 4277 spin_unlock_bh(&adapter->mbx_lock); 4278 if (ret) 4279 return -EINVAL; 4280 4281 hw_dbg(hw, "changing MTU from %d to %d\n", 4282 netdev->mtu, new_mtu); 4283 4284 /* must set new MTU before calling down or up */ 4285 netdev->mtu = new_mtu; 4286 4287 if (netif_running(netdev)) 4288 ixgbevf_reinit_locked(adapter); 4289 4290 return 0; 4291 } 4292 4293 static int __maybe_unused ixgbevf_suspend(struct device *dev_d) 4294 { 4295 struct net_device *netdev = dev_get_drvdata(dev_d); 4296 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4297 4298 rtnl_lock(); 4299 netif_device_detach(netdev); 4300 4301 if (netif_running(netdev)) 4302 ixgbevf_close_suspend(adapter); 4303 4304 ixgbevf_clear_interrupt_scheme(adapter); 4305 rtnl_unlock(); 4306 4307 return 0; 4308 } 4309 4310 static int __maybe_unused ixgbevf_resume(struct device *dev_d) 4311 { 4312 struct pci_dev *pdev = to_pci_dev(dev_d); 4313 struct net_device *netdev = pci_get_drvdata(pdev); 4314 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4315 u32 err; 4316 4317 adapter->hw.hw_addr = adapter->io_addr; 4318 smp_mb__before_atomic(); 4319 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 4320 pci_set_master(pdev); 4321 4322 ixgbevf_reset(adapter); 4323 4324 rtnl_lock(); 4325 err = ixgbevf_init_interrupt_scheme(adapter); 4326 if (!err && netif_running(netdev)) 4327 err = ixgbevf_open(netdev); 4328 rtnl_unlock(); 4329 if (err) 4330 return err; 4331 4332 netif_device_attach(netdev); 4333 4334 return err; 4335 } 4336 4337 static void 
ixgbevf_shutdown(struct pci_dev *pdev) 4338 { 4339 ixgbevf_suspend(&pdev->dev); 4340 } 4341 4342 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, 4343 const struct ixgbevf_ring *ring) 4344 { 4345 u64 bytes, packets; 4346 unsigned int start; 4347 4348 if (ring) { 4349 do { 4350 start = u64_stats_fetch_begin_irq(&ring->syncp); 4351 bytes = ring->stats.bytes; 4352 packets = ring->stats.packets; 4353 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 4354 stats->tx_bytes += bytes; 4355 stats->tx_packets += packets; 4356 } 4357 } 4358 4359 static void ixgbevf_get_stats(struct net_device *netdev, 4360 struct rtnl_link_stats64 *stats) 4361 { 4362 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4363 unsigned int start; 4364 u64 bytes, packets; 4365 const struct ixgbevf_ring *ring; 4366 int i; 4367 4368 ixgbevf_update_stats(adapter); 4369 4370 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 4371 4372 rcu_read_lock(); 4373 for (i = 0; i < adapter->num_rx_queues; i++) { 4374 ring = adapter->rx_ring[i]; 4375 do { 4376 start = u64_stats_fetch_begin_irq(&ring->syncp); 4377 bytes = ring->stats.bytes; 4378 packets = ring->stats.packets; 4379 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 4380 stats->rx_bytes += bytes; 4381 stats->rx_packets += packets; 4382 } 4383 4384 for (i = 0; i < adapter->num_tx_queues; i++) { 4385 ring = adapter->tx_ring[i]; 4386 ixgbevf_get_tx_ring_stats(stats, ring); 4387 } 4388 4389 for (i = 0; i < adapter->num_xdp_queues; i++) { 4390 ring = adapter->xdp_ring[i]; 4391 ixgbevf_get_tx_ring_stats(stats, ring); 4392 } 4393 rcu_read_unlock(); 4394 } 4395 4396 #define IXGBEVF_MAX_MAC_HDR_LEN 127 4397 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511 4398 4399 static netdev_features_t 4400 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, 4401 netdev_features_t features) 4402 { 4403 unsigned int network_hdr_len, mac_hdr_len; 4404 4405 /* Make certain the headers can be described by a context descriptor */ 4406 mac_hdr_len = skb_network_header(skb) - skb->data; 4407 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN)) 4408 return features & ~(NETIF_F_HW_CSUM | 4409 NETIF_F_SCTP_CRC | 4410 NETIF_F_HW_VLAN_CTAG_TX | 4411 NETIF_F_TSO | 4412 NETIF_F_TSO6); 4413 4414 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 4415 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN)) 4416 return features & ~(NETIF_F_HW_CSUM | 4417 NETIF_F_SCTP_CRC | 4418 NETIF_F_TSO | 4419 NETIF_F_TSO6); 4420 4421 /* We can only support IPV4 TSO in tunnels if we can mangle the 4422 * inner IP ID field, so strip TSO if MANGLEID is not supported. 
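 * (NETIF_F_TSO_MANGLEID means the IP IDs of the generated segments need not follow the normal incrementing sequence.)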
4423 */ 4424 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 4425 features &= ~NETIF_F_TSO; 4426 4427 return features; 4428 } 4429 4430 static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) 4431 { 4432 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4433 struct ixgbevf_adapter *adapter = netdev_priv(dev); 4434 struct bpf_prog *old_prog; 4435 4436 /* verify ixgbevf ring attributes are sufficient for XDP */ 4437 for (i = 0; i < adapter->num_rx_queues; i++) { 4438 struct ixgbevf_ring *ring = adapter->rx_ring[i]; 4439 4440 if (frame_size > ixgbevf_rx_bufsz(ring)) 4441 return -EINVAL; 4442 } 4443 4444 old_prog = xchg(&adapter->xdp_prog, prog); 4445 4446 /* If transitioning XDP modes reconfigure rings */ 4447 if (!!prog != !!old_prog) { 4448 /* Hardware has to reinitialize queues and interrupts to 4449 * match packet buffer alignment. Unfortunately, the 4450 * hardware is not flexible enough to do this dynamically. 4451 */ 4452 if (netif_running(dev)) 4453 ixgbevf_close(dev); 4454 4455 ixgbevf_clear_interrupt_scheme(adapter); 4456 ixgbevf_init_interrupt_scheme(adapter); 4457 4458 if (netif_running(dev)) 4459 ixgbevf_open(dev); 4460 } else { 4461 for (i = 0; i < adapter->num_rx_queues; i++) 4462 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); 4463 } 4464 4465 if (old_prog) 4466 bpf_prog_put(old_prog); 4467 4468 return 0; 4469 } 4470 4471 static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) 4472 { 4473 switch (xdp->command) { 4474 case XDP_SETUP_PROG: 4475 return ixgbevf_xdp_setup(dev, xdp->prog); 4476 default: 4477 return -EINVAL; 4478 } 4479 } 4480 4481 static const struct net_device_ops ixgbevf_netdev_ops = { 4482 .ndo_open = ixgbevf_open, 4483 .ndo_stop = ixgbevf_close, 4484 .ndo_start_xmit = ixgbevf_xmit_frame, 4485 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 4486 .ndo_get_stats64 = ixgbevf_get_stats, 4487 .ndo_validate_addr = eth_validate_addr, 4488 .ndo_set_mac_address = ixgbevf_set_mac, 4489 .ndo_change_mtu = ixgbevf_change_mtu, 4490 .ndo_tx_timeout = ixgbevf_tx_timeout, 4491 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 4492 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 4493 .ndo_features_check = ixgbevf_features_check, 4494 .ndo_bpf = ixgbevf_xdp, 4495 }; 4496 4497 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 4498 { 4499 dev->netdev_ops = &ixgbevf_netdev_ops; 4500 ixgbevf_set_ethtool_ops(dev); 4501 dev->watchdog_timeo = 5 * HZ; 4502 } 4503 4504 /** 4505 * ixgbevf_probe - Device Initialization Routine 4506 * @pdev: PCI device information struct 4507 * @ent: entry in ixgbevf_pci_tbl 4508 * 4509 * Returns 0 on success, negative on failure 4510 * 4511 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 4512 * The OS initialization, configuring of the adapter private structure, 4513 * and a hardware reset occur. 
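 * On failure, resources acquired up to that point are released through the error labels at the end of the function.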
4514 **/ 4515 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4516 { 4517 struct net_device *netdev; 4518 struct ixgbevf_adapter *adapter = NULL; 4519 struct ixgbe_hw *hw = NULL; 4520 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 4521 int err, pci_using_dac; 4522 bool disable_dev = false; 4523 4524 err = pci_enable_device(pdev); 4525 if (err) 4526 return err; 4527 4528 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 4529 pci_using_dac = 1; 4530 } else { 4531 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 4532 if (err) { 4533 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 4534 goto err_dma; 4535 } 4536 pci_using_dac = 0; 4537 } 4538 4539 err = pci_request_regions(pdev, ixgbevf_driver_name); 4540 if (err) { 4541 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 4542 goto err_pci_reg; 4543 } 4544 4545 pci_set_master(pdev); 4546 4547 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 4548 MAX_TX_QUEUES); 4549 if (!netdev) { 4550 err = -ENOMEM; 4551 goto err_alloc_etherdev; 4552 } 4553 4554 SET_NETDEV_DEV(netdev, &pdev->dev); 4555 4556 adapter = netdev_priv(netdev); 4557 4558 adapter->netdev = netdev; 4559 adapter->pdev = pdev; 4560 hw = &adapter->hw; 4561 hw->back = adapter; 4562 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 4563 4564 /* call save state here in standalone driver because it relies on 4565 * adapter struct to exist, and needs to call netdev_priv 4566 */ 4567 pci_save_state(pdev); 4568 4569 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 4570 pci_resource_len(pdev, 0)); 4571 adapter->io_addr = hw->hw_addr; 4572 if (!hw->hw_addr) { 4573 err = -EIO; 4574 goto err_ioremap; 4575 } 4576 4577 ixgbevf_assign_netdev_ops(netdev); 4578 4579 /* Setup HW API */ 4580 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 4581 hw->mac.type = ii->mac; 4582 4583 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 4584 sizeof(struct ixgbe_mbx_operations)); 4585 4586 /* setup the private structure */ 4587 err = ixgbevf_sw_init(adapter); 4588 if (err) 4589 goto err_sw_init; 4590 4591 /* The HW MAC address was set and/or determined in sw_init */ 4592 if (!is_valid_ether_addr(netdev->dev_addr)) { 4593 pr_err("invalid MAC address\n"); 4594 err = -EIO; 4595 goto err_sw_init; 4596 } 4597 4598 netdev->hw_features = NETIF_F_SG | 4599 NETIF_F_TSO | 4600 NETIF_F_TSO6 | 4601 NETIF_F_RXCSUM | 4602 NETIF_F_HW_CSUM | 4603 NETIF_F_SCTP_CRC; 4604 4605 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 4606 NETIF_F_GSO_GRE_CSUM | \ 4607 NETIF_F_GSO_IPXIP4 | \ 4608 NETIF_F_GSO_IPXIP6 | \ 4609 NETIF_F_GSO_UDP_TUNNEL | \ 4610 NETIF_F_GSO_UDP_TUNNEL_CSUM) 4611 4612 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; 4613 netdev->hw_features |= NETIF_F_GSO_PARTIAL | 4614 IXGBEVF_GSO_PARTIAL_FEATURES; 4615 4616 netdev->features = netdev->hw_features; 4617 4618 if (pci_using_dac) 4619 netdev->features |= NETIF_F_HIGHDMA; 4620 4621 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 4622 netdev->mpls_features |= NETIF_F_SG | 4623 NETIF_F_TSO | 4624 NETIF_F_TSO6 | 4625 NETIF_F_HW_CSUM; 4626 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES; 4627 netdev->hw_enc_features |= netdev->vlan_features; 4628 4629 /* set this bit last since it cannot be part of vlan_features */ 4630 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 4631 NETIF_F_HW_VLAN_CTAG_RX | 4632 NETIF_F_HW_VLAN_CTAG_TX; 4633 4634 netdev->priv_flags |= IFF_UNICAST_FLT; 4635 4636 /* MTU range: 68 - 
1504 or 9710 */ 4637 netdev->min_mtu = ETH_MIN_MTU; 4638 switch (adapter->hw.api_version) { 4639 case ixgbe_mbox_api_11: 4640 case ixgbe_mbox_api_12: 4641 case ixgbe_mbox_api_13: 4642 case ixgbe_mbox_api_14: 4643 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - 4644 (ETH_HLEN + ETH_FCS_LEN); 4645 break; 4646 default: 4647 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) 4648 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - 4649 (ETH_HLEN + ETH_FCS_LEN); 4650 else 4651 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; 4652 break; 4653 } 4654 4655 if (IXGBE_REMOVED(hw->hw_addr)) { 4656 err = -EIO; 4657 goto err_sw_init; 4658 } 4659 4660 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); 4661 4662 INIT_WORK(&adapter->service_task, ixgbevf_service_task); 4663 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); 4664 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); 4665 4666 err = ixgbevf_init_interrupt_scheme(adapter); 4667 if (err) 4668 goto err_sw_init; 4669 4670 strcpy(netdev->name, "eth%d"); 4671 4672 err = register_netdev(netdev); 4673 if (err) 4674 goto err_register; 4675 4676 pci_set_drvdata(pdev, netdev); 4677 netif_carrier_off(netdev); 4678 ixgbevf_init_ipsec_offload(adapter); 4679 4680 ixgbevf_init_last_counter_stats(adapter); 4681 4682 /* print the VF info */ 4683 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); 4684 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); 4685 4686 switch (hw->mac.type) { 4687 case ixgbe_mac_X550_vf: 4688 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); 4689 break; 4690 case ixgbe_mac_X540_vf: 4691 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); 4692 break; 4693 case ixgbe_mac_82599_vf: 4694 default: 4695 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); 4696 break; 4697 } 4698 4699 return 0; 4700 4701 err_register: 4702 ixgbevf_clear_interrupt_scheme(adapter); 4703 err_sw_init: 4704 ixgbevf_reset_interrupt_capability(adapter); 4705 iounmap(adapter->io_addr); 4706 kfree(adapter->rss_key); 4707 err_ioremap: 4708 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); 4709 free_netdev(netdev); 4710 err_alloc_etherdev: 4711 pci_release_regions(pdev); 4712 err_pci_reg: 4713 err_dma: 4714 if (!adapter || disable_dev) 4715 pci_disable_device(pdev); 4716 return err; 4717 } 4718 4719 /** 4720 * ixgbevf_remove - Device Removal Routine 4721 * @pdev: PCI device information struct 4722 * 4723 * ixgbevf_remove is called by the PCI subsystem to alert the driver 4724 * that it should release a PCI device. This could be caused by a 4725 * Hot-Plug event, or because the driver is going to be removed from 4726 * memory.
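 * Service work is cancelled and the netdev unregistered before the register mapping and PCI resources are released.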
4727 **/ 4728 static void ixgbevf_remove(struct pci_dev *pdev) 4729 { 4730 struct net_device *netdev = pci_get_drvdata(pdev); 4731 struct ixgbevf_adapter *adapter; 4732 bool disable_dev; 4733 4734 if (!netdev) 4735 return; 4736 4737 adapter = netdev_priv(netdev); 4738 4739 set_bit(__IXGBEVF_REMOVING, &adapter->state); 4740 cancel_work_sync(&adapter->service_task); 4741 4742 if (netdev->reg_state == NETREG_REGISTERED) 4743 unregister_netdev(netdev); 4744 4745 ixgbevf_stop_ipsec_offload(adapter); 4746 ixgbevf_clear_interrupt_scheme(adapter); 4747 ixgbevf_reset_interrupt_capability(adapter); 4748 4749 iounmap(adapter->io_addr); 4750 pci_release_regions(pdev); 4751 4752 hw_dbg(&adapter->hw, "Remove complete\n"); 4753 4754 kfree(adapter->rss_key); 4755 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); 4756 free_netdev(netdev); 4757 4758 if (disable_dev) 4759 pci_disable_device(pdev); 4760 } 4761 4762 /** 4763 * ixgbevf_io_error_detected - called when PCI error is detected 4764 * @pdev: Pointer to PCI device 4765 * @state: The current pci connection state 4766 * 4767 * This function is called after a PCI bus error affecting 4768 * this device has been detected. 4769 **/ 4770 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, 4771 pci_channel_state_t state) 4772 { 4773 struct net_device *netdev = pci_get_drvdata(pdev); 4774 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4775 4776 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) 4777 return PCI_ERS_RESULT_DISCONNECT; 4778 4779 rtnl_lock(); 4780 netif_device_detach(netdev); 4781 4782 if (netif_running(netdev)) 4783 ixgbevf_close_suspend(adapter); 4784 4785 if (state == pci_channel_io_perm_failure) { 4786 rtnl_unlock(); 4787 return PCI_ERS_RESULT_DISCONNECT; 4788 } 4789 4790 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) 4791 pci_disable_device(pdev); 4792 rtnl_unlock(); 4793 4794 /* Request a slot reset. */ 4795 return PCI_ERS_RESULT_NEED_RESET; 4796 } 4797 4798 /** 4799 * ixgbevf_io_slot_reset - called after the pci bus has been reset. 4800 * @pdev: Pointer to PCI device 4801 * 4802 * Restart the card from scratch, as if from a cold-boot. Implementation 4803 * resembles the first-half of the ixgbevf_resume routine. 4804 **/ 4805 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 4806 { 4807 struct net_device *netdev = pci_get_drvdata(pdev); 4808 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4809 4810 if (pci_enable_device_mem(pdev)) { 4811 dev_err(&pdev->dev, 4812 "Cannot re-enable PCI device after reset.\n"); 4813 return PCI_ERS_RESULT_DISCONNECT; 4814 } 4815 4816 adapter->hw.hw_addr = adapter->io_addr; 4817 smp_mb__before_atomic(); 4818 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 4819 pci_set_master(pdev); 4820 4821 ixgbevf_reset(adapter); 4822 4823 return PCI_ERS_RESULT_RECOVERED; 4824 } 4825 4826 /** 4827 * ixgbevf_io_resume - called when traffic can start flowing again. 4828 * @pdev: Pointer to PCI device 4829 * 4830 * This callback is called when the error recovery driver tells us that 4831 * it's OK to resume normal operation. Implementation resembles the 4832 * second-half of the ixgbevf_resume routine.
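 * The interface is reopened here (if it was running) and reattached, under rtnl_lock.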
4833 **/ 4834 static void ixgbevf_io_resume(struct pci_dev *pdev) 4835 { 4836 struct net_device *netdev = pci_get_drvdata(pdev); 4837 4838 rtnl_lock(); 4839 if (netif_running(netdev)) 4840 ixgbevf_open(netdev); 4841 4842 netif_device_attach(netdev); 4843 rtnl_unlock(); 4844 } 4845 4846 /* PCI Error Recovery (ERS) */ 4847 static const struct pci_error_handlers ixgbevf_err_handler = { 4848 .error_detected = ixgbevf_io_error_detected, 4849 .slot_reset = ixgbevf_io_slot_reset, 4850 .resume = ixgbevf_io_resume, 4851 }; 4852 4853 static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume); 4854 4855 static struct pci_driver ixgbevf_driver = { 4856 .name = ixgbevf_driver_name, 4857 .id_table = ixgbevf_pci_tbl, 4858 .probe = ixgbevf_probe, 4859 .remove = ixgbevf_remove, 4860 4861 /* Power Management Hooks */ 4862 .driver.pm = &ixgbevf_pm_ops, 4863 4864 .shutdown = ixgbevf_shutdown, 4865 .err_handler = &ixgbevf_err_handler 4866 }; 4867 4868 /** 4869 * ixgbevf_init_module - Driver Registration Routine 4870 * 4871 * ixgbevf_init_module is the first routine called when the driver is 4872 * loaded. All it does is register with the PCI subsystem. 4873 **/ 4874 static int __init ixgbevf_init_module(void) 4875 { 4876 pr_info("%s\n", ixgbevf_driver_string); 4877 pr_info("%s\n", ixgbevf_copyright); 4878 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); 4879 if (!ixgbevf_wq) { 4880 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name); 4881 return -ENOMEM; 4882 } 4883 4884 return pci_register_driver(&ixgbevf_driver); 4885 } 4886 4887 module_init(ixgbevf_init_module); 4888 4889 /** 4890 * ixgbevf_exit_module - Driver Exit Cleanup Routine 4891 * 4892 * ixgbevf_exit_module is called just before the driver is removed 4893 * from memory. 4894 **/ 4895 static void __exit ixgbevf_exit_module(void) 4896 { 4897 pci_unregister_driver(&ixgbevf_driver); 4898 if (ixgbevf_wq) { 4899 destroy_workqueue(ixgbevf_wq); 4900 ixgbevf_wq = NULL; 4901 } 4902 } 4903 4904 #ifdef DEBUG 4905 /** 4906 * ixgbevf_get_hw_dev_name - return device name string 4907 * used by hardware layer to print debugging information 4908 * @hw: pointer to private hardware struct 4909 **/ 4910 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 4911 { 4912 struct ixgbevf_adapter *adapter = hw->back; 4913 4914 return adapter->netdev->name; 4915 } 4916 4917 #endif 4918 module_exit(ixgbevf_exit_module); 4919 4920 /* ixgbevf_main.c */ 4921