// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION	"0.26.1-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright(c) 2013 - 2018 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					  fm10k_driver_name);
	if (!fm10k_workqueue)
		return -ENOMEM;

	fm10k_dbg_init();

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(fm10k_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @size: packet size from rx_desc
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      unsigned int size,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, 512);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}

static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int size = le16_to_cpu(rx_desc->w.length);
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}

#define FM10K_RSS_L4_TYPES_MASK \
	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	/* Record Rx queue, or update macvlan statistics */
	if (!l2_accel)
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 false);

	skb->protocol = eth_type_trans(skb, dev);
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}

#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_udp_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_udp_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS	htons(0x9FFF)
#define NVGRE_TNI			htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}

__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}
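
	/* inner_l4_hdr now holds the L4 protocol of the encapsulated frame;
	 * only inner TCP (or UDP, with its fixed 8 byte header) can be
	 * offloaded, and only when the full tunnel header fits within the
	 * hardware limit checked below.
	 */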
	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}

static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	u8 *transport_hdr;
	__be16 frag_off;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
		transport_hdr = skb_inner_transport_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
		transport_hdr = skb_transport_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		if (likely((transport_hdr - network_hdr.raw) ==
			   sizeof(struct ipv6hdr)))
			break;
		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
				      sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (unlikely(frag_off))
			l4_hdr = NEXTHDR_FRAGMENT;
		break;
	default:
		break;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
		/* fall through */
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum, version=%d l4 proto=%x\n",
				 protocol, l4_hdr);
		}
		skb_checksum_help(skb);
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}

#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct fm10k_tx_buffer *first;
	unsigned short f;
	u32 tx_flags = 0;
	int tso;

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{
	return ring->stats.packets;
}

/**
 * fm10k_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring structure
 * @in_sw: is tx_pending being checked in SW or in HW?
 */
u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
{
	struct fm10k_intfc *interface = ring->q_vector->interface;
	struct fm10k_hw *hw = &interface->hw;
	u32 head, tail;

	if (likely(in_sw)) {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	} else {
		head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
		tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
	}

	return ((head <= tail) ? tail : tail + ring->count) - head;
}

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. By
	 * requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (!tx_pending || (tx_done_old != tx_done)) {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);

		return false;
	}

	/* make sure it is true for two checks in a row */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
}

/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__FM10K_DOWN, interface->state)) {
		interface->tx_timeout_count++;
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
		fm10k_service_event_schedule(interface);
	}
}

/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring, int napi_budget)
{
	struct fm10k_intfc *interface = q_vector->interface;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__FM10K_DOWN, interface->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer[i];
	tx_desc = FM10K_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer;
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer;
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct fm10k_hw *hw = &interface->hw;

		netif_err(interface, drv, tx_ring->netdev,
			  "Detected Tx Unit Hang\n"
			  " Tx Queue <%d>\n"
			  " TDH, TDT <%x>, <%x>\n"
			  " next_to_use <%x>\n"
			  " next_to_clean <%x>\n",
			  tx_ring->queue_index,
			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
			  tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev,
				    tx_ring->queue_index);

		netif_info(interface, probe, tx_ring->netdev,
			   "tx hang %d detected on queue %d, resetting interface\n",
			   interface->tx_timeout_count + 1,
			   tx_ring->queue_index);

		fm10k_tx_timeout_reset(interface);

		/* the netdev is about to reset, no point in enabling stuff */
		return true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  The
 * divisors and thresholds used by this function were determined based
 * on theoretical maximum wire speed and testing data, in order to
 * minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{
	unsigned int avg_wire_size, packets, itr_round;

	/* Only update ITR if we are using adaptive setting */
	if (!ITR_IS_ADAPTIVE(ring_container->itr))
		goto clear_counts;

	packets = ring_container->total_packets;
	if (!packets)
		goto clear_counts;

	avg_wire_size = ring_container->total_bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (34 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 360) {
		/* Start at 250K ints/sec and gradually drop to 77K ints/sec */
		avg_wire_size *= 8;
		avg_wire_size += 376;
	} else if (avg_wire_size <= 1152) {
		/* 77K ints/sec to 45K ints/sec */
		avg_wire_size *= 3;
		avg_wire_size += 2176;
	} else if (avg_wire_size <= 1920) {
		/* 45K ints/sec to 38K ints/sec */
		avg_wire_size += 4480;
	} else {
		/* plateau at a limit of 38K ints/sec */
		avg_wire_size = 6656;
	}

	/* Perform final bitshift for division after rounding up to ensure
	 * that the calculation will never get below a 1. The bit shift
	 * accounts for changes in the ITR due to PCIe link speed.
	 */
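	/* Worked example (assuming itr_scale is 0, i.e. no PCIe adjustment):
	 * a 60 byte average frame gives 60 * 8 + 376 = 856, and rounding up
	 * with (856 + 255) >> 8 yields 4, keeping the interval small (and
	 * the interrupt rate high) for small packets.
	 */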
	itr_round = READ_ONCE(ring_container->itr_scale) + 8;
	avg_wire_size += BIT(itr_round) - 1;
	avg_wire_size >>= itr_round;

	/* write back value and retain adaptive flag */
	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;

clear_counts:
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;
}

static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{
	/* Enable auto-mask and clear the current mask */
	u32 itr = FM10K_ITR_ENABLE;

	/* Update Tx ITR */
	fm10k_update_itr(&q_vector->tx);

	/* Update Rx ITR */
	fm10k_update_itr(&q_vector->rx);

	/* Store Tx itr in timer slot 0 */
	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);

	/* Shift Rx itr to timer slot 1 */
	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;

	/* Write the final value to the ITR register */
	writel(itr, q_vector->itr);
}

static int fm10k_poll(struct napi_struct *napi, int budget)
{
	struct fm10k_q_vector *q_vector =
			       container_of(napi, struct fm10k_q_vector, napi);
	struct fm10k_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	fm10k_for_each_ring(ring, q_vector->tx) {
		if (!fm10k_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	fm10k_for_each_ring(ring, q_vector->rx) {
		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);

		work_done += work;
		if (work >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		fm10k_qv_enable(q_vector);

	return min(work_done, budget - 1);
}

/**
 * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device
 * @interface: board private structure to initialize
 *
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class.  If multiqueue isn't available, then abort QoS
 * initialization.
 *
 * This function handles all combinations of QoS and RSS.
 *
 **/
static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_ring_feature *f;
	int rss_i, i;
	int pcs;

	/* Map queue offset and counts onto allocated tx queues */
	pcs = netdev_get_num_tc(dev);

	if (pcs <= 1)
		return false;

	/* set QoS mask and indices */
	f = &interface->ring_feature[RING_F_QOS];
	f->indices = pcs;
	f->mask = BIT(fls(pcs - 1)) - 1;

	/* determine the upper limit for our current DCB mode */
	rss_i = interface->hw.mac.max_queues / pcs;
	rss_i = BIT(fls(rss_i) - 1);

	/* set RSS mask and indices */
	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = BIT(fls(rss_i - 1)) - 1;

	/* configure pause class to queue mapping */
	for (i = 0; i < pcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	interface->num_rx_queues = rss_i * pcs;
	interface->num_tx_queues = rss_i * pcs;

	return true;
}

/**
 * fm10k_set_rss_queues: Allocate queues for RSS
 * @interface: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
{
	struct fm10k_ring_feature *f;
	u16 rss_i;

	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);

	/* record indices and power of 2 mask for RSS */
	f->indices = rss_i;
	f->mask = BIT(fls(rss_i - 1)) - 1;

	interface->num_rx_queues = rss_i;
	interface->num_tx_queues = rss_i;

	return true;
}

/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Attempt to setup QoS and RSS first */
	if (fm10k_set_qos_queues(interface))
		return;

	/* If we don't have QoS, just fallback to only RSS. */
	fm10k_set_rss_queues(interface);
}

/**
 * fm10k_reset_num_queues - Reset the number of queues to zero
 * @interface: board private structure
 *
 * This function should be called whenever we need to reset the number of
 * queues after an error condition.
 */
static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
{
	interface->num_tx_queues = 0;
	interface->num_rx_queues = 0;
	interface->num_q_vectors = 0;
}

/**
 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
 * @interface: board private structure to initialize
 * @v_count: q_vectors allocated on interface, used for ring interleaving
 * @v_idx: index of vector in interface struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
				unsigned int v_count, unsigned int v_idx,
				unsigned int txr_count, unsigned int txr_idx,
				unsigned int rxr_count, unsigned int rxr_idx)
{
	struct fm10k_q_vector *q_vector;
	struct fm10k_ring *ring;
	int ring_count;

	ring_count = txr_count + rxr_count;

	/* allocate q_vector and rings */
	q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(interface->netdev, &q_vector->napi,
		       fm10k_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and interface together */
	interface->q_vector[v_idx] = q_vector;
	q_vector->interface = interface;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* save Tx ring container info */
	q_vector->tx.ring = ring;
	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
	q_vector->tx.itr = interface->tx_itr;
	q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
	q_vector->tx.count = txr_count;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Tx specific ring traits */
		ring->count = interface->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to interface */
		interface->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	/* save Rx ring container info */
	q_vector->rx.ring = ring;
	q_vector->rx.itr = interface->rx_itr;
	q_vector->rx.itr_scale = interface->hw.mac.itr_scale;
	q_vector->rx.count = rxr_count;

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;
		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Rx specific ring traits */
		ring->count = interface->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to interface */
		interface->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	fm10k_dbg_q_vector_init(q_vector);

	return 0;
}

/**
 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
 * @interface: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
{
	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
	struct fm10k_ring *ring;

	fm10k_dbg_q_vector_exit(q_vector);

	fm10k_for_each_ring(ring, q_vector->tx)
		interface->tx_ring[ring->queue_index] = NULL;

	fm10k_for_each_ring(ring, q_vector->rx)
		interface->rx_ring[ring->queue_index] = NULL;

	interface->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);
	kfree_rcu(q_vector, rcu);
}

/**
 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
 * @interface: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
{
	unsigned int q_vectors = interface->num_q_vectors;
	unsigned int rxr_remaining = interface->num_rx_queues;
	unsigned int txr_remaining = interface->num_tx_queues;
	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	fm10k_reset_num_queues(interface);

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);

	return -ENOMEM;
}

/**
 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
 * @interface: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
	int v_idx = interface->num_q_vectors;

	fm10k_reset_num_queues(interface);

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);
}

/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{
	pci_disable_msix(interface->pdev);
	kfree(interface->msix_entries);
	interface->msix_entries = NULL;
}

/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int v_budget, vector;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
	v_budget = min_t(u16, v_budget, num_online_cpus());

	/* account for vectors not related to queues */
	v_budget += NON_Q_VECTORS(hw);

	/* At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
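
	/* For instance, with 24 Rx/Tx queue pairs on a 16 CPU system the
	 * budget works out to 16 queue vectors plus the non-queue vectors,
	 * subject to the hw.mac.max_msix_vectors cap above.
	 */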

	/* A failure in MSI-X entry allocation is fatal. */
	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!interface->msix_entries)
		return -ENOMEM;

	/* populate entry values */
	for (vector = 0; vector < v_budget; vector++)
		interface->msix_entries[vector].entry = vector;

	/* Attempt to enable MSI-X with requested value */
	v_budget = pci_enable_msix_range(interface->pdev,
					 interface->msix_entries,
					 MIN_MSIX_COUNT(hw),
					 v_budget);
	if (v_budget < 0) {
		kfree(interface->msix_entries);
		interface->msix_entries = NULL;
		return v_budget;
	}

	/* record the number of queues available for q_vectors */
	interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);

	return 0;
}

/**
 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for QoS
 **/
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	int pc, offset, rss_i, i, q_idx;
	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
	u8 num_pcs = netdev_get_num_tc(dev);

	if (num_pcs <= 1)
		return false;

	rss_i = interface->ring_feature[RING_F_RSS].indices;

	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
		q_idx = pc;
		for (i = 0; i < rss_i; i++) {
			interface->tx_ring[offset + i]->reg_idx = q_idx;
			interface->tx_ring[offset + i]->qos_pc = pc;
			interface->rx_ring[offset + i]->reg_idx = q_idx;
			interface->rx_ring[offset + i]->qos_pc = pc;
			q_idx += pc_stride;
		}
	}

	return true;
}

/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		interface->rx_ring[i]->reg_idx = i;

	for (i = 0; i < interface->num_tx_queues; i++)
		interface->tx_ring[i]->reg_idx = i;
}

/**
 * fm10k_assign_rings - Map rings to network devices
 * @interface: Interface structure containing rings and devices
 *
 * This function is meant to go through and configure both the network
 * devices so that they contain rings, and configure the rings so that
 * they function with their network devices.
 **/
static void fm10k_assign_rings(struct fm10k_intfc *interface)
{
	if (fm10k_cache_ring_qos(interface))
		return;

	fm10k_cache_ring_rss(interface);
}

static void fm10k_init_reta(struct fm10k_intfc *interface)
{
	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
	u32 reta;

	/* If the Rx flow indirection table has been configured manually, we
	 * need to maintain it when possible.
	 */
	if (netif_is_rxfh_configured(interface->netdev)) {
		for (i = FM10K_RETA_SIZE; i--;) {
			reta = interface->reta[i];
			if ((((reta << 24) >> 24) < rss_i) &&
			    (((reta << 16) >> 24) < rss_i) &&
			    (((reta << 8) >> 24) < rss_i) &&
			    (((reta) >> 24) < rss_i))
				continue;

			/* this should never happen */
			dev_err(&interface->pdev->dev,
				"RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n");
			goto repopulate_reta;
		}

		/* do nothing if all of the elements are in bounds */
		return;
	}

repopulate_reta:
	fm10k_write_reta(interface, NULL);
}

/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *       - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{
	int err;

	/* Number of supported queues */
	fm10k_set_num_queues(interface);

	/* Configure MSI-X capability */
	err = fm10k_init_msix_capability(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to initialize MSI-X capability\n");
		goto err_init_msix;
	}

	/* Allocate memory for queues */
	err = fm10k_alloc_q_vectors(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to allocate queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* Map rings to devices, and map devices to physical queues */
	fm10k_assign_rings(interface);

	/* Initialize RSS redirection table */
	fm10k_init_reta(interface);

	return 0;

err_alloc_q_vectors:
	fm10k_reset_msix_capability(interface);
err_init_msix:
	fm10k_reset_num_queues(interface);
	return err;
}

/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{
	fm10k_free_q_vectors(interface);
	fm10k_reset_msix_capability(interface);
}