// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright(c) 2013 - 2019 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s\n", fm10k_driver_string);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					  fm10k_driver_name);
	if (!fm10k_workqueue)
		return -ENOMEM;

	fm10k_dbg_init();

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* page will only be NULL if the buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
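	/* Bias i by the ring size so that the wrap check inside the loop
	 * below is a simple test against zero; the bias is removed again
	 * once the loop has finished.
	 */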
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}

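/* Rx buffers are carved out of shared pages: when PAGE_SIZE < 8192 each
 * page holds two FM10K_RX_BUFSZ halves and fm10k_can_reuse_rx_page()
 * flips page_offset between them, so one half can be handed back to the
 * hardware while the network stack still holds a reference to the other.
 */
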
/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(fm10k_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @size: packet size from rx_desc
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      unsigned int size,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, 512);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}

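/* fm10k_fetch_rx_buffer may be handed a partially assembled skb when the
 * previous descriptor was not marked end-of-packet; in that case the new
 * buffer is chained onto the existing skb instead of allocating a fresh
 * one.
 */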
static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int size = le16_to_cpu(rx_desc->w.length);
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}

#define FM10K_RSS_L4_TYPES_MASK \
	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	/* Record Rx queue, or update macvlan statistics */
	if (!l2_accel)
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 false);

	skb->protocol = eth_type_trans(skb, dev);
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

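	/* Only push a VLAN tag up the stack when the VID differs from the
	 * ring's default VID; for the default VID, any 802.1p priority
	 * bits are still reported on an otherwise zero tag.
	 */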
	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

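/* fm10k_clean_rx_irq processes completed descriptors until either the
 * NAPI budget is exhausted or the hardware has nothing further to hand
 * us.  Used buffers are replenished in batches of FM10K_RX_BUFFER_WRITE
 * so the tail register is not written on every descriptor.
 */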
static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware; one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}

#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_udp_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_udp_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}

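/* Returns the inner ethertype of a VXLAN or NVGRE frame whose combined
 * headers the hardware can offload, or 0 when the checksum/TSO work must
 * fall back to software.
 */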
__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}

static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	u8 *transport_hdr;
	__be16 frag_off;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
		transport_hdr = skb_inner_transport_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
		transport_hdr = skb_transport_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		if (likely((transport_hdr - network_hdr.raw) ==
			   sizeof(struct ipv6hdr)))
			break;
		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
				      sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (unlikely(frag_off))
			l4_hdr = NEXTHDR_FRAGMENT;
		break;
	default:
		break;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
		fallthrough;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum, version=%d l4 proto=%x\n",
				 protocol, l4_hdr);
		}
		skb_checksum_help(skb);
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}

#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

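/* FM10K_SET_FLAG moves the _flag bit of _input to the bit position used
 * by _result.  Both masks are compile-time constants, so the ternary and
 * the multiply/divide collapse to a simple shift at build time.
 */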
static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	skb_frag_t *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

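	/* Walk the skb: the linear region and then each page fragment are
	 * DMA mapped and carved into descriptors of at most
	 * FM10K_MAX_DATA_PER_TXD bytes.  The dma_mapping_error() check at
	 * the top of the loop covers the mapping created at the bottom of
	 * the previous pass (or the head mapping on the first pass).
	 */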
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct fm10k_tx_buffer *first;
	unsigned short f;
	u32 tx_flags = 0;
	int tso;

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		count += TXD_USE_COUNT(skb_frag_size(frag));
	}

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

110376a540d4SAlexander Duyck out_drop: 110476a540d4SAlexander Duyck dev_kfree_skb_any(first->skb); 110576a540d4SAlexander Duyck first->skb = NULL; 110676a540d4SAlexander Duyck 110776a540d4SAlexander Duyck return NETDEV_TX_OK; 1108b101c962SAlexander Duyck } 1109b101c962SAlexander Duyck 1110b101c962SAlexander Duyck static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) 1111b101c962SAlexander Duyck { 1112b101c962SAlexander Duyck return ring->stats.packets; 1113b101c962SAlexander Duyck } 1114b101c962SAlexander Duyck 11155b9e4432SJacob Keller /** 11165b9e4432SJacob Keller * fm10k_get_tx_pending - how many Tx descriptors not processed 11175b9e4432SJacob Keller * @ring: the ring structure 11185b9e4432SJacob Keller * @in_sw: is tx_pending being checked in SW or in HW? 11195b9e4432SJacob Keller */ 11205b9e4432SJacob Keller u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw) 1121b101c962SAlexander Duyck { 112234bad71cSJacob Keller struct fm10k_intfc *interface = ring->q_vector->interface; 112334bad71cSJacob Keller struct fm10k_hw *hw = &interface->hw; 11245b9e4432SJacob Keller u32 head, tail; 112534bad71cSJacob Keller 11265b9e4432SJacob Keller if (likely(in_sw)) { 11275b9e4432SJacob Keller head = ring->next_to_clean; 11285b9e4432SJacob Keller tail = ring->next_to_use; 11295b9e4432SJacob Keller } else { 11305b9e4432SJacob Keller head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx)); 11315b9e4432SJacob Keller tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx)); 11325b9e4432SJacob Keller } 1133b101c962SAlexander Duyck 1134b101c962SAlexander Duyck return ((head <= tail) ? tail : tail + ring->count) - head; 1135b101c962SAlexander Duyck } 1136b101c962SAlexander Duyck 1137b101c962SAlexander Duyck bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) 1138b101c962SAlexander Duyck { 1139b101c962SAlexander Duyck u32 tx_done = fm10k_get_tx_completed(tx_ring); 1140b101c962SAlexander Duyck u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 11415b9e4432SJacob Keller u32 tx_pending = fm10k_get_tx_pending(tx_ring, true); 1142b101c962SAlexander Duyck 1143b101c962SAlexander Duyck clear_check_for_tx_hang(tx_ring); 1144b101c962SAlexander Duyck 1145b101c962SAlexander Duyck /* Check for a hung queue, but be thorough. This verifies 1146b101c962SAlexander Duyck * that a transmit has been completed since the previous 1147b101c962SAlexander Duyck * check AND there is at least one packet pending. By 1148b101c962SAlexander Duyck * requiring this to fail twice we avoid races with 1149b101c962SAlexander Duyck * clearing the ARMED bit and conditions where we 1150b101c962SAlexander Duyck * run the check_tx_hang logic with a transmit completion 1151b101c962SAlexander Duyck * pending but without time to complete it yet. 
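	 *
	 * Put differently: one failed check merely arms the bit below,
	 * and only a second consecutive check that sees no new
	 * completions causes test_and_set_bit() to return true and
	 * report a hang.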
1152b101c962SAlexander Duyck */ 1153b101c962SAlexander Duyck if (!tx_pending || (tx_done_old != tx_done)) { 1154b101c962SAlexander Duyck /* update completed stats and continue */ 1155b101c962SAlexander Duyck tx_ring->tx_stats.tx_done_old = tx_done; 1156b101c962SAlexander Duyck /* reset the countdown */ 115746929557SJacob Keller clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); 1158b101c962SAlexander Duyck 1159b101c962SAlexander Duyck return false; 1160b101c962SAlexander Duyck } 1161b101c962SAlexander Duyck 1162b101c962SAlexander Duyck /* make sure it is true for two checks in a row */ 116346929557SJacob Keller return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); 1164b101c962SAlexander Duyck } 1165b101c962SAlexander Duyck 1166b101c962SAlexander Duyck /** 1167b101c962SAlexander Duyck * fm10k_tx_timeout_reset - initiate reset due to Tx timeout 1168b101c962SAlexander Duyck * @interface: driver private struct 1169b101c962SAlexander Duyck **/ 1170b101c962SAlexander Duyck void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) 1171b101c962SAlexander Duyck { 1172b101c962SAlexander Duyck /* Do the reset outside of interrupt context */ 117346929557SJacob Keller if (!test_bit(__FM10K_DOWN, interface->state)) { 1174b101c962SAlexander Duyck interface->tx_timeout_count++; 11753ee7b3a3SJacob Keller set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); 1176b101c962SAlexander Duyck fm10k_service_event_schedule(interface); 1177b101c962SAlexander Duyck } 1178b101c962SAlexander Duyck } 1179b101c962SAlexander Duyck 1180b101c962SAlexander Duyck /** 1181b101c962SAlexander Duyck * fm10k_clean_tx_irq - Reclaim resources after transmit completes 1182b101c962SAlexander Duyck * @q_vector: structure containing interrupt and ring information 1183b101c962SAlexander Duyck * @tx_ring: tx ring to clean 1184144d8305SAlexander Duyck * @napi_budget: Used to determine if we are in netpoll 1185b101c962SAlexander Duyck **/ 1186b101c962SAlexander Duyck static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, 1187144d8305SAlexander Duyck struct fm10k_ring *tx_ring, int napi_budget) 1188b101c962SAlexander Duyck { 1189b101c962SAlexander Duyck struct fm10k_intfc *interface = q_vector->interface; 1190b101c962SAlexander Duyck struct fm10k_tx_buffer *tx_buffer; 1191b101c962SAlexander Duyck struct fm10k_tx_desc *tx_desc; 1192b101c962SAlexander Duyck unsigned int total_bytes = 0, total_packets = 0; 1193b101c962SAlexander Duyck unsigned int budget = q_vector->tx.work_limit; 1194b101c962SAlexander Duyck unsigned int i = tx_ring->next_to_clean; 1195b101c962SAlexander Duyck 119646929557SJacob Keller if (test_bit(__FM10K_DOWN, interface->state)) 1197b101c962SAlexander Duyck return true; 1198b101c962SAlexander Duyck 1199b101c962SAlexander Duyck tx_buffer = &tx_ring->tx_buffer[i]; 1200b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, i); 1201b101c962SAlexander Duyck i -= tx_ring->count; 1202b101c962SAlexander Duyck 1203b101c962SAlexander Duyck do { 1204b101c962SAlexander Duyck struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; 1205b101c962SAlexander Duyck 1206b101c962SAlexander Duyck /* if next_to_watch is not set then there is no work pending */ 1207b101c962SAlexander Duyck if (!eop_desc) 1208b101c962SAlexander Duyck break; 1209b101c962SAlexander Duyck 1210b101c962SAlexander Duyck /* prevent any other reads prior to eop_desc */ 12117b8edcc6SBrian King smp_rmb(); 1212b101c962SAlexander Duyck 1213b101c962SAlexander Duyck /* if DD is not set pending work has not been completed */ 1214b101c962SAlexander 
Duyck if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) 1215b101c962SAlexander Duyck break; 1216b101c962SAlexander Duyck 1217b101c962SAlexander Duyck /* clear next_to_watch to prevent false hangs */ 1218b101c962SAlexander Duyck tx_buffer->next_to_watch = NULL; 1219b101c962SAlexander Duyck 1220b101c962SAlexander Duyck /* update the statistics for this packet */ 1221b101c962SAlexander Duyck total_bytes += tx_buffer->bytecount; 1222b101c962SAlexander Duyck total_packets += tx_buffer->gso_segs; 1223b101c962SAlexander Duyck 1224b101c962SAlexander Duyck /* free the skb */ 1225144d8305SAlexander Duyck napi_consume_skb(tx_buffer->skb, napi_budget); 1226b101c962SAlexander Duyck 1227b101c962SAlexander Duyck /* unmap skb header data */ 1228b101c962SAlexander Duyck dma_unmap_single(tx_ring->dev, 1229b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1230b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1231b101c962SAlexander Duyck DMA_TO_DEVICE); 1232b101c962SAlexander Duyck 1233b101c962SAlexander Duyck /* clear tx_buffer data */ 1234b101c962SAlexander Duyck tx_buffer->skb = NULL; 1235b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1236b101c962SAlexander Duyck 1237b101c962SAlexander Duyck /* unmap remaining buffers */ 1238b101c962SAlexander Duyck while (tx_desc != eop_desc) { 1239b101c962SAlexander Duyck tx_buffer++; 1240b101c962SAlexander Duyck tx_desc++; 1241b101c962SAlexander Duyck i++; 1242b101c962SAlexander Duyck if (unlikely(!i)) { 1243b101c962SAlexander Duyck i -= tx_ring->count; 1244b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1245b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1246b101c962SAlexander Duyck } 1247b101c962SAlexander Duyck 1248b101c962SAlexander Duyck /* unmap any remaining paged data */ 1249b101c962SAlexander Duyck if (dma_unmap_len(tx_buffer, len)) { 1250b101c962SAlexander Duyck dma_unmap_page(tx_ring->dev, 1251b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1252b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1253b101c962SAlexander Duyck DMA_TO_DEVICE); 1254b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1255b101c962SAlexander Duyck } 1256b101c962SAlexander Duyck } 1257b101c962SAlexander Duyck 1258b101c962SAlexander Duyck /* move us one more past the eop_desc for start of next pkt */ 1259b101c962SAlexander Duyck tx_buffer++; 1260b101c962SAlexander Duyck tx_desc++; 1261b101c962SAlexander Duyck i++; 1262b101c962SAlexander Duyck if (unlikely(!i)) { 1263b101c962SAlexander Duyck i -= tx_ring->count; 1264b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1265b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1266b101c962SAlexander Duyck } 1267b101c962SAlexander Duyck 1268b101c962SAlexander Duyck /* issue prefetch for next Tx descriptor */ 1269b101c962SAlexander Duyck prefetch(tx_desc); 1270b101c962SAlexander Duyck 1271b101c962SAlexander Duyck /* update budget accounting */ 1272b101c962SAlexander Duyck budget--; 1273b101c962SAlexander Duyck } while (likely(budget)); 1274b101c962SAlexander Duyck 1275b101c962SAlexander Duyck i += tx_ring->count; 1276b101c962SAlexander Duyck tx_ring->next_to_clean = i; 1277b101c962SAlexander Duyck u64_stats_update_begin(&tx_ring->syncp); 1278b101c962SAlexander Duyck tx_ring->stats.bytes += total_bytes; 1279b101c962SAlexander Duyck tx_ring->stats.packets += total_packets; 1280b101c962SAlexander Duyck u64_stats_update_end(&tx_ring->syncp); 1281b101c962SAlexander Duyck q_vector->tx.total_bytes += total_bytes; 1282b101c962SAlexander Duyck 
q_vector->tx.total_packets += total_packets; 1283b101c962SAlexander Duyck 1284b101c962SAlexander Duyck if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { 1285b101c962SAlexander Duyck /* schedule immediate reset if we believe we hung */ 1286b101c962SAlexander Duyck struct fm10k_hw *hw = &interface->hw; 1287b101c962SAlexander Duyck 1288b101c962SAlexander Duyck netif_err(interface, drv, tx_ring->netdev, 1289b101c962SAlexander Duyck "Detected Tx Unit Hang\n" 1290b101c962SAlexander Duyck " Tx Queue <%d>\n" 1291b101c962SAlexander Duyck " TDH, TDT <%x>, <%x>\n" 1292b101c962SAlexander Duyck " next_to_use <%x>\n" 1293b101c962SAlexander Duyck " next_to_clean <%x>\n", 1294b101c962SAlexander Duyck tx_ring->queue_index, 1295b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), 1296b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), 1297b101c962SAlexander Duyck tx_ring->next_to_use, i); 1298b101c962SAlexander Duyck 1299b101c962SAlexander Duyck netif_stop_subqueue(tx_ring->netdev, 1300b101c962SAlexander Duyck tx_ring->queue_index); 1301b101c962SAlexander Duyck 1302b101c962SAlexander Duyck netif_info(interface, probe, tx_ring->netdev, 1303b101c962SAlexander Duyck "tx hang %d detected on queue %d, resetting interface\n", 1304b101c962SAlexander Duyck interface->tx_timeout_count + 1, 1305b101c962SAlexander Duyck tx_ring->queue_index); 1306b101c962SAlexander Duyck 1307b101c962SAlexander Duyck fm10k_tx_timeout_reset(interface); 1308b101c962SAlexander Duyck 1309b101c962SAlexander Duyck /* the netdev is about to reset, no point in enabling stuff */ 1310b101c962SAlexander Duyck return true; 1311b101c962SAlexander Duyck } 1312b101c962SAlexander Duyck 1313b101c962SAlexander Duyck /* notify netdev of completed buffers */ 1314b101c962SAlexander Duyck netdev_tx_completed_queue(txring_txq(tx_ring), 1315b101c962SAlexander Duyck total_packets, total_bytes); 1316b101c962SAlexander Duyck 1317b101c962SAlexander Duyck #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2) 1318b101c962SAlexander Duyck if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 1319b101c962SAlexander Duyck (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 1320b101c962SAlexander Duyck /* Make sure that anybody stopping the queue after this 1321b101c962SAlexander Duyck * sees the new next_to_clean. 1322b101c962SAlexander Duyck */ 1323b101c962SAlexander Duyck smp_mb(); 1324b101c962SAlexander Duyck if (__netif_subqueue_stopped(tx_ring->netdev, 1325b101c962SAlexander Duyck tx_ring->queue_index) && 132646929557SJacob Keller !test_bit(__FM10K_DOWN, interface->state)) { 1327b101c962SAlexander Duyck netif_wake_subqueue(tx_ring->netdev, 1328b101c962SAlexander Duyck tx_ring->queue_index); 1329b101c962SAlexander Duyck ++tx_ring->tx_stats.restart_queue; 1330b101c962SAlexander Duyck } 1331b101c962SAlexander Duyck } 1332b101c962SAlexander Duyck 1333b101c962SAlexander Duyck return !!budget; 1334b101c962SAlexander Duyck } 1335b101c962SAlexander Duyck 133618283cadSAlexander Duyck /** 133718283cadSAlexander Duyck * fm10k_update_itr - update the dynamic ITR value based on packet size 133818283cadSAlexander Duyck * 133918283cadSAlexander Duyck * Stores a new ITR value based strictly on packet size.
The 134018283cadSAlexander Duyck * divisors and thresholds used by this function were determined based 134118283cadSAlexander Duyck * on theoretical maximum wire speed and testing data, in order to 134218283cadSAlexander Duyck * minimize response time while increasing bulk throughput. 134318283cadSAlexander Duyck * 134418283cadSAlexander Duyck * @ring_container: Container for rings to have ITR updated 134518283cadSAlexander Duyck **/ 134618283cadSAlexander Duyck static void fm10k_update_itr(struct fm10k_ring_container *ring_container) 134718283cadSAlexander Duyck { 1348242722ddSJacob Keller unsigned int avg_wire_size, packets, itr_round; 134918283cadSAlexander Duyck 135018283cadSAlexander Duyck /* Only update ITR if we are using adaptive setting */ 1351584373f5SJacob Keller if (!ITR_IS_ADAPTIVE(ring_container->itr)) 135218283cadSAlexander Duyck goto clear_counts; 135318283cadSAlexander Duyck 135418283cadSAlexander Duyck packets = ring_container->total_packets; 135518283cadSAlexander Duyck if (!packets) 135618283cadSAlexander Duyck goto clear_counts; 135718283cadSAlexander Duyck 135818283cadSAlexander Duyck avg_wire_size = ring_container->total_bytes / packets; 135918283cadSAlexander Duyck 1360242722ddSJacob Keller /* The following is a crude approximation of: 1361242722ddSJacob Keller * wmem_default / (size + overhead) = desired_pkts_per_int 1362242722ddSJacob Keller * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 1363242722ddSJacob Keller * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1364242722ddSJacob Keller * 1365242722ddSJacob Keller * Assuming wmem_default is 212992 and overhead is 640 bytes per 1366242722ddSJacob Keller * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1367242722ddSJacob Keller * formula down to 1368242722ddSJacob Keller * 1369242722ddSJacob Keller * (34 * (size + 24)) / (size + 640) = ITR 1370242722ddSJacob Keller * 1371242722ddSJacob Keller * We first do some math on the packet size and then finally bitshift 1372242722ddSJacob Keller * by 8 after rounding up. We also have to account for PCIe link speed 1373242722ddSJacob Keller * difference as ITR scales based on this. 1374242722ddSJacob Keller */ 1375242722ddSJacob Keller if (avg_wire_size <= 360) { 1376242722ddSJacob Keller /* Start at 250K ints/sec and gradually drop to 77K ints/sec */ 1377242722ddSJacob Keller avg_wire_size *= 8; 1378242722ddSJacob Keller avg_wire_size += 376; 1379242722ddSJacob Keller } else if (avg_wire_size <= 1152) { 1380242722ddSJacob Keller /* 77K ints/sec to 45K ints/sec */ 1381242722ddSJacob Keller avg_wire_size *= 3; 1382242722ddSJacob Keller avg_wire_size += 2176; 1383242722ddSJacob Keller } else if (avg_wire_size <= 1920) { 1384242722ddSJacob Keller /* 45K ints/sec to 38K ints/sec */ 1385242722ddSJacob Keller avg_wire_size += 4480; 1386242722ddSJacob Keller } else { 1387242722ddSJacob Keller /* plateau at a limit of 38K ints/sec */ 1388242722ddSJacob Keller avg_wire_size = 6656; 1389242722ddSJacob Keller } 139018283cadSAlexander Duyck 1391242722ddSJacob Keller /* Perform final bitshift for division after rounding up to ensure 1392242722ddSJacob Keller * that the calculation will never drop below 1. The bit shift 1393242722ddSJacob Keller * accounts for changes in the ITR due to PCIe link speed.
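	 *
	 * Worked example (illustrative numbers): avg_wire_size = 360 takes
	 * the first branch and becomes 360 * 8 + 376 = 3256; assuming an
	 * itr_scale of 0, so itr_round == 8, the rounded shift below gives
	 * (3256 + 255) >> 8 = 13, which agrees with the crude formula
	 * above: (34 * (360 + 24)) / (360 + 640) is roughly 13.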
1394242722ddSJacob Keller */ 1395ce4dad2cSJacob Keller itr_round = READ_ONCE(ring_container->itr_scale) + 8; 1396fcdb0a99SBruce Allan avg_wire_size += BIT(itr_round) - 1; 1397242722ddSJacob Keller avg_wire_size >>= itr_round; 139818283cadSAlexander Duyck 139918283cadSAlexander Duyck /* write back value and retain adaptive flag */ 140018283cadSAlexander Duyck ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; 140118283cadSAlexander Duyck 140218283cadSAlexander Duyck clear_counts: 140318283cadSAlexander Duyck ring_container->total_bytes = 0; 140418283cadSAlexander Duyck ring_container->total_packets = 0; 140518283cadSAlexander Duyck } 140618283cadSAlexander Duyck 140718283cadSAlexander Duyck static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) 140818283cadSAlexander Duyck { 140918283cadSAlexander Duyck /* Enable auto-mask and clear the current mask */ 141018283cadSAlexander Duyck u32 itr = FM10K_ITR_ENABLE; 141118283cadSAlexander Duyck 141218283cadSAlexander Duyck /* Update Tx ITR */ 141318283cadSAlexander Duyck fm10k_update_itr(&q_vector->tx); 141418283cadSAlexander Duyck 141518283cadSAlexander Duyck /* Update Rx ITR */ 141618283cadSAlexander Duyck fm10k_update_itr(&q_vector->rx); 141718283cadSAlexander Duyck 141818283cadSAlexander Duyck /* Store Tx itr in timer slot 0 */ 141918283cadSAlexander Duyck itr |= (q_vector->tx.itr & FM10K_ITR_MAX); 142018283cadSAlexander Duyck 142118283cadSAlexander Duyck /* Shift Rx itr to timer slot 1 */ 142218283cadSAlexander Duyck itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; 142318283cadSAlexander Duyck 142418283cadSAlexander Duyck /* Write the final value to the ITR register */ 142518283cadSAlexander Duyck writel(itr, q_vector->itr); 142618283cadSAlexander Duyck } 142718283cadSAlexander Duyck 142818283cadSAlexander Duyck static int fm10k_poll(struct napi_struct *napi, int budget) 142918283cadSAlexander Duyck { 143018283cadSAlexander Duyck struct fm10k_q_vector *q_vector = 143118283cadSAlexander Duyck container_of(napi, struct fm10k_q_vector, napi); 1432b101c962SAlexander Duyck struct fm10k_ring *ring; 143332b3e08fSJesse Brandeburg int per_ring_budget, work_done = 0; 1434b101c962SAlexander Duyck bool clean_complete = true; 1435b101c962SAlexander Duyck 1436144d8305SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) { 1437144d8305SAlexander Duyck if (!fm10k_clean_tx_irq(q_vector, ring, budget)) 1438144d8305SAlexander Duyck clean_complete = false; 1439144d8305SAlexander Duyck } 1440b101c962SAlexander Duyck 14419f872986SAlexander Duyck /* Handle case where we are called by netpoll with a budget of 0 */ 14429f872986SAlexander Duyck if (budget <= 0) 14439f872986SAlexander Duyck return budget; 14449f872986SAlexander Duyck 1445b101c962SAlexander Duyck /* attempt to distribute budget to each queue fairly, but don't 1446b101c962SAlexander Duyck * allow the budget to go below 1 because we'll exit polling 1447b101c962SAlexander Duyck */ 1448b101c962SAlexander Duyck if (q_vector->rx.count > 1) 1449b101c962SAlexander Duyck per_ring_budget = max(budget / q_vector->rx.count, 1); 1450b101c962SAlexander Duyck else 1451b101c962SAlexander Duyck per_ring_budget = budget; 1452b101c962SAlexander Duyck 145332b3e08fSJesse Brandeburg fm10k_for_each_ring(ring, q_vector->rx) { 145432b3e08fSJesse Brandeburg int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); 145532b3e08fSJesse Brandeburg 145632b3e08fSJesse Brandeburg work_done += work; 1457144d8305SAlexander Duyck if (work >= per_ring_budget) 1458144d8305SAlexander Duyck 
clean_complete = false; 145932b3e08fSJesse Brandeburg } 1460b101c962SAlexander Duyck 1461b101c962SAlexander Duyck /* If all work not completed, return budget and keep polling */ 1462b101c962SAlexander Duyck if (!clean_complete) 1463b101c962SAlexander Duyck return budget; 146418283cadSAlexander Duyck 14650bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 14660bcd952fSJesse Brandeburg * poll us due to busy-polling 14670bcd952fSJesse Brandeburg */ 14680bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) 146918283cadSAlexander Duyck fm10k_qv_enable(q_vector); 147018283cadSAlexander Duyck 1471e5fbfb78SJacob Keller return min(work_done, budget - 1); 147218283cadSAlexander Duyck } 147318283cadSAlexander Duyck 147418283cadSAlexander Duyck /** 1475aa3ac822SAlexander Duyck * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device 1476aa3ac822SAlexander Duyck * @interface: board private structure to initialize 1477aa3ac822SAlexander Duyck * 1478aa3ac822SAlexander Duyck * When QoS (Quality of Service) is enabled, allocate queues for 1479aa3ac822SAlexander Duyck * each traffic class. If multiqueue isn't available, then abort QoS 1480aa3ac822SAlexander Duyck * initialization. 1481aa3ac822SAlexander Duyck * 1482aa3ac822SAlexander Duyck * This function handles all combinations of QoS and RSS. 1483aa3ac822SAlexander Duyck * 1484aa3ac822SAlexander Duyck **/ 1485aa3ac822SAlexander Duyck static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) 1486aa3ac822SAlexander Duyck { 1487aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev; 1488aa3ac822SAlexander Duyck struct fm10k_ring_feature *f; 1489aa3ac822SAlexander Duyck int rss_i, i; 1490aa3ac822SAlexander Duyck int pcs; 1491aa3ac822SAlexander Duyck 1492aa3ac822SAlexander Duyck /* Map queue offset and counts onto allocated tx queues */ 1493aa3ac822SAlexander Duyck pcs = netdev_get_num_tc(dev); 1494aa3ac822SAlexander Duyck 1495aa3ac822SAlexander Duyck if (pcs <= 1) 1496aa3ac822SAlexander Duyck return false; 1497aa3ac822SAlexander Duyck 1498aa3ac822SAlexander Duyck /* set QoS mask and indices */ 1499aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_QOS]; 1500aa3ac822SAlexander Duyck f->indices = pcs; 1501fcdb0a99SBruce Allan f->mask = BIT(fls(pcs - 1)) - 1; 1502aa3ac822SAlexander Duyck 1503aa3ac822SAlexander Duyck /* determine the upper limit for our current DCB mode */ 1504aa3ac822SAlexander Duyck rss_i = interface->hw.mac.max_queues / pcs; 1505fcdb0a99SBruce Allan rss_i = BIT(fls(rss_i) - 1); 1506aa3ac822SAlexander Duyck 1507aa3ac822SAlexander Duyck /* set RSS mask and indices */ 1508aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS]; 1509aa3ac822SAlexander Duyck rss_i = min_t(u16, rss_i, f->limit); 1510aa3ac822SAlexander Duyck f->indices = rss_i; 1511fcdb0a99SBruce Allan f->mask = BIT(fls(rss_i - 1)) - 1; 1512aa3ac822SAlexander Duyck 1513aa3ac822SAlexander Duyck /* configure pause class to queue mapping */ 1514aa3ac822SAlexander Duyck for (i = 0; i < pcs; i++) 1515aa3ac822SAlexander Duyck netdev_set_tc_queue(dev, i, rss_i, rss_i * i); 1516aa3ac822SAlexander Duyck 1517aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i * pcs; 1518aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i * pcs; 1519aa3ac822SAlexander Duyck 1520aa3ac822SAlexander Duyck return true; 1521aa3ac822SAlexander Duyck } 1522aa3ac822SAlexander Duyck 1523aa3ac822SAlexander Duyck /** 1524aa3ac822SAlexander Duyck * fm10k_set_rss_queues: Allocate queues for RSS
1525aa3ac822SAlexander Duyck * @interface: board private structure to initialize 1526aa3ac822SAlexander Duyck * 1527aa3ac822SAlexander Duyck * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 1528aa3ac822SAlexander Duyck * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 1529aa3ac822SAlexander Duyck * 1530aa3ac822SAlexander Duyck **/ 1531aa3ac822SAlexander Duyck static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) 1532aa3ac822SAlexander Duyck { 1533aa3ac822SAlexander Duyck struct fm10k_ring_feature *f; 1534aa3ac822SAlexander Duyck u16 rss_i; 1535aa3ac822SAlexander Duyck 1536aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS]; 1537aa3ac822SAlexander Duyck rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); 1538aa3ac822SAlexander Duyck 1539aa3ac822SAlexander Duyck /* record indices and power of 2 mask for RSS */ 1540aa3ac822SAlexander Duyck f->indices = rss_i; 1541fcdb0a99SBruce Allan f->mask = BIT(fls(rss_i - 1)) - 1; 1542aa3ac822SAlexander Duyck 1543aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i; 1544aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i; 1545aa3ac822SAlexander Duyck 1546aa3ac822SAlexander Duyck return true; 1547aa3ac822SAlexander Duyck } 1548aa3ac822SAlexander Duyck 1549aa3ac822SAlexander Duyck /** 155018283cadSAlexander Duyck * fm10k_set_num_queues: Allocate queues for device, feature dependent 155118283cadSAlexander Duyck * @interface: board private structure to initialize 155218283cadSAlexander Duyck * 155318283cadSAlexander Duyck * This is the top level queue allocation routine. The order here is very 155418283cadSAlexander Duyck * important, starting with the "most" number of features turned on at once, 155518283cadSAlexander Duyck * and ending with the smallest set of features. This way large combinations 155618283cadSAlexander Duyck * can be allocated if they're turned on, and smaller combinations are the 155718283cadSAlexander Duyck * fall through conditions. 155818283cadSAlexander Duyck * 155918283cadSAlexander Duyck **/ 156018283cadSAlexander Duyck static void fm10k_set_num_queues(struct fm10k_intfc *interface) 156118283cadSAlexander Duyck { 1562b3525696SJacob Keller /* Attempt to setup QoS and RSS first */ 1563aa3ac822SAlexander Duyck if (fm10k_set_qos_queues(interface)) 1564aa3ac822SAlexander Duyck return; 1565aa3ac822SAlexander Duyck 1566b3525696SJacob Keller /* If we don't have QoS, just fallback to only RSS. */ 1567aa3ac822SAlexander Duyck fm10k_set_rss_queues(interface); 156818283cadSAlexander Duyck } 156918283cadSAlexander Duyck 157018283cadSAlexander Duyck /** 15714be37c42SJacob Keller * fm10k_reset_num_queues - Reset the number of queues to zero 15724be37c42SJacob Keller * @interface: board private structure 15734be37c42SJacob Keller * 15744be37c42SJacob Keller * This function should be called whenever we need to reset the number of 15754be37c42SJacob Keller * queues after an error condition. 
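 * Zeroing num_q_vectors as well ensures that later teardown paths such
 * as fm10k_free_q_vectors() see a consistent, empty configuration.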
15764be37c42SJacob Keller */ 15774be37c42SJacob Keller static void fm10k_reset_num_queues(struct fm10k_intfc *interface) 15784be37c42SJacob Keller { 15794be37c42SJacob Keller interface->num_tx_queues = 0; 15804be37c42SJacob Keller interface->num_rx_queues = 0; 15814be37c42SJacob Keller interface->num_q_vectors = 0; 15824be37c42SJacob Keller } 15834be37c42SJacob Keller 15844be37c42SJacob Keller /** 158518283cadSAlexander Duyck * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector 158618283cadSAlexander Duyck * @interface: board private structure to initialize 158718283cadSAlexander Duyck * @v_count: q_vectors allocated on interface, used for ring interleaving 158818283cadSAlexander Duyck * @v_idx: index of vector in interface struct 158918283cadSAlexander Duyck * @txr_count: total number of Tx rings to allocate 159018283cadSAlexander Duyck * @txr_idx: index of first Tx ring to allocate 159118283cadSAlexander Duyck * @rxr_count: total number of Rx rings to allocate 159218283cadSAlexander Duyck * @rxr_idx: index of first Rx ring to allocate 159318283cadSAlexander Duyck * 159418283cadSAlexander Duyck * We allocate one q_vector. If allocation fails we return -ENOMEM. 159518283cadSAlexander Duyck **/ 159618283cadSAlexander Duyck static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, 159718283cadSAlexander Duyck unsigned int v_count, unsigned int v_idx, 159818283cadSAlexander Duyck unsigned int txr_count, unsigned int txr_idx, 159918283cadSAlexander Duyck unsigned int rxr_count, unsigned int rxr_idx) 160018283cadSAlexander Duyck { 160118283cadSAlexander Duyck struct fm10k_q_vector *q_vector; 1602e27ef599SAlexander Duyck struct fm10k_ring *ring; 16039a00536cSGustavo A. R. Silva int ring_count; 160418283cadSAlexander Duyck 160518283cadSAlexander Duyck ring_count = txr_count + rxr_count; 160618283cadSAlexander Duyck 160718283cadSAlexander Duyck /* allocate q_vector and rings */ 16089a00536cSGustavo A. R. 
Silva q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL); 160918283cadSAlexander Duyck if (!q_vector) 161018283cadSAlexander Duyck return -ENOMEM; 161118283cadSAlexander Duyck 161218283cadSAlexander Duyck /* initialize NAPI */ 161318283cadSAlexander Duyck netif_napi_add(interface->netdev, &q_vector->napi, 161418283cadSAlexander Duyck fm10k_poll, NAPI_POLL_WEIGHT); 161518283cadSAlexander Duyck 161618283cadSAlexander Duyck /* tie q_vector and interface together */ 161718283cadSAlexander Duyck interface->q_vector[v_idx] = q_vector; 161818283cadSAlexander Duyck q_vector->interface = interface; 161918283cadSAlexander Duyck q_vector->v_idx = v_idx; 162018283cadSAlexander Duyck 1621e27ef599SAlexander Duyck /* initialize pointer to rings */ 1622e27ef599SAlexander Duyck ring = q_vector->ring; 1623e27ef599SAlexander Duyck 162418283cadSAlexander Duyck /* save Tx ring container info */ 1625e27ef599SAlexander Duyck q_vector->tx.ring = ring; 1626e27ef599SAlexander Duyck q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; 162718283cadSAlexander Duyck q_vector->tx.itr = interface->tx_itr; 1628242722ddSJacob Keller q_vector->tx.itr_scale = interface->hw.mac.itr_scale; 162918283cadSAlexander Duyck q_vector->tx.count = txr_count; 163018283cadSAlexander Duyck 1631e27ef599SAlexander Duyck while (txr_count) { 1632e27ef599SAlexander Duyck /* assign generic ring traits */ 1633e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1634e27ef599SAlexander Duyck ring->netdev = interface->netdev; 1635e27ef599SAlexander Duyck 1636e27ef599SAlexander Duyck /* configure backlink on ring */ 1637e27ef599SAlexander Duyck ring->q_vector = q_vector; 1638e27ef599SAlexander Duyck 1639e27ef599SAlexander Duyck /* apply Tx specific ring traits */ 1640e27ef599SAlexander Duyck ring->count = interface->tx_ring_count; 1641e27ef599SAlexander Duyck ring->queue_index = txr_idx; 1642e27ef599SAlexander Duyck 1643e27ef599SAlexander Duyck /* assign ring to interface */ 1644e27ef599SAlexander Duyck interface->tx_ring[txr_idx] = ring; 1645e27ef599SAlexander Duyck 1646e27ef599SAlexander Duyck /* update count and index */ 1647e27ef599SAlexander Duyck txr_count--; 1648e27ef599SAlexander Duyck txr_idx += v_count; 1649e27ef599SAlexander Duyck 1650e27ef599SAlexander Duyck /* push pointer to next ring */ 1651e27ef599SAlexander Duyck ring++; 1652e27ef599SAlexander Duyck } 1653e27ef599SAlexander Duyck 165418283cadSAlexander Duyck /* save Rx ring container info */ 1655e27ef599SAlexander Duyck q_vector->rx.ring = ring; 165618283cadSAlexander Duyck q_vector->rx.itr = interface->rx_itr; 1657242722ddSJacob Keller q_vector->rx.itr_scale = interface->hw.mac.itr_scale; 165818283cadSAlexander Duyck q_vector->rx.count = rxr_count; 165918283cadSAlexander Duyck 1660e27ef599SAlexander Duyck while (rxr_count) { 1661e27ef599SAlexander Duyck /* assign generic ring traits */ 1662e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1663e27ef599SAlexander Duyck ring->netdev = interface->netdev; 16645cd5e2e9SAlexander Duyck rcu_assign_pointer(ring->l2_accel, interface->l2_accel); 1665e27ef599SAlexander Duyck 1666e27ef599SAlexander Duyck /* configure backlink on ring */ 1667e27ef599SAlexander Duyck ring->q_vector = q_vector; 1668e27ef599SAlexander Duyck 1669e27ef599SAlexander Duyck /* apply Rx specific ring traits */ 1670e27ef599SAlexander Duyck ring->count = interface->rx_ring_count; 1671e27ef599SAlexander Duyck ring->queue_index = rxr_idx; 1672e27ef599SAlexander Duyck 1673e27ef599SAlexander Duyck /* assign ring to interface */ 
1674e27ef599SAlexander Duyck interface->rx_ring[rxr_idx] = ring; 1675e27ef599SAlexander Duyck 1676e27ef599SAlexander Duyck /* update count and index */ 1677e27ef599SAlexander Duyck rxr_count--; 1678e27ef599SAlexander Duyck rxr_idx += v_count; 1679e27ef599SAlexander Duyck 1680e27ef599SAlexander Duyck /* push pointer to next ring */ 1681e27ef599SAlexander Duyck ring++; 1682e27ef599SAlexander Duyck } 1683e27ef599SAlexander Duyck 16847461fd91SAlexander Duyck fm10k_dbg_q_vector_init(q_vector); 16857461fd91SAlexander Duyck 168618283cadSAlexander Duyck return 0; 168718283cadSAlexander Duyck } 168818283cadSAlexander Duyck 168918283cadSAlexander Duyck /** 169018283cadSAlexander Duyck * fm10k_free_q_vector - Free memory allocated for specific interrupt vector 169118283cadSAlexander Duyck * @interface: board private structure to initialize 169218283cadSAlexander Duyck * @v_idx: Index of vector to be freed 169318283cadSAlexander Duyck * 169418283cadSAlexander Duyck * This function frees the memory allocated to the q_vector. In addition if 169518283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior 169618283cadSAlexander Duyck * to freeing the q_vector. 169718283cadSAlexander Duyck **/ 169818283cadSAlexander Duyck static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) 169918283cadSAlexander Duyck { 170018283cadSAlexander Duyck struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; 1701e27ef599SAlexander Duyck struct fm10k_ring *ring; 1702e27ef599SAlexander Duyck 17037461fd91SAlexander Duyck fm10k_dbg_q_vector_exit(q_vector); 17047461fd91SAlexander Duyck 1705e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) 1706e27ef599SAlexander Duyck interface->tx_ring[ring->queue_index] = NULL; 1707e27ef599SAlexander Duyck 1708e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->rx) 1709e27ef599SAlexander Duyck interface->rx_ring[ring->queue_index] = NULL; 171018283cadSAlexander Duyck 171118283cadSAlexander Duyck interface->q_vector[v_idx] = NULL; 171218283cadSAlexander Duyck netif_napi_del(&q_vector->napi); 171318283cadSAlexander Duyck kfree_rcu(q_vector, rcu); 171418283cadSAlexander Duyck } 171518283cadSAlexander Duyck 171618283cadSAlexander Duyck /** 171718283cadSAlexander Duyck * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors 171818283cadSAlexander Duyck * @interface: board private structure to initialize 171918283cadSAlexander Duyck * 172018283cadSAlexander Duyck * We allocate one q_vector per queue interrupt. If allocation fails we 172118283cadSAlexander Duyck * return -ENOMEM. 
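 *
 * Illustrative example: with 16 Tx and 16 Rx queues served by 8
 * q_vectors, each vector is assigned DIV_ROUND_UP(16, 8) = 2 Tx and
 * 2 Rx rings; Rx queues only get dedicated vectors when there are at
 * least as many q_vectors as total queues.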
172218283cadSAlexander Duyck **/ 172318283cadSAlexander Duyck static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) 172418283cadSAlexander Duyck { 172518283cadSAlexander Duyck unsigned int q_vectors = interface->num_q_vectors; 172618283cadSAlexander Duyck unsigned int rxr_remaining = interface->num_rx_queues; 172718283cadSAlexander Duyck unsigned int txr_remaining = interface->num_tx_queues; 172818283cadSAlexander Duyck unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; 172918283cadSAlexander Duyck int err; 173018283cadSAlexander Duyck 173118283cadSAlexander Duyck if (q_vectors >= (rxr_remaining + txr_remaining)) { 173218283cadSAlexander Duyck for (; rxr_remaining; v_idx++) { 173318283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 173418283cadSAlexander Duyck 0, 0, 1, rxr_idx); 173518283cadSAlexander Duyck if (err) 173618283cadSAlexander Duyck goto err_out; 173718283cadSAlexander Duyck 173818283cadSAlexander Duyck /* update counts and index */ 173918283cadSAlexander Duyck rxr_remaining--; 174018283cadSAlexander Duyck rxr_idx++; 174118283cadSAlexander Duyck } 174218283cadSAlexander Duyck } 174318283cadSAlexander Duyck 174418283cadSAlexander Duyck for (; v_idx < q_vectors; v_idx++) { 174518283cadSAlexander Duyck int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 174618283cadSAlexander Duyck int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 174718283cadSAlexander Duyck 174818283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 174918283cadSAlexander Duyck tqpv, txr_idx, 175018283cadSAlexander Duyck rqpv, rxr_idx); 175118283cadSAlexander Duyck 175218283cadSAlexander Duyck if (err) 175318283cadSAlexander Duyck goto err_out; 175418283cadSAlexander Duyck 175518283cadSAlexander Duyck /* update counts and index */ 175618283cadSAlexander Duyck rxr_remaining -= rqpv; 175718283cadSAlexander Duyck txr_remaining -= tqpv; 175818283cadSAlexander Duyck rxr_idx++; 175918283cadSAlexander Duyck txr_idx++; 176018283cadSAlexander Duyck } 176118283cadSAlexander Duyck 176218283cadSAlexander Duyck return 0; 176318283cadSAlexander Duyck 176418283cadSAlexander Duyck err_out: 17654be37c42SJacob Keller fm10k_reset_num_queues(interface); 176618283cadSAlexander Duyck 176718283cadSAlexander Duyck while (v_idx--) 176818283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx); 176918283cadSAlexander Duyck 177018283cadSAlexander Duyck return -ENOMEM; 177118283cadSAlexander Duyck } 177218283cadSAlexander Duyck 177318283cadSAlexander Duyck /** 177418283cadSAlexander Duyck * fm10k_free_q_vectors - Free memory allocated for interrupt vectors 177518283cadSAlexander Duyck * @interface: board private structure to initialize 177618283cadSAlexander Duyck * 177718283cadSAlexander Duyck * This function frees the memory allocated to the q_vectors. In addition if 177818283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior 177918283cadSAlexander Duyck * to freeing the q_vector. 
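 * Vectors are released in reverse index order, unwinding the order in
 * which fm10k_alloc_q_vectors() created them.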
178018283cadSAlexander Duyck **/ 178118283cadSAlexander Duyck static void fm10k_free_q_vectors(struct fm10k_intfc *interface) 178218283cadSAlexander Duyck { 178318283cadSAlexander Duyck int v_idx = interface->num_q_vectors; 178418283cadSAlexander Duyck 17854be37c42SJacob Keller fm10k_reset_num_queues(interface); 178618283cadSAlexander Duyck 178718283cadSAlexander Duyck while (v_idx--) 178818283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx); 178918283cadSAlexander Duyck } 179018283cadSAlexander Duyck 179118283cadSAlexander Duyck /** 179218283cadSAlexander Duyck * fm10k_reset_msix_capability - reset MSI-X capability 179318283cadSAlexander Duyck * @interface: board private structure to initialize 179418283cadSAlexander Duyck * 179518283cadSAlexander Duyck * Reset the MSI-X capability back to its starting state 179618283cadSAlexander Duyck **/ 179718283cadSAlexander Duyck static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) 179818283cadSAlexander Duyck { 179918283cadSAlexander Duyck pci_disable_msix(interface->pdev); 180018283cadSAlexander Duyck kfree(interface->msix_entries); 180118283cadSAlexander Duyck interface->msix_entries = NULL; 180218283cadSAlexander Duyck } 180318283cadSAlexander Duyck 180418283cadSAlexander Duyck /** 180518283cadSAlexander Duyck * fm10k_init_msix_capability - configure MSI-X capability 180618283cadSAlexander Duyck * @interface: board private structure to initialize 180718283cadSAlexander Duyck * 180818283cadSAlexander Duyck * Attempt to configure the interrupts using the best available 180918283cadSAlexander Duyck * capabilities of the hardware and the kernel. 181018283cadSAlexander Duyck **/ 181118283cadSAlexander Duyck static int fm10k_init_msix_capability(struct fm10k_intfc *interface) 181218283cadSAlexander Duyck { 181318283cadSAlexander Duyck struct fm10k_hw *hw = &interface->hw; 181418283cadSAlexander Duyck int v_budget, vector; 181518283cadSAlexander Duyck 181618283cadSAlexander Duyck /* It's easy to be greedy for MSI-X vectors, but it really 181718283cadSAlexander Duyck * doesn't do us much good if we have a lot more vectors 181818283cadSAlexander Duyck * than CPUs. So let's be conservative and only ask for 181918283cadSAlexander Duyck * (roughly) the same number of vectors as there are CPUs. 182018283cadSAlexander Duyck * The default is to use pairs of vectors. 182118283cadSAlexander Duyck */ 182218283cadSAlexander Duyck v_budget = max(interface->num_rx_queues, interface->num_tx_queues); 182318283cadSAlexander Duyck v_budget = min_t(u16, v_budget, num_online_cpus()); 182418283cadSAlexander Duyck 182518283cadSAlexander Duyck /* account for vectors not related to queues */ 1826a3ffeaf7SJacob Keller v_budget += NON_Q_VECTORS; 182718283cadSAlexander Duyck 182818283cadSAlexander Duyck /* At the same time, hardware can only support a maximum of 182918283cadSAlexander Duyck * hw->mac.max_msix_vectors vectors. With features 183018283cadSAlexander Duyck * such as RSS and VMDq, we can easily surpass the number of Rx and Tx 183118283cadSAlexander Duyck * descriptor queues supported by our device. Thus, we cap it off in 183218283cadSAlexander Duyck * those rare cases where the CPU count also exceeds our vector limit. 183318283cadSAlexander Duyck */ 183418283cadSAlexander Duyck v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); 183518283cadSAlexander Duyck 183618283cadSAlexander Duyck /* A failure in MSI-X entry allocation is fatal.
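	 * The driver has no MSI or legacy INTx fallback, so without this
	 * allocation the interface cannot be brought up.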
*/ 183718283cadSAlexander Duyck interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 183818283cadSAlexander Duyck GFP_KERNEL); 183918283cadSAlexander Duyck if (!interface->msix_entries) 184018283cadSAlexander Duyck return -ENOMEM; 184118283cadSAlexander Duyck 184218283cadSAlexander Duyck /* populate entry values */ 184318283cadSAlexander Duyck for (vector = 0; vector < v_budget; vector++) 184418283cadSAlexander Duyck interface->msix_entries[vector].entry = vector; 184518283cadSAlexander Duyck 184618283cadSAlexander Duyck /* Attempt to enable MSI-X with requested value */ 184718283cadSAlexander Duyck v_budget = pci_enable_msix_range(interface->pdev, 184818283cadSAlexander Duyck interface->msix_entries, 184918283cadSAlexander Duyck MIN_MSIX_COUNT(hw), 185018283cadSAlexander Duyck v_budget); 185118283cadSAlexander Duyck if (v_budget < 0) { 185218283cadSAlexander Duyck kfree(interface->msix_entries); 185318283cadSAlexander Duyck interface->msix_entries = NULL; 185430e23b71SJacob Keller return v_budget; 185518283cadSAlexander Duyck } 185618283cadSAlexander Duyck 185718283cadSAlexander Duyck /* record the number of queues available for q_vectors */ 1858a3ffeaf7SJacob Keller interface->num_q_vectors = v_budget - NON_Q_VECTORS; 185918283cadSAlexander Duyck 186018283cadSAlexander Duyck return 0; 186118283cadSAlexander Duyck } 186218283cadSAlexander Duyck 1863aa3ac822SAlexander Duyck /** 1864aa3ac822SAlexander Duyck * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS 1865aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1866aa3ac822SAlexander Duyck * 1867aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for QoS 1868aa3ac822SAlexander Duyck **/ 1869aa3ac822SAlexander Duyck static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) 1870aa3ac822SAlexander Duyck { 1871aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev; 18727a432d57SJacob Keller int pc, offset, rss_i, i; 1873aa3ac822SAlexander Duyck u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; 1874aa3ac822SAlexander Duyck u8 num_pcs = netdev_get_num_tc(dev); 1875aa3ac822SAlexander Duyck 1876aa3ac822SAlexander Duyck if (num_pcs <= 1) 1877aa3ac822SAlexander Duyck return false; 1878aa3ac822SAlexander Duyck 1879aa3ac822SAlexander Duyck rss_i = interface->ring_feature[RING_F_RSS].indices; 1880aa3ac822SAlexander Duyck 1881aa3ac822SAlexander Duyck for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { 18827a432d57SJacob Keller int q_idx = pc; 18837a432d57SJacob Keller 1884aa3ac822SAlexander Duyck for (i = 0; i < rss_i; i++) { 1885aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->reg_idx = q_idx; 1886aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->qos_pc = pc; 1887aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->reg_idx = q_idx; 1888aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->qos_pc = pc; 1889aa3ac822SAlexander Duyck q_idx += pc_stride; 1890aa3ac822SAlexander Duyck } 1891aa3ac822SAlexander Duyck } 1892aa3ac822SAlexander Duyck 1893aa3ac822SAlexander Duyck return true; 1894aa3ac822SAlexander Duyck } 1895aa3ac822SAlexander Duyck 1896aa3ac822SAlexander Duyck /** 1897aa3ac822SAlexander Duyck * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS 1898aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1899aa3ac822SAlexander Duyck * 1900aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for RSS 1901aa3ac822SAlexander Duyck **/
1902aa3ac822SAlexander Duyck static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) 1903aa3ac822SAlexander Duyck { 1904aa3ac822SAlexander Duyck int i; 1905aa3ac822SAlexander Duyck 1906aa3ac822SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++) 1907aa3ac822SAlexander Duyck interface->rx_ring[i]->reg_idx = i; 1908aa3ac822SAlexander Duyck 1909aa3ac822SAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++) 1910aa3ac822SAlexander Duyck interface->tx_ring[i]->reg_idx = i; 1911aa3ac822SAlexander Duyck } 1912aa3ac822SAlexander Duyck 1913aa3ac822SAlexander Duyck /** 1914aa3ac822SAlexander Duyck * fm10k_assign_rings - Map rings to network devices 1915aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1916aa3ac822SAlexander Duyck * 1917aa3ac822SAlexander Duyck * This function is meant to go through and configure both the network 1918aa3ac822SAlexander Duyck * devices so that they contain rings, and configure the rings so that 1919aa3ac822SAlexander Duyck * they function with their network devices. 1920aa3ac822SAlexander Duyck **/ 1921aa3ac822SAlexander Duyck static void fm10k_assign_rings(struct fm10k_intfc *interface) 1922aa3ac822SAlexander Duyck { 1923aa3ac822SAlexander Duyck if (fm10k_cache_ring_qos(interface)) 1924aa3ac822SAlexander Duyck return; 1925aa3ac822SAlexander Duyck 1926aa3ac822SAlexander Duyck fm10k_cache_ring_rss(interface); 1927aa3ac822SAlexander Duyck } 1928aa3ac822SAlexander Duyck 192918283cadSAlexander Duyck static void fm10k_init_reta(struct fm10k_intfc *interface) 193018283cadSAlexander Duyck { 193118283cadSAlexander Duyck u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; 1932540a5d85SJacob Keller u32 reta; 193318283cadSAlexander Duyck 19341012014eSKeller, Jacob E /* If the Rx flow indirection table has been configured manually, we 19351012014eSKeller, Jacob E * need to maintain it when possible. 19361012014eSKeller, Jacob E */ 19371012014eSKeller, Jacob E if (netif_is_rxfh_configured(interface->netdev)) { 193818283cadSAlexander Duyck for (i = FM10K_RETA_SIZE; i--;) { 193918283cadSAlexander Duyck reta = interface->reta[i]; 194018283cadSAlexander Duyck if ((((reta << 24) >> 24) < rss_i) && 194118283cadSAlexander Duyck (((reta << 16) >> 24) < rss_i) && 194218283cadSAlexander Duyck (((reta << 8) >> 24) < rss_i) && 194318283cadSAlexander Duyck (((reta) >> 24) < rss_i)) 194418283cadSAlexander Duyck continue; 19451012014eSKeller, Jacob E 19461012014eSKeller, Jacob E /* this should never happen */ 19471012014eSKeller, Jacob E dev_err(&interface->pdev->dev, 19481012014eSKeller, Jacob E "RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n"); 194918283cadSAlexander Duyck goto repopulate_reta; 195018283cadSAlexander Duyck } 195118283cadSAlexander Duyck 195218283cadSAlexander Duyck /* do nothing if all of the elements are in bounds */ 195318283cadSAlexander Duyck return; 195418283cadSAlexander Duyck } 195518283cadSAlexander Duyck 195618283cadSAlexander Duyck repopulate_reta: 1957540a5d85SJacob Keller fm10k_write_reta(interface, NULL); 195818283cadSAlexander Duyck } 195918283cadSAlexander Duyck 196018283cadSAlexander Duyck /** 196118283cadSAlexander Duyck * fm10k_init_queueing_scheme - Determine proper queueing scheme 196218283cadSAlexander Duyck * @interface: board private structure to initialize 196318283cadSAlexander Duyck * 196418283cadSAlexander Duyck * We determine which queueing scheme to use based on...
196518283cadSAlexander Duyck * - Hardware queue count (num_*_queues) 196618283cadSAlexander Duyck * - defined by miscellaneous hardware support/features (RSS, etc.) 196718283cadSAlexander Duyck **/ 196818283cadSAlexander Duyck int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) 196918283cadSAlexander Duyck { 197018283cadSAlexander Duyck int err; 197118283cadSAlexander Duyck 197218283cadSAlexander Duyck /* Number of supported queues */ 197318283cadSAlexander Duyck fm10k_set_num_queues(interface); 197418283cadSAlexander Duyck 197518283cadSAlexander Duyck /* Configure MSI-X capability */ 197618283cadSAlexander Duyck err = fm10k_init_msix_capability(interface); 197718283cadSAlexander Duyck if (err) { 197818283cadSAlexander Duyck dev_err(&interface->pdev->dev, 197918283cadSAlexander Duyck "Unable to initialize MSI-X capability\n"); 19804be37c42SJacob Keller goto err_init_msix; 198118283cadSAlexander Duyck } 198218283cadSAlexander Duyck 198318283cadSAlexander Duyck /* Allocate memory for queues */ 198418283cadSAlexander Duyck err = fm10k_alloc_q_vectors(interface); 1985587731e6SAlexander Duyck if (err) { 19864be37c42SJacob Keller dev_err(&interface->pdev->dev, 19874be37c42SJacob Keller "Unable to allocate queue vectors\n"); 19884be37c42SJacob Keller goto err_alloc_q_vectors; 1989587731e6SAlexander Duyck } 199018283cadSAlexander Duyck 1991aa3ac822SAlexander Duyck /* Map rings to devices, and map devices to physical queues */ 1992aa3ac822SAlexander Duyck fm10k_assign_rings(interface); 1993aa3ac822SAlexander Duyck 199418283cadSAlexander Duyck /* Initialize RSS redirection table */ 199518283cadSAlexander Duyck fm10k_init_reta(interface); 199618283cadSAlexander Duyck 199718283cadSAlexander Duyck return 0; 19984be37c42SJacob Keller 19994be37c42SJacob Keller err_alloc_q_vectors: 20004be37c42SJacob Keller fm10k_reset_msix_capability(interface); 20014be37c42SJacob Keller err_init_msix: 20024be37c42SJacob Keller fm10k_reset_num_queues(interface); 20034be37c42SJacob Keller return err; 200418283cadSAlexander Duyck } 200518283cadSAlexander Duyck 200618283cadSAlexander Duyck /** 200718283cadSAlexander Duyck * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings 200818283cadSAlexander Duyck * @interface: board private structure to clear queueing scheme on 200918283cadSAlexander Duyck * 201018283cadSAlexander Duyck * We go through and clear queueing specific resources and reset the structure 201118283cadSAlexander Duyck * to pre-load conditions 201218283cadSAlexander Duyck **/ 201318283cadSAlexander Duyck void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) 201418283cadSAlexander Duyck { 201518283cadSAlexander Duyck fm10k_free_q_vectors(interface); 201618283cadSAlexander Duyck fm10k_reset_msix_capability(interface); 201718283cadSAlexander Duyck } 2018
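/* Worked example for fm10k_cache_ring_qos() above (illustrative values
 * only): with num_pcs = 4 traffic classes and rss_i = 4, the QoS mask
 * is BIT(fls(3)) - 1 = 3, so pc_stride = 4. Rings are grouped by class
 * in software (offset = pc * rss_i) but interleaved in hardware: ring
 * i of class pc gets reg_idx = pc + i * pc_stride, so class 0 owns
 * hardware queues 0, 4, 8 and 12 while class 1 owns 1, 5, 9 and 13.
 *
 * Sketch (condensed, error handling elided) of how the init/clear pair
 * above is expected to be used by the probe and reset paths:
 *
 *	err = fm10k_init_queueing_scheme(interface);
 *	if (err)
 *		return err;
 *	...
 *	fm10k_clear_queueing_scheme(interface);
 */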