/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION	"0.21.2-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright (c) 2013 - 2016 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
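 * It also creates the driver workqueue and initializes the debugfs
 * interface before registering with the PCI subsystem.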
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name);

	fm10k_dbg_init();

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
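 *
 * Pages are allocated and DMA mapped one descriptor at a time; if either
 * step fails, the loop stops early and the remaining descriptors are
 * refilled on a later call.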
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(fm10k_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, 512);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}

static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}

#define FM10K_RSS_L4_TYPES_MASK \
	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (!l2_accel)
		return;

	/* update MACVLAN statistics */
	macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
			 !!(rx_desc->w.hdr_info &
			    cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
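 *
 * Returns the frame length captured before eth_type_trans() strips the
 * Ethernet header, so the value can be used for byte accounting.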
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	skb_record_rx_queue(skb, rx_ring->queue_index);

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
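	/* hand the completed frame to GRO so it can be coalesced with
	 * other segments before being passed up the stack
	 */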
	napi_gro_receive(&q_vector->napi, skb);
}

static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}

#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_vxlan_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_vxlan_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}

__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}

static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;
err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (!net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	u8 *transport_hdr;
	__be16 frag_off;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
		transport_hdr = skb_inner_transport_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
		transport_hdr = skb_transport_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		if (likely((transport_hdr - network_hdr.raw) ==
			   sizeof(struct ipv6hdr)))
			break;
		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
				      sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (unlikely(frag_off))
			l4_hdr = NEXTHDR_FRAGMENT;
		break;
	default:
		break;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum, version=%d l4 proto=%x\n",
				 protocol, l4_hdr);
		}
		skb_checksum_help(skb);
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}

#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct fm10k_tx_buffer *first;
	unsigned short f;
	u32 tx_flags = 0;
	int tso;

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

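	/* map data buffers, fill out the descriptors, and bump the tail */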
1115b101c962SAlexander Duyck fm10k_tx_map(tx_ring, first); 1116b101c962SAlexander Duyck 1117b101c962SAlexander Duyck return NETDEV_TX_OK; 111876a540d4SAlexander Duyck 111976a540d4SAlexander Duyck out_drop: 112076a540d4SAlexander Duyck dev_kfree_skb_any(first->skb); 112176a540d4SAlexander Duyck first->skb = NULL; 112276a540d4SAlexander Duyck 112376a540d4SAlexander Duyck return NETDEV_TX_OK; 1124b101c962SAlexander Duyck } 1125b101c962SAlexander Duyck 1126b101c962SAlexander Duyck static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) 1127b101c962SAlexander Duyck { 1128b101c962SAlexander Duyck return ring->stats.packets; 1129b101c962SAlexander Duyck } 1130b101c962SAlexander Duyck 11315b9e4432SJacob Keller /** 11325b9e4432SJacob Keller * fm10k_get_tx_pending - how many Tx descriptors not processed 11335b9e4432SJacob Keller * @ring: the ring structure 11345b9e4432SJacob Keller * @in_sw: is tx_pending being checked in SW or in HW? 11355b9e4432SJacob Keller */ 11365b9e4432SJacob Keller u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw) 1137b101c962SAlexander Duyck { 113834bad71cSJacob Keller struct fm10k_intfc *interface = ring->q_vector->interface; 113934bad71cSJacob Keller struct fm10k_hw *hw = &interface->hw; 11405b9e4432SJacob Keller u32 head, tail; 114134bad71cSJacob Keller 11425b9e4432SJacob Keller if (likely(in_sw)) { 11435b9e4432SJacob Keller head = ring->next_to_clean; 11445b9e4432SJacob Keller tail = ring->next_to_use; 11455b9e4432SJacob Keller } else { 11465b9e4432SJacob Keller head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx)); 11475b9e4432SJacob Keller tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx)); 11485b9e4432SJacob Keller } 1149b101c962SAlexander Duyck 1150b101c962SAlexander Duyck return ((head <= tail) ? tail : tail + ring->count) - head; 1151b101c962SAlexander Duyck } 1152b101c962SAlexander Duyck 1153b101c962SAlexander Duyck bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) 1154b101c962SAlexander Duyck { 1155b101c962SAlexander Duyck u32 tx_done = fm10k_get_tx_completed(tx_ring); 1156b101c962SAlexander Duyck u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 11575b9e4432SJacob Keller u32 tx_pending = fm10k_get_tx_pending(tx_ring, true); 1158b101c962SAlexander Duyck 1159b101c962SAlexander Duyck clear_check_for_tx_hang(tx_ring); 1160b101c962SAlexander Duyck 1161b101c962SAlexander Duyck /* Check for a hung queue, but be thorough. This verifies 1162b101c962SAlexander Duyck * that a transmit has been completed since the previous 1163b101c962SAlexander Duyck * check AND there is at least one packet pending. By 1164b101c962SAlexander Duyck * requiring this to fail twice we avoid races with 1165b101c962SAlexander Duyck * clearing the ARMED bit and conditions where we 1166b101c962SAlexander Duyck * run the check_tx_hang logic with a transmit completion 1167b101c962SAlexander Duyck * pending but without time to complete it yet. 
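 * Concretely: the first check that sees pending work with no new
 * completions sets __FM10K_HANG_CHECK_ARMED and reports no hang;
 * only if a second consecutive check finds the same condition does
 * the test_and_set_bit() below return true and let the caller
 * schedule a reset.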
1168b101c962SAlexander Duyck */ 1169b101c962SAlexander Duyck if (!tx_pending || (tx_done_old != tx_done)) { 1170b101c962SAlexander Duyck /* update completed stats and continue */ 1171b101c962SAlexander Duyck tx_ring->tx_stats.tx_done_old = tx_done; 1172b101c962SAlexander Duyck /* reset the countdown */ 1173b101c962SAlexander Duyck clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); 1174b101c962SAlexander Duyck 1175b101c962SAlexander Duyck return false; 1176b101c962SAlexander Duyck } 1177b101c962SAlexander Duyck 1178b101c962SAlexander Duyck /* make sure it is true for two checks in a row */ 1179b101c962SAlexander Duyck return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); 1180b101c962SAlexander Duyck } 1181b101c962SAlexander Duyck 1182b101c962SAlexander Duyck /** 1183b101c962SAlexander Duyck * fm10k_tx_timeout_reset - initiate reset due to Tx timeout 1184b101c962SAlexander Duyck * @interface: driver private struct 1185b101c962SAlexander Duyck **/ 1186b101c962SAlexander Duyck void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) 1187b101c962SAlexander Duyck { 1188b101c962SAlexander Duyck /* Do the reset outside of interrupt context */ 1189b101c962SAlexander Duyck if (!test_bit(__FM10K_DOWN, &interface->state)) { 1190b101c962SAlexander Duyck interface->tx_timeout_count++; 1191b101c962SAlexander Duyck interface->flags |= FM10K_FLAG_RESET_REQUESTED; 1192b101c962SAlexander Duyck fm10k_service_event_schedule(interface); 1193b101c962SAlexander Duyck } 1194b101c962SAlexander Duyck } 1195b101c962SAlexander Duyck 1196b101c962SAlexander Duyck /** 1197b101c962SAlexander Duyck * fm10k_clean_tx_irq - Reclaim resources after transmit completes 1198b101c962SAlexander Duyck * @q_vector: structure containing interrupt and ring information 1199b101c962SAlexander Duyck * @tx_ring: tx ring to clean 1200144d8305SAlexander Duyck * @napi_budget: Used to determine if we are in netpoll 1201b101c962SAlexander Duyck **/ 1202b101c962SAlexander Duyck static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, 1203144d8305SAlexander Duyck struct fm10k_ring *tx_ring, int napi_budget) 1204b101c962SAlexander Duyck { 1205b101c962SAlexander Duyck struct fm10k_intfc *interface = q_vector->interface; 1206b101c962SAlexander Duyck struct fm10k_tx_buffer *tx_buffer; 1207b101c962SAlexander Duyck struct fm10k_tx_desc *tx_desc; 1208b101c962SAlexander Duyck unsigned int total_bytes = 0, total_packets = 0; 1209b101c962SAlexander Duyck unsigned int budget = q_vector->tx.work_limit; 1210b101c962SAlexander Duyck unsigned int i = tx_ring->next_to_clean; 1211b101c962SAlexander Duyck 1212b101c962SAlexander Duyck if (test_bit(__FM10K_DOWN, &interface->state)) 1213b101c962SAlexander Duyck return true; 1214b101c962SAlexander Duyck 1215b101c962SAlexander Duyck tx_buffer = &tx_ring->tx_buffer[i]; 1216b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, i); 1217b101c962SAlexander Duyck i -= tx_ring->count; 1218b101c962SAlexander Duyck 1219b101c962SAlexander Duyck do { 1220b101c962SAlexander Duyck struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; 1221b101c962SAlexander Duyck 1222b101c962SAlexander Duyck /* if next_to_watch is not set then there is no work pending */ 1223b101c962SAlexander Duyck if (!eop_desc) 1224b101c962SAlexander Duyck break; 1225b101c962SAlexander Duyck 1226b101c962SAlexander Duyck /* prevent any other reads prior to eop_desc */ 1227b101c962SAlexander Duyck read_barrier_depends(); 1228b101c962SAlexander Duyck 1229b101c962SAlexander Duyck /* if DD is not set pending work has not been 
completed */ 1230b101c962SAlexander Duyck if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) 1231b101c962SAlexander Duyck break; 1232b101c962SAlexander Duyck 1233b101c962SAlexander Duyck /* clear next_to_watch to prevent false hangs */ 1234b101c962SAlexander Duyck tx_buffer->next_to_watch = NULL; 1235b101c962SAlexander Duyck 1236b101c962SAlexander Duyck /* update the statistics for this packet */ 1237b101c962SAlexander Duyck total_bytes += tx_buffer->bytecount; 1238b101c962SAlexander Duyck total_packets += tx_buffer->gso_segs; 1239b101c962SAlexander Duyck 1240b101c962SAlexander Duyck /* free the skb */ 1241144d8305SAlexander Duyck napi_consume_skb(tx_buffer->skb, napi_budget); 1242b101c962SAlexander Duyck 1243b101c962SAlexander Duyck /* unmap skb header data */ 1244b101c962SAlexander Duyck dma_unmap_single(tx_ring->dev, 1245b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1246b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1247b101c962SAlexander Duyck DMA_TO_DEVICE); 1248b101c962SAlexander Duyck 1249b101c962SAlexander Duyck /* clear tx_buffer data */ 1250b101c962SAlexander Duyck tx_buffer->skb = NULL; 1251b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1252b101c962SAlexander Duyck 1253b101c962SAlexander Duyck /* unmap remaining buffers */ 1254b101c962SAlexander Duyck while (tx_desc != eop_desc) { 1255b101c962SAlexander Duyck tx_buffer++; 1256b101c962SAlexander Duyck tx_desc++; 1257b101c962SAlexander Duyck i++; 1258b101c962SAlexander Duyck if (unlikely(!i)) { 1259b101c962SAlexander Duyck i -= tx_ring->count; 1260b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1261b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1262b101c962SAlexander Duyck } 1263b101c962SAlexander Duyck 1264b101c962SAlexander Duyck /* unmap any remaining paged data */ 1265b101c962SAlexander Duyck if (dma_unmap_len(tx_buffer, len)) { 1266b101c962SAlexander Duyck dma_unmap_page(tx_ring->dev, 1267b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1268b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1269b101c962SAlexander Duyck DMA_TO_DEVICE); 1270b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1271b101c962SAlexander Duyck } 1272b101c962SAlexander Duyck } 1273b101c962SAlexander Duyck 1274b101c962SAlexander Duyck /* move us one more past the eop_desc for start of next pkt */ 1275b101c962SAlexander Duyck tx_buffer++; 1276b101c962SAlexander Duyck tx_desc++; 1277b101c962SAlexander Duyck i++; 1278b101c962SAlexander Duyck if (unlikely(!i)) { 1279b101c962SAlexander Duyck i -= tx_ring->count; 1280b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1281b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1282b101c962SAlexander Duyck } 1283b101c962SAlexander Duyck 1284b101c962SAlexander Duyck /* issue prefetch for next Tx descriptor */ 1285b101c962SAlexander Duyck prefetch(tx_desc); 1286b101c962SAlexander Duyck 1287b101c962SAlexander Duyck /* update budget accounting */ 1288b101c962SAlexander Duyck budget--; 1289b101c962SAlexander Duyck } while (likely(budget)); 1290b101c962SAlexander Duyck 1291b101c962SAlexander Duyck i += tx_ring->count; 1292b101c962SAlexander Duyck tx_ring->next_to_clean = i; 1293b101c962SAlexander Duyck u64_stats_update_begin(&tx_ring->syncp); 1294b101c962SAlexander Duyck tx_ring->stats.bytes += total_bytes; 1295b101c962SAlexander Duyck tx_ring->stats.packets += total_packets; 1296b101c962SAlexander Duyck u64_stats_update_end(&tx_ring->syncp); 1297b101c962SAlexander Duyck q_vector->tx.total_bytes += total_bytes; 
1298b101c962SAlexander Duyck q_vector->tx.total_packets += total_packets; 1299b101c962SAlexander Duyck 1300b101c962SAlexander Duyck if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { 1301b101c962SAlexander Duyck /* schedule immediate reset if we believe we hung */ 1302b101c962SAlexander Duyck struct fm10k_hw *hw = &interface->hw; 1303b101c962SAlexander Duyck 1304b101c962SAlexander Duyck netif_err(interface, drv, tx_ring->netdev, 1305b101c962SAlexander Duyck "Detected Tx Unit Hang\n" 1306b101c962SAlexander Duyck " Tx Queue <%d>\n" 1307b101c962SAlexander Duyck " TDH, TDT <%x>, <%x>\n" 1308b101c962SAlexander Duyck " next_to_use <%x>\n" 1309b101c962SAlexander Duyck " next_to_clean <%x>\n", 1310b101c962SAlexander Duyck tx_ring->queue_index, 1311b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), 1312b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), 1313b101c962SAlexander Duyck tx_ring->next_to_use, i); 1314b101c962SAlexander Duyck 1315b101c962SAlexander Duyck netif_stop_subqueue(tx_ring->netdev, 1316b101c962SAlexander Duyck tx_ring->queue_index); 1317b101c962SAlexander Duyck 1318b101c962SAlexander Duyck netif_info(interface, probe, tx_ring->netdev, 1319b101c962SAlexander Duyck "tx hang %d detected on queue %d, resetting interface\n", 1320b101c962SAlexander Duyck interface->tx_timeout_count + 1, 1321b101c962SAlexander Duyck tx_ring->queue_index); 1322b101c962SAlexander Duyck 1323b101c962SAlexander Duyck fm10k_tx_timeout_reset(interface); 1324b101c962SAlexander Duyck 1325b101c962SAlexander Duyck /* the netdev is about to reset, no point in enabling stuff */ 1326b101c962SAlexander Duyck return true; 1327b101c962SAlexander Duyck } 1328b101c962SAlexander Duyck 1329b101c962SAlexander Duyck /* notify netdev of completed buffers */ 1330b101c962SAlexander Duyck netdev_tx_completed_queue(txring_txq(tx_ring), 1331b101c962SAlexander Duyck total_packets, total_bytes); 1332b101c962SAlexander Duyck 1333b101c962SAlexander Duyck #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2) 1334b101c962SAlexander Duyck if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 1335b101c962SAlexander Duyck (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 1336b101c962SAlexander Duyck /* Make sure that anybody stopping the queue after this 1337b101c962SAlexander Duyck * sees the new next_to_clean. 1338b101c962SAlexander Duyck */ 1339b101c962SAlexander Duyck smp_mb(); 1340b101c962SAlexander Duyck if (__netif_subqueue_stopped(tx_ring->netdev, 1341b101c962SAlexander Duyck tx_ring->queue_index) && 1342b101c962SAlexander Duyck !test_bit(__FM10K_DOWN, &interface->state)) { 1343b101c962SAlexander Duyck netif_wake_subqueue(tx_ring->netdev, 1344b101c962SAlexander Duyck tx_ring->queue_index); 1345b101c962SAlexander Duyck ++tx_ring->tx_stats.restart_queue; 1346b101c962SAlexander Duyck } 1347b101c962SAlexander Duyck } 1348b101c962SAlexander Duyck 1349b101c962SAlexander Duyck return !!budget; 1350b101c962SAlexander Duyck } 1351b101c962SAlexander Duyck 135218283cadSAlexander Duyck /** 135318283cadSAlexander Duyck * fm10k_update_itr - update the dynamic ITR value based on packet size 135418283cadSAlexander Duyck * 135518283cadSAlexander Duyck * Stores a new ITR value based strictly on packet size.
The 135618283cadSAlexander Duyck * divisors and thresholds used by this function were determined based 135718283cadSAlexander Duyck * on theoretical maximum wire speed and testing data, in order to 135818283cadSAlexander Duyck * minimize response time while increasing bulk throughput. 135918283cadSAlexander Duyck * 136018283cadSAlexander Duyck * @ring_container: Container for rings to have ITR updated 136118283cadSAlexander Duyck **/ 136218283cadSAlexander Duyck static void fm10k_update_itr(struct fm10k_ring_container *ring_container) 136318283cadSAlexander Duyck { 1364242722ddSJacob Keller unsigned int avg_wire_size, packets, itr_round; 136518283cadSAlexander Duyck 136618283cadSAlexander Duyck /* Only update ITR if we are using adaptive setting */ 1367584373f5SJacob Keller if (!ITR_IS_ADAPTIVE(ring_container->itr)) 136818283cadSAlexander Duyck goto clear_counts; 136918283cadSAlexander Duyck 137018283cadSAlexander Duyck packets = ring_container->total_packets; 137118283cadSAlexander Duyck if (!packets) 137218283cadSAlexander Duyck goto clear_counts; 137318283cadSAlexander Duyck 137418283cadSAlexander Duyck avg_wire_size = ring_container->total_bytes / packets; 137518283cadSAlexander Duyck 1376242722ddSJacob Keller /* The following is a crude approximation of: 1377242722ddSJacob Keller * wmem_default / (size + overhead) = desired_pkts_per_int 1378242722ddSJacob Keller * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 1379242722ddSJacob Keller * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1380242722ddSJacob Keller * 1381242722ddSJacob Keller * Assuming wmem_default is 212992 and overhead is 640 bytes per 1382242722ddSJacob Keller * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1383242722ddSJacob Keller * formula down to 1384242722ddSJacob Keller * 1385242722ddSJacob Keller * (34 * (size + 24)) / (size + 640) = ITR 1386242722ddSJacob Keller * 1387242722ddSJacob Keller * We first do some math on the packet size and then finally bitshift 1388242722ddSJacob Keller * by 8 after rounding up. We also have to account for PCIe link speed 1389242722ddSJacob Keller * difference as ITR scales based on this. 1390242722ddSJacob Keller */ 1391242722ddSJacob Keller if (avg_wire_size <= 360) { 1392242722ddSJacob Keller /* Start at 250K ints/sec and gradually drop to 77K ints/sec */ 1393242722ddSJacob Keller avg_wire_size *= 8; 1394242722ddSJacob Keller avg_wire_size += 376; 1395242722ddSJacob Keller } else if (avg_wire_size <= 1152) { 1396242722ddSJacob Keller /* 77K ints/sec to 45K ints/sec */ 1397242722ddSJacob Keller avg_wire_size *= 3; 1398242722ddSJacob Keller avg_wire_size += 2176; 1399242722ddSJacob Keller } else if (avg_wire_size <= 1920) { 1400242722ddSJacob Keller /* 45K ints/sec to 38K ints/sec */ 1401242722ddSJacob Keller avg_wire_size += 4480; 1402242722ddSJacob Keller } else { 1403242722ddSJacob Keller /* plateau at a limit of 38K ints/sec */ 1404242722ddSJacob Keller avg_wire_size = 6656; 1405242722ddSJacob Keller } 140618283cadSAlexander Duyck 1407242722ddSJacob Keller /* Perform final bitshift for division after rounding up to ensure 1408242722ddSJacob Keller * that the calculation will never get below a 1. The bit shift 1409242722ddSJacob Keller * accounts for changes in the ITR due to PCIe link speed. 
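 * For example, a 64-byte average wire size falls into the first
 * bucket above: 64 * 8 + 376 = 888, which the round-up shift below
 * divides by 2^(itr_scale + 8) before the result is stored, together
 * with the adaptive flag, as the new ITR value.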
1410242722ddSJacob Keller */ 1411242722ddSJacob Keller itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; 1412fcdb0a99SBruce Allan avg_wire_size += BIT(itr_round) - 1; 1413242722ddSJacob Keller avg_wire_size >>= itr_round; 141418283cadSAlexander Duyck 141518283cadSAlexander Duyck /* write back value and retain adaptive flag */ 141618283cadSAlexander Duyck ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; 141718283cadSAlexander Duyck 141818283cadSAlexander Duyck clear_counts: 141918283cadSAlexander Duyck ring_container->total_bytes = 0; 142018283cadSAlexander Duyck ring_container->total_packets = 0; 142118283cadSAlexander Duyck } 142218283cadSAlexander Duyck 142318283cadSAlexander Duyck static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) 142418283cadSAlexander Duyck { 142518283cadSAlexander Duyck /* Enable auto-mask and clear the current mask */ 142618283cadSAlexander Duyck u32 itr = FM10K_ITR_ENABLE; 142718283cadSAlexander Duyck 142818283cadSAlexander Duyck /* Update Tx ITR */ 142918283cadSAlexander Duyck fm10k_update_itr(&q_vector->tx); 143018283cadSAlexander Duyck 143118283cadSAlexander Duyck /* Update Rx ITR */ 143218283cadSAlexander Duyck fm10k_update_itr(&q_vector->rx); 143318283cadSAlexander Duyck 143418283cadSAlexander Duyck /* Store Tx itr in timer slot 0 */ 143518283cadSAlexander Duyck itr |= (q_vector->tx.itr & FM10K_ITR_MAX); 143618283cadSAlexander Duyck 143718283cadSAlexander Duyck /* Shift Rx itr to timer slot 1 */ 143818283cadSAlexander Duyck itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; 143918283cadSAlexander Duyck 144018283cadSAlexander Duyck /* Write the final value to the ITR register */ 144118283cadSAlexander Duyck writel(itr, q_vector->itr); 144218283cadSAlexander Duyck } 144318283cadSAlexander Duyck 144418283cadSAlexander Duyck static int fm10k_poll(struct napi_struct *napi, int budget) 144518283cadSAlexander Duyck { 144618283cadSAlexander Duyck struct fm10k_q_vector *q_vector = 144718283cadSAlexander Duyck container_of(napi, struct fm10k_q_vector, napi); 1448b101c962SAlexander Duyck struct fm10k_ring *ring; 144932b3e08fSJesse Brandeburg int per_ring_budget, work_done = 0; 1450b101c962SAlexander Duyck bool clean_complete = true; 1451b101c962SAlexander Duyck 1452144d8305SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) { 1453144d8305SAlexander Duyck if (!fm10k_clean_tx_irq(q_vector, ring, budget)) 1454144d8305SAlexander Duyck clean_complete = false; 1455144d8305SAlexander Duyck } 1456b101c962SAlexander Duyck 14579f872986SAlexander Duyck /* Handle case where we are called by netpoll with a budget of 0 */ 14589f872986SAlexander Duyck if (budget <= 0) 14599f872986SAlexander Duyck return budget; 14609f872986SAlexander Duyck 1461b101c962SAlexander Duyck /* attempt to distribute budget to each queue fairly, but don't 1462b101c962SAlexander Duyck * allow the budget to go below 1 because we'll exit polling 1463b101c962SAlexander Duyck */ 1464b101c962SAlexander Duyck if (q_vector->rx.count > 1) 1465b101c962SAlexander Duyck per_ring_budget = max(budget / q_vector->rx.count, 1); 1466b101c962SAlexander Duyck else 1467b101c962SAlexander Duyck per_ring_budget = budget; 1468b101c962SAlexander Duyck 146932b3e08fSJesse Brandeburg fm10k_for_each_ring(ring, q_vector->rx) { 147032b3e08fSJesse Brandeburg int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); 147132b3e08fSJesse Brandeburg 147232b3e08fSJesse Brandeburg work_done += work; 1473144d8305SAlexander Duyck if (work >= per_ring_budget) 1474144d8305SAlexander Duyck 
clean_complete = false; 147532b3e08fSJesse Brandeburg } 1476b101c962SAlexander Duyck 1477b101c962SAlexander Duyck /* If all work not completed, return budget and keep polling */ 1478b101c962SAlexander Duyck if (!clean_complete) 1479b101c962SAlexander Duyck return budget; 148018283cadSAlexander Duyck 148118283cadSAlexander Duyck /* all work done, exit the polling mode */ 148232b3e08fSJesse Brandeburg napi_complete_done(napi, work_done); 148318283cadSAlexander Duyck 148418283cadSAlexander Duyck /* re-enable the q_vector */ 148518283cadSAlexander Duyck fm10k_qv_enable(q_vector); 148618283cadSAlexander Duyck 148718283cadSAlexander Duyck return 0; 148818283cadSAlexander Duyck } 148918283cadSAlexander Duyck 149018283cadSAlexander Duyck /** 1491aa3ac822SAlexander Duyck * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device 1492aa3ac822SAlexander Duyck * @interface: board private structure to initialize 1493aa3ac822SAlexander Duyck * 1494aa3ac822SAlexander Duyck * When QoS (Quality of Service) is enabled, allocate queues for 1495aa3ac822SAlexander Duyck * each traffic class. If multiqueue isn't available, then abort QoS 1496aa3ac822SAlexander Duyck * initialization. 1497aa3ac822SAlexander Duyck * 1498aa3ac822SAlexander Duyck * This function handles all combinations of QoS and RSS. 1499aa3ac822SAlexander Duyck * 1500aa3ac822SAlexander Duyck **/ 1501aa3ac822SAlexander Duyck static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) 1502aa3ac822SAlexander Duyck { 1503aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev; 1504aa3ac822SAlexander Duyck struct fm10k_ring_feature *f; 1505aa3ac822SAlexander Duyck int rss_i, i; 1506aa3ac822SAlexander Duyck int pcs; 1507aa3ac822SAlexander Duyck 1508aa3ac822SAlexander Duyck /* Map queue offset and counts onto allocated tx queues */ 1509aa3ac822SAlexander Duyck pcs = netdev_get_num_tc(dev); 1510aa3ac822SAlexander Duyck 1511aa3ac822SAlexander Duyck if (pcs <= 1) 1512aa3ac822SAlexander Duyck return false; 1513aa3ac822SAlexander Duyck 1514aa3ac822SAlexander Duyck /* set QoS mask and indices */ 1515aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_QOS]; 1516aa3ac822SAlexander Duyck f->indices = pcs; 1517fcdb0a99SBruce Allan f->mask = BIT(fls(pcs - 1)) - 1; 1518aa3ac822SAlexander Duyck 1519aa3ac822SAlexander Duyck /* determine the upper limit for our current DCB mode */ 1520aa3ac822SAlexander Duyck rss_i = interface->hw.mac.max_queues / pcs; 1521fcdb0a99SBruce Allan rss_i = BIT(fls(rss_i) - 1); 1522aa3ac822SAlexander Duyck 1523aa3ac822SAlexander Duyck /* set RSS mask and indices */ 1524aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS]; 1525aa3ac822SAlexander Duyck rss_i = min_t(u16, rss_i, f->limit); 1526aa3ac822SAlexander Duyck f->indices = rss_i; 1527fcdb0a99SBruce Allan f->mask = BIT(fls(rss_i - 1)) - 1; 1528aa3ac822SAlexander Duyck 1529aa3ac822SAlexander Duyck /* configure pause class to queue mapping */ 1530aa3ac822SAlexander Duyck for (i = 0; i < pcs; i++) 1531aa3ac822SAlexander Duyck netdev_set_tc_queue(dev, i, rss_i, rss_i * i); 1532aa3ac822SAlexander Duyck 1533aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i * pcs; 1534aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i * pcs; 1535aa3ac822SAlexander Duyck 1536aa3ac822SAlexander Duyck return true; 1537aa3ac822SAlexander Duyck } 1538aa3ac822SAlexander Duyck 1539aa3ac822SAlexander Duyck /** 1540aa3ac822SAlexander Duyck * fm10k_set_rss_queues: Allocate queues for RSS 1541aa3ac822SAlexander Duyck * @interface: board private structure to
initialize 1542aa3ac822SAlexander Duyck * 1543aa3ac822SAlexander Duyck * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 1544aa3ac822SAlexander Duyck * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 1545aa3ac822SAlexander Duyck * 1546aa3ac822SAlexander Duyck **/ 1547aa3ac822SAlexander Duyck static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) 1548aa3ac822SAlexander Duyck { 1549aa3ac822SAlexander Duyck struct fm10k_ring_feature *f; 1550aa3ac822SAlexander Duyck u16 rss_i; 1551aa3ac822SAlexander Duyck 1552aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS]; 1553aa3ac822SAlexander Duyck rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); 1554aa3ac822SAlexander Duyck 1555aa3ac822SAlexander Duyck /* record indices and power of 2 mask for RSS */ 1556aa3ac822SAlexander Duyck f->indices = rss_i; 1557fcdb0a99SBruce Allan f->mask = BIT(fls(rss_i - 1)) - 1; 1558aa3ac822SAlexander Duyck 1559aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i; 1560aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i; 1561aa3ac822SAlexander Duyck 1562aa3ac822SAlexander Duyck return true; 1563aa3ac822SAlexander Duyck } 1564aa3ac822SAlexander Duyck 1565aa3ac822SAlexander Duyck /** 156618283cadSAlexander Duyck * fm10k_set_num_queues: Allocate queues for device, feature dependent 156718283cadSAlexander Duyck * @interface: board private structure to initialize 156818283cadSAlexander Duyck * 156918283cadSAlexander Duyck * This is the top level queue allocation routine. The order here is very 157018283cadSAlexander Duyck * important, starting with the "most" number of features turned on at once, 157118283cadSAlexander Duyck * and ending with the smallest set of features. This way large combinations 157218283cadSAlexander Duyck * can be allocated if they're turned on, and smaller combinations are the 157318283cadSAlexander Duyck * fallthrough conditions. 157418283cadSAlexander Duyck * 157518283cadSAlexander Duyck **/ 157618283cadSAlexander Duyck static void fm10k_set_num_queues(struct fm10k_intfc *interface) 157718283cadSAlexander Duyck { 1578b3525696SJacob Keller /* Attempt to setup QoS and RSS first */ 1579aa3ac822SAlexander Duyck if (fm10k_set_qos_queues(interface)) 1580aa3ac822SAlexander Duyck return; 1581aa3ac822SAlexander Duyck 1582b3525696SJacob Keller /* If we don't have QoS, just fallback to only RSS. */ 1583aa3ac822SAlexander Duyck fm10k_set_rss_queues(interface); 158418283cadSAlexander Duyck } 158518283cadSAlexander Duyck 158618283cadSAlexander Duyck /** 15874be37c42SJacob Keller * fm10k_reset_num_queues - Reset the number of queues to zero 15884be37c42SJacob Keller * @interface: board private structure 15894be37c42SJacob Keller * 15904be37c42SJacob Keller * This function should be called whenever we need to reset the number of 15914be37c42SJacob Keller * queues after an error condition. 
15924be37c42SJacob Keller */ 15934be37c42SJacob Keller static void fm10k_reset_num_queues(struct fm10k_intfc *interface) 15944be37c42SJacob Keller { 15954be37c42SJacob Keller interface->num_tx_queues = 0; 15964be37c42SJacob Keller interface->num_rx_queues = 0; 15974be37c42SJacob Keller interface->num_q_vectors = 0; 15984be37c42SJacob Keller } 15994be37c42SJacob Keller 16004be37c42SJacob Keller /** 160118283cadSAlexander Duyck * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector 160218283cadSAlexander Duyck * @interface: board private structure to initialize 160318283cadSAlexander Duyck * @v_count: q_vectors allocated on interface, used for ring interleaving 160418283cadSAlexander Duyck * @v_idx: index of vector in interface struct 160518283cadSAlexander Duyck * @txr_count: total number of Tx rings to allocate 160618283cadSAlexander Duyck * @txr_idx: index of first Tx ring to allocate 160718283cadSAlexander Duyck * @rxr_count: total number of Rx rings to allocate 160818283cadSAlexander Duyck * @rxr_idx: index of first Rx ring to allocate 160918283cadSAlexander Duyck * 161018283cadSAlexander Duyck * We allocate one q_vector. If allocation fails we return -ENOMEM. 161118283cadSAlexander Duyck **/ 161218283cadSAlexander Duyck static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, 161318283cadSAlexander Duyck unsigned int v_count, unsigned int v_idx, 161418283cadSAlexander Duyck unsigned int txr_count, unsigned int txr_idx, 161518283cadSAlexander Duyck unsigned int rxr_count, unsigned int rxr_idx) 161618283cadSAlexander Duyck { 161718283cadSAlexander Duyck struct fm10k_q_vector *q_vector; 1618e27ef599SAlexander Duyck struct fm10k_ring *ring; 161918283cadSAlexander Duyck int ring_count, size; 162018283cadSAlexander Duyck 162118283cadSAlexander Duyck ring_count = txr_count + rxr_count; 1622e27ef599SAlexander Duyck size = sizeof(struct fm10k_q_vector) + 1623e27ef599SAlexander Duyck (sizeof(struct fm10k_ring) * ring_count); 162418283cadSAlexander Duyck 162518283cadSAlexander Duyck /* allocate q_vector and rings */ 162618283cadSAlexander Duyck q_vector = kzalloc(size, GFP_KERNEL); 162718283cadSAlexander Duyck if (!q_vector) 162818283cadSAlexander Duyck return -ENOMEM; 162918283cadSAlexander Duyck 163018283cadSAlexander Duyck /* initialize NAPI */ 163118283cadSAlexander Duyck netif_napi_add(interface->netdev, &q_vector->napi, 163218283cadSAlexander Duyck fm10k_poll, NAPI_POLL_WEIGHT); 163318283cadSAlexander Duyck 163418283cadSAlexander Duyck /* tie q_vector and interface together */ 163518283cadSAlexander Duyck interface->q_vector[v_idx] = q_vector; 163618283cadSAlexander Duyck q_vector->interface = interface; 163718283cadSAlexander Duyck q_vector->v_idx = v_idx; 163818283cadSAlexander Duyck 1639e27ef599SAlexander Duyck /* initialize pointer to rings */ 1640e27ef599SAlexander Duyck ring = q_vector->ring; 1641e27ef599SAlexander Duyck 164218283cadSAlexander Duyck /* save Tx ring container info */ 1643e27ef599SAlexander Duyck q_vector->tx.ring = ring; 1644e27ef599SAlexander Duyck q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; 164518283cadSAlexander Duyck q_vector->tx.itr = interface->tx_itr; 1646242722ddSJacob Keller q_vector->tx.itr_scale = interface->hw.mac.itr_scale; 164718283cadSAlexander Duyck q_vector->tx.count = txr_count; 164818283cadSAlexander Duyck 1649e27ef599SAlexander Duyck while (txr_count) { 1650e27ef599SAlexander Duyck /* assign generic ring traits */ 1651e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1652e27ef599SAlexander Duyck ring->netdev = 
interface->netdev; 1653e27ef599SAlexander Duyck 1654e27ef599SAlexander Duyck /* configure backlink on ring */ 1655e27ef599SAlexander Duyck ring->q_vector = q_vector; 1656e27ef599SAlexander Duyck 1657e27ef599SAlexander Duyck /* apply Tx specific ring traits */ 1658e27ef599SAlexander Duyck ring->count = interface->tx_ring_count; 1659e27ef599SAlexander Duyck ring->queue_index = txr_idx; 1660e27ef599SAlexander Duyck 1661e27ef599SAlexander Duyck /* assign ring to interface */ 1662e27ef599SAlexander Duyck interface->tx_ring[txr_idx] = ring; 1663e27ef599SAlexander Duyck 1664e27ef599SAlexander Duyck /* update count and index */ 1665e27ef599SAlexander Duyck txr_count--; 1666e27ef599SAlexander Duyck txr_idx += v_count; 1667e27ef599SAlexander Duyck 1668e27ef599SAlexander Duyck /* push pointer to next ring */ 1669e27ef599SAlexander Duyck ring++; 1670e27ef599SAlexander Duyck } 1671e27ef599SAlexander Duyck 167218283cadSAlexander Duyck /* save Rx ring container info */ 1673e27ef599SAlexander Duyck q_vector->rx.ring = ring; 167418283cadSAlexander Duyck q_vector->rx.itr = interface->rx_itr; 1675242722ddSJacob Keller q_vector->rx.itr_scale = interface->hw.mac.itr_scale; 167618283cadSAlexander Duyck q_vector->rx.count = rxr_count; 167718283cadSAlexander Duyck 1678e27ef599SAlexander Duyck while (rxr_count) { 1679e27ef599SAlexander Duyck /* assign generic ring traits */ 1680e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1681e27ef599SAlexander Duyck ring->netdev = interface->netdev; 16825cd5e2e9SAlexander Duyck rcu_assign_pointer(ring->l2_accel, interface->l2_accel); 1683e27ef599SAlexander Duyck 1684e27ef599SAlexander Duyck /* configure backlink on ring */ 1685e27ef599SAlexander Duyck ring->q_vector = q_vector; 1686e27ef599SAlexander Duyck 1687e27ef599SAlexander Duyck /* apply Rx specific ring traits */ 1688e27ef599SAlexander Duyck ring->count = interface->rx_ring_count; 1689e27ef599SAlexander Duyck ring->queue_index = rxr_idx; 1690e27ef599SAlexander Duyck 1691e27ef599SAlexander Duyck /* assign ring to interface */ 1692e27ef599SAlexander Duyck interface->rx_ring[rxr_idx] = ring; 1693e27ef599SAlexander Duyck 1694e27ef599SAlexander Duyck /* update count and index */ 1695e27ef599SAlexander Duyck rxr_count--; 1696e27ef599SAlexander Duyck rxr_idx += v_count; 1697e27ef599SAlexander Duyck 1698e27ef599SAlexander Duyck /* push pointer to next ring */ 1699e27ef599SAlexander Duyck ring++; 1700e27ef599SAlexander Duyck } 1701e27ef599SAlexander Duyck 17027461fd91SAlexander Duyck fm10k_dbg_q_vector_init(q_vector); 17037461fd91SAlexander Duyck 170418283cadSAlexander Duyck return 0; 170518283cadSAlexander Duyck } 170618283cadSAlexander Duyck 170718283cadSAlexander Duyck /** 170818283cadSAlexander Duyck * fm10k_free_q_vector - Free memory allocated for specific interrupt vector 170918283cadSAlexander Duyck * @interface: board private structure to initialize 171018283cadSAlexander Duyck * @v_idx: Index of vector to be freed 171118283cadSAlexander Duyck * 171218283cadSAlexander Duyck * This function frees the memory allocated to the q_vector. In addition if 171318283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior 171418283cadSAlexander Duyck * to freeing the q_vector. 
171518283cadSAlexander Duyck **/ 171618283cadSAlexander Duyck static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) 171718283cadSAlexander Duyck { 171818283cadSAlexander Duyck struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; 1719e27ef599SAlexander Duyck struct fm10k_ring *ring; 1720e27ef599SAlexander Duyck 17217461fd91SAlexander Duyck fm10k_dbg_q_vector_exit(q_vector); 17227461fd91SAlexander Duyck 1723e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) 1724e27ef599SAlexander Duyck interface->tx_ring[ring->queue_index] = NULL; 1725e27ef599SAlexander Duyck 1726e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->rx) 1727e27ef599SAlexander Duyck interface->rx_ring[ring->queue_index] = NULL; 172818283cadSAlexander Duyck 172918283cadSAlexander Duyck interface->q_vector[v_idx] = NULL; 173018283cadSAlexander Duyck netif_napi_del(&q_vector->napi); 173118283cadSAlexander Duyck kfree_rcu(q_vector, rcu); 173218283cadSAlexander Duyck } 173318283cadSAlexander Duyck 173418283cadSAlexander Duyck /** 173518283cadSAlexander Duyck * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors 173618283cadSAlexander Duyck * @interface: board private structure to initialize 173718283cadSAlexander Duyck * 173818283cadSAlexander Duyck * We allocate one q_vector per queue interrupt. If allocation fails we 173918283cadSAlexander Duyck * return -ENOMEM. 174018283cadSAlexander Duyck **/ 174118283cadSAlexander Duyck static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) 174218283cadSAlexander Duyck { 174318283cadSAlexander Duyck unsigned int q_vectors = interface->num_q_vectors; 174418283cadSAlexander Duyck unsigned int rxr_remaining = interface->num_rx_queues; 174518283cadSAlexander Duyck unsigned int txr_remaining = interface->num_tx_queues; 174618283cadSAlexander Duyck unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; 174718283cadSAlexander Duyck int err; 174818283cadSAlexander Duyck 174918283cadSAlexander Duyck if (q_vectors >= (rxr_remaining + txr_remaining)) { 175018283cadSAlexander Duyck for (; rxr_remaining; v_idx++) { 175118283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 175218283cadSAlexander Duyck 0, 0, 1, rxr_idx); 175318283cadSAlexander Duyck if (err) 175418283cadSAlexander Duyck goto err_out; 175518283cadSAlexander Duyck 175618283cadSAlexander Duyck /* update counts and index */ 175718283cadSAlexander Duyck rxr_remaining--; 175818283cadSAlexander Duyck rxr_idx++; 175918283cadSAlexander Duyck } 176018283cadSAlexander Duyck } 176118283cadSAlexander Duyck 176218283cadSAlexander Duyck for (; v_idx < q_vectors; v_idx++) { 176318283cadSAlexander Duyck int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 176418283cadSAlexander Duyck int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 176518283cadSAlexander Duyck 176618283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 176718283cadSAlexander Duyck tqpv, txr_idx, 176818283cadSAlexander Duyck rqpv, rxr_idx); 176918283cadSAlexander Duyck 177018283cadSAlexander Duyck if (err) 177118283cadSAlexander Duyck goto err_out; 177218283cadSAlexander Duyck 177318283cadSAlexander Duyck /* update counts and index */ 177418283cadSAlexander Duyck rxr_remaining -= rqpv; 177518283cadSAlexander Duyck txr_remaining -= tqpv; 177618283cadSAlexander Duyck rxr_idx++; 177718283cadSAlexander Duyck txr_idx++; 177818283cadSAlexander Duyck } 177918283cadSAlexander Duyck 178018283cadSAlexander Duyck return 0; 178118283cadSAlexander Duyck 
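/* For illustration of the split above: with 6 Rx and 6 Tx queues
 * spread across 4 q_vectors, the DIV_ROUND_UP() math hands out
 * (Tx, Rx) counts of (2, 2), (2, 2), (1, 1) and (1, 1), re-dividing
 * whatever remains at each step so that no vector is left idle.
 */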
178218283cadSAlexander Duyck err_out: 17834be37c42SJacob Keller fm10k_reset_num_queues(interface); 178418283cadSAlexander Duyck 178518283cadSAlexander Duyck while (v_idx--) 178618283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx); 178718283cadSAlexander Duyck 178818283cadSAlexander Duyck return -ENOMEM; 178918283cadSAlexander Duyck } 179018283cadSAlexander Duyck 179118283cadSAlexander Duyck /** 179218283cadSAlexander Duyck * fm10k_free_q_vectors - Free memory allocated for interrupt vectors 179318283cadSAlexander Duyck * @interface: board private structure to initialize 179418283cadSAlexander Duyck * 179518283cadSAlexander Duyck * This function frees the memory allocated to the q_vectors. In addition if 179618283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior 179718283cadSAlexander Duyck * to freeing the q_vector. 179818283cadSAlexander Duyck **/ 179918283cadSAlexander Duyck static void fm10k_free_q_vectors(struct fm10k_intfc *interface) 180018283cadSAlexander Duyck { 180118283cadSAlexander Duyck int v_idx = interface->num_q_vectors; 180218283cadSAlexander Duyck 18034be37c42SJacob Keller fm10k_reset_num_queues(interface); 180418283cadSAlexander Duyck 180518283cadSAlexander Duyck while (v_idx--) 180618283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx); 180718283cadSAlexander Duyck } 180818283cadSAlexander Duyck 180918283cadSAlexander Duyck /** 181018283cadSAlexander Duyck * fm10k_reset_msix_capability - reset MSI-X capability 181118283cadSAlexander Duyck * @interface: board private structure to initialize 181218283cadSAlexander Duyck * 181318283cadSAlexander Duyck * Reset the MSI-X capability back to its starting state 181418283cadSAlexander Duyck **/ 181518283cadSAlexander Duyck static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) 181618283cadSAlexander Duyck { 181718283cadSAlexander Duyck pci_disable_msix(interface->pdev); 181818283cadSAlexander Duyck kfree(interface->msix_entries); 181918283cadSAlexander Duyck interface->msix_entries = NULL; 182018283cadSAlexander Duyck } 182118283cadSAlexander Duyck 182218283cadSAlexander Duyck /** 182318283cadSAlexander Duyck * fm10k_init_msix_capability - configure MSI-X capability 182418283cadSAlexander Duyck * @interface: board private structure to initialize 182518283cadSAlexander Duyck * 182618283cadSAlexander Duyck * Attempt to configure the interrupts using the best available 182718283cadSAlexander Duyck * capabilities of the hardware and the kernel. 182818283cadSAlexander Duyck **/ 182918283cadSAlexander Duyck static int fm10k_init_msix_capability(struct fm10k_intfc *interface) 183018283cadSAlexander Duyck { 183118283cadSAlexander Duyck struct fm10k_hw *hw = &interface->hw; 183218283cadSAlexander Duyck int v_budget, vector; 183318283cadSAlexander Duyck 183418283cadSAlexander Duyck /* It's easy to be greedy for MSI-X vectors, but it really 183518283cadSAlexander Duyck * doesn't do us much good if we have a lot more vectors 183618283cadSAlexander Duyck * than CPUs. So let's be conservative and only ask for 183718283cadSAlexander Duyck * (roughly) the same number of vectors as there are CPUs.
183818283cadSAlexander Duyck * the default is to use pairs of vectors 183918283cadSAlexander Duyck */ 184018283cadSAlexander Duyck v_budget = max(interface->num_rx_queues, interface->num_tx_queues); 184118283cadSAlexander Duyck v_budget = min_t(u16, v_budget, num_online_cpus()); 184218283cadSAlexander Duyck 184318283cadSAlexander Duyck /* account for vectors not related to queues */ 184418283cadSAlexander Duyck v_budget += NON_Q_VECTORS(hw); 184518283cadSAlexander Duyck 184618283cadSAlexander Duyck /* At the same time, hardware can only support a maximum of 184718283cadSAlexander Duyck * hw.mac->max_msix_vectors vectors. With features 184818283cadSAlexander Duyck * such as RSS and VMDq, we can easily surpass the number of Rx and Tx 184918283cadSAlexander Duyck * descriptor queues supported by our device. Thus, we cap it off in 185018283cadSAlexander Duyck * those rare cases where the cpu count also exceeds our vector limit. 185118283cadSAlexander Duyck */ 185218283cadSAlexander Duyck v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); 185318283cadSAlexander Duyck 185418283cadSAlexander Duyck /* A failure in MSI-X entry allocation is fatal. */ 185518283cadSAlexander Duyck interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 185618283cadSAlexander Duyck GFP_KERNEL); 185718283cadSAlexander Duyck if (!interface->msix_entries) 185818283cadSAlexander Duyck return -ENOMEM; 185918283cadSAlexander Duyck 186018283cadSAlexander Duyck /* populate entry values */ 186118283cadSAlexander Duyck for (vector = 0; vector < v_budget; vector++) 186218283cadSAlexander Duyck interface->msix_entries[vector].entry = vector; 186318283cadSAlexander Duyck 186418283cadSAlexander Duyck /* Attempt to enable MSI-X with requested value */ 186518283cadSAlexander Duyck v_budget = pci_enable_msix_range(interface->pdev, 186618283cadSAlexander Duyck interface->msix_entries, 186718283cadSAlexander Duyck MIN_MSIX_COUNT(hw), 186818283cadSAlexander Duyck v_budget); 186918283cadSAlexander Duyck if (v_budget < 0) { 187018283cadSAlexander Duyck kfree(interface->msix_entries); 187118283cadSAlexander Duyck interface->msix_entries = NULL; 187230e23b71SJacob Keller return v_budget; 187318283cadSAlexander Duyck } 187418283cadSAlexander Duyck 187518283cadSAlexander Duyck /* record the number of queues available for q_vectors */ 187618283cadSAlexander Duyck interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); 187718283cadSAlexander Duyck 187818283cadSAlexander Duyck return 0; 187918283cadSAlexander Duyck } 188018283cadSAlexander Duyck 1881aa3ac822SAlexander Duyck /** 1882aa3ac822SAlexander Duyck * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS 1883aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1884aa3ac822SAlexander Duyck * 1885aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for QoS 1886aa3ac822SAlexander Duyck **/ 1887aa3ac822SAlexander Duyck static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) 1888aa3ac822SAlexander Duyck { 1889aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev; 1890aa3ac822SAlexander Duyck int pc, offset, rss_i, i, q_idx; 1891aa3ac822SAlexander Duyck u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; 1892aa3ac822SAlexander Duyck u8 num_pcs = netdev_get_num_tc(dev); 1893aa3ac822SAlexander Duyck 1894aa3ac822SAlexander Duyck if (num_pcs <= 1) 1895aa3ac822SAlexander Duyck return false; 1896aa3ac822SAlexander Duyck 1897aa3ac822SAlexander Duyck rss_i =
interface->ring_feature[RING_F_RSS].indices; 1898aa3ac822SAlexander Duyck 1899aa3ac822SAlexander Duyck for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { 1900aa3ac822SAlexander Duyck q_idx = pc; 1901aa3ac822SAlexander Duyck for (i = 0; i < rss_i; i++) { 1902aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->reg_idx = q_idx; 1903aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->qos_pc = pc; 1904aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->reg_idx = q_idx; 1905aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->qos_pc = pc; 1906aa3ac822SAlexander Duyck q_idx += pc_stride; 1907aa3ac822SAlexander Duyck } 1908aa3ac822SAlexander Duyck } 1909aa3ac822SAlexander Duyck 1910aa3ac822SAlexander Duyck return true; 1911aa3ac822SAlexander Duyck } 1912aa3ac822SAlexander Duyck 1913aa3ac822SAlexander Duyck /** 1914aa3ac822SAlexander Duyck * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS 1915aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1916aa3ac822SAlexander Duyck * 1917aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for RSS 1918aa3ac822SAlexander Duyck **/ 1919aa3ac822SAlexander Duyck static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) 1920aa3ac822SAlexander Duyck { 1921aa3ac822SAlexander Duyck int i; 1922aa3ac822SAlexander Duyck 1923aa3ac822SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++) 1924aa3ac822SAlexander Duyck interface->rx_ring[i]->reg_idx = i; 1925aa3ac822SAlexander Duyck 1926aa3ac822SAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++) 1927aa3ac822SAlexander Duyck interface->tx_ring[i]->reg_idx = i; 1928aa3ac822SAlexander Duyck } 1929aa3ac822SAlexander Duyck 1930aa3ac822SAlexander Duyck /** 1931aa3ac822SAlexander Duyck * fm10k_assign_rings - Map rings to network devices 1932aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1933aa3ac822SAlexander Duyck * 1934aa3ac822SAlexander Duyck * This function is meant to go through and configure both the network 1935aa3ac822SAlexander Duyck * devices so that they contain rings, and configure the rings so that 1936aa3ac822SAlexander Duyck * they function with their network devices. 1937aa3ac822SAlexander Duyck **/ 1938aa3ac822SAlexander Duyck static void fm10k_assign_rings(struct fm10k_intfc *interface) 1939aa3ac822SAlexander Duyck { 1940aa3ac822SAlexander Duyck if (fm10k_cache_ring_qos(interface)) 1941aa3ac822SAlexander Duyck return; 1942aa3ac822SAlexander Duyck 1943aa3ac822SAlexander Duyck fm10k_cache_ring_rss(interface); 1944aa3ac822SAlexander Duyck } 1945aa3ac822SAlexander Duyck 194618283cadSAlexander Duyck static void fm10k_init_reta(struct fm10k_intfc *interface) 194718283cadSAlexander Duyck { 194818283cadSAlexander Duyck u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; 1949540a5d85SJacob Keller u32 reta; 195018283cadSAlexander Duyck 19511012014eSKeller, Jacob E /* If the Rx flow indirection table has been configured manually, we 19521012014eSKeller, Jacob E * need to maintain it when possible.
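 * Each 32-bit reta entry packs four 8-bit queue indices, one per
 * byte; the shift pairs below extract bytes 0-3 and compare each
 * against rss_i. For example, with rss_i = 4 an entry of 0x03020100
 * passes, while any byte of 4 or greater forces the table to be
 * repopulated.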
19531012014eSKeller, Jacob E */ 19541012014eSKeller, Jacob E if (netif_is_rxfh_configured(interface->netdev)) { 195518283cadSAlexander Duyck for (i = FM10K_RETA_SIZE; i--;) { 195618283cadSAlexander Duyck reta = interface->reta[i]; 195718283cadSAlexander Duyck if ((((reta << 24) >> 24) < rss_i) && 195818283cadSAlexander Duyck (((reta << 16) >> 24) < rss_i) && 195918283cadSAlexander Duyck (((reta << 8) >> 24) < rss_i) && 196018283cadSAlexander Duyck (((reta) >> 24) < rss_i)) 196118283cadSAlexander Duyck continue; 19621012014eSKeller, Jacob E 19631012014eSKeller, Jacob E /* this should never happen */ 19641012014eSKeller, Jacob E dev_err(&interface->pdev->dev, 19651012014eSKeller, Jacob E "RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n"); 196618283cadSAlexander Duyck goto repopulate_reta; 196718283cadSAlexander Duyck } 196818283cadSAlexander Duyck 196918283cadSAlexander Duyck /* do nothing if all of the elements are in bounds */ 197018283cadSAlexander Duyck return; 197118283cadSAlexander Duyck } 197218283cadSAlexander Duyck 197318283cadSAlexander Duyck repopulate_reta: 1974540a5d85SJacob Keller fm10k_write_reta(interface, NULL); 197518283cadSAlexander Duyck } 197618283cadSAlexander Duyck 197718283cadSAlexander Duyck /** 197818283cadSAlexander Duyck * fm10k_init_queueing_scheme - Determine proper queueing scheme 197918283cadSAlexander Duyck * @interface: board private structure to initialize 198018283cadSAlexander Duyck * 198118283cadSAlexander Duyck * We determine which queueing scheme to use based on... 198218283cadSAlexander Duyck * - Hardware queue count (num_*_queues) 198318283cadSAlexander Duyck * - defined by miscellaneous hardware support/features (RSS, etc.) 198418283cadSAlexander Duyck **/ 198518283cadSAlexander Duyck int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) 198618283cadSAlexander Duyck { 198718283cadSAlexander Duyck int err; 198818283cadSAlexander Duyck 198918283cadSAlexander Duyck /* Number of supported queues */ 199018283cadSAlexander Duyck fm10k_set_num_queues(interface); 199118283cadSAlexander Duyck 199218283cadSAlexander Duyck /* Configure MSI-X capability */ 199318283cadSAlexander Duyck err = fm10k_init_msix_capability(interface); 199418283cadSAlexander Duyck if (err) { 199518283cadSAlexander Duyck dev_err(&interface->pdev->dev, 199618283cadSAlexander Duyck "Unable to initialize MSI-X capability\n"); 19974be37c42SJacob Keller goto err_init_msix; 199818283cadSAlexander Duyck } 199918283cadSAlexander Duyck 200018283cadSAlexander Duyck /* Allocate memory for queues */ 200118283cadSAlexander Duyck err = fm10k_alloc_q_vectors(interface); 2002587731e6SAlexander Duyck if (err) { 20034be37c42SJacob Keller dev_err(&interface->pdev->dev, 20044be37c42SJacob Keller "Unable to allocate queue vectors\n"); 20054be37c42SJacob Keller goto err_alloc_q_vectors; 2006587731e6SAlexander Duyck } 200718283cadSAlexander Duyck 2008aa3ac822SAlexander Duyck /* Map rings to devices, and map devices to physical queues */ 2009aa3ac822SAlexander Duyck fm10k_assign_rings(interface); 2010aa3ac822SAlexander Duyck 201118283cadSAlexander Duyck /* Initialize RSS redirection table */ 201218283cadSAlexander Duyck fm10k_init_reta(interface); 201318283cadSAlexander Duyck 201418283cadSAlexander Duyck return 0; 20154be37c42SJacob Keller 20164be37c42SJacob Keller err_alloc_q_vectors: 20174be37c42SJacob Keller fm10k_reset_msix_capability(interface); 20184be37c42SJacob Keller err_init_msix: 20194be37c42SJacob Keller fm10k_reset_num_queues(interface); 20204be37c42SJacob 
Keller return err; 202118283cadSAlexander Duyck } 202218283cadSAlexander Duyck 202318283cadSAlexander Duyck /** 202418283cadSAlexander Duyck * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings 202518283cadSAlexander Duyck * @interface: board private structure to clear queueing scheme on 202618283cadSAlexander Duyck * 202718283cadSAlexander Duyck * We go through and clear queueing specific resources and reset the structure 202818283cadSAlexander Duyck * to pre-load conditions 202918283cadSAlexander Duyck **/ 203018283cadSAlexander Duyck void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) 203118283cadSAlexander Duyck { 203218283cadSAlexander Duyck fm10k_free_q_vectors(interface); 203318283cadSAlexander Duyck fm10k_reset_msix_capability(interface); 203418283cadSAlexander Duyck } 2035