1b3890e30SAlexander Duyck /* Intel Ethernet Switch Host Interface Driver 2b3890e30SAlexander Duyck * Copyright(c) 2013 - 2014 Intel Corporation. 3b3890e30SAlexander Duyck * 4b3890e30SAlexander Duyck * This program is free software; you can redistribute it and/or modify it 5b3890e30SAlexander Duyck * under the terms and conditions of the GNU General Public License, 6b3890e30SAlexander Duyck * version 2, as published by the Free Software Foundation. 7b3890e30SAlexander Duyck * 8b3890e30SAlexander Duyck * This program is distributed in the hope it will be useful, but WITHOUT 9b3890e30SAlexander Duyck * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10b3890e30SAlexander Duyck * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11b3890e30SAlexander Duyck * more details. 12b3890e30SAlexander Duyck * 13b3890e30SAlexander Duyck * The full GNU General Public License is included in this distribution in 14b3890e30SAlexander Duyck * the file called "COPYING". 15b3890e30SAlexander Duyck * 16b3890e30SAlexander Duyck * Contact Information: 17b3890e30SAlexander Duyck * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 18b3890e30SAlexander Duyck * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 19b3890e30SAlexander Duyck */ 20b3890e30SAlexander Duyck 21b3890e30SAlexander Duyck #include <linux/types.h> 22b3890e30SAlexander Duyck #include <linux/module.h> 23b3890e30SAlexander Duyck #include <net/ipv6.h> 24b3890e30SAlexander Duyck #include <net/ip.h> 25b3890e30SAlexander Duyck #include <net/tcp.h> 26b3890e30SAlexander Duyck #include <linux/if_macvlan.h> 27b101c962SAlexander Duyck #include <linux/prefetch.h> 28b3890e30SAlexander Duyck 29b3890e30SAlexander Duyck #include "fm10k.h" 30b3890e30SAlexander Duyck 31e3b6e95dSJacob Keller #define DRV_VERSION "0.19.3-k" 32b3890e30SAlexander Duyck const char fm10k_driver_version[] = DRV_VERSION; 33b3890e30SAlexander Duyck char fm10k_driver_name[] = "fm10k"; 34b3890e30SAlexander Duyck static const char fm10k_driver_string[] = 35b3890e30SAlexander Duyck "Intel(R) Ethernet Switch Host Interface Driver"; 36b3890e30SAlexander Duyck static const char fm10k_copyright[] = 37b3890e30SAlexander Duyck "Copyright (c) 2013 Intel Corporation."; 38b3890e30SAlexander Duyck 39b3890e30SAlexander Duyck MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 40b3890e30SAlexander Duyck MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); 41b3890e30SAlexander Duyck MODULE_LICENSE("GPL"); 42b3890e30SAlexander Duyck MODULE_VERSION(DRV_VERSION); 43b3890e30SAlexander Duyck 44b382bb1bSJeff Kirsher /* single workqueue for entire fm10k driver */ 4507146e2eSBruce Allan struct workqueue_struct *fm10k_workqueue; 46b382bb1bSJeff Kirsher 476d2ce900SAlexander Duyck /** 486d2ce900SAlexander Duyck * fm10k_init_module - Driver Registration Routine 49b3890e30SAlexander Duyck * 50b3890e30SAlexander Duyck * fm10k_init_module is the first routine called when the driver is 51b3890e30SAlexander Duyck * loaded. All it does is register with the PCI subsystem. 
52b3890e30SAlexander Duyck **/ 53b3890e30SAlexander Duyck static int __init fm10k_init_module(void) 54b3890e30SAlexander Duyck { 55b3890e30SAlexander Duyck pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); 56b3890e30SAlexander Duyck pr_info("%s\n", fm10k_copyright); 57b3890e30SAlexander Duyck 58b382bb1bSJeff Kirsher /* create driver workqueue */ 59b382bb1bSJeff Kirsher fm10k_workqueue = create_workqueue("fm10k"); 60b382bb1bSJeff Kirsher 617461fd91SAlexander Duyck fm10k_dbg_init(); 627461fd91SAlexander Duyck 63b3890e30SAlexander Duyck return fm10k_register_pci_driver(); 64b3890e30SAlexander Duyck } 65b3890e30SAlexander Duyck module_init(fm10k_init_module); 66b3890e30SAlexander Duyck 67b3890e30SAlexander Duyck /** 68b3890e30SAlexander Duyck * fm10k_exit_module - Driver Exit Cleanup Routine 69b3890e30SAlexander Duyck * 70b3890e30SAlexander Duyck * fm10k_exit_module is called just before the driver is removed 71b3890e30SAlexander Duyck * from memory. 72b3890e30SAlexander Duyck **/ 73b3890e30SAlexander Duyck static void __exit fm10k_exit_module(void) 74b3890e30SAlexander Duyck { 75b3890e30SAlexander Duyck fm10k_unregister_pci_driver(); 767461fd91SAlexander Duyck 777461fd91SAlexander Duyck fm10k_dbg_exit(); 78b382bb1bSJeff Kirsher 79b382bb1bSJeff Kirsher /* destroy driver workqueue */ 80b382bb1bSJeff Kirsher flush_workqueue(fm10k_workqueue); 81b382bb1bSJeff Kirsher destroy_workqueue(fm10k_workqueue); 82b3890e30SAlexander Duyck } 83b3890e30SAlexander Duyck module_exit(fm10k_exit_module); 8418283cadSAlexander Duyck 85b101c962SAlexander Duyck static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, 86b101c962SAlexander Duyck struct fm10k_rx_buffer *bi) 87b101c962SAlexander Duyck { 88b101c962SAlexander Duyck struct page *page = bi->page; 89b101c962SAlexander Duyck dma_addr_t dma; 90b101c962SAlexander Duyck 91b101c962SAlexander Duyck /* Only page will be NULL if buffer was consumed */ 92b101c962SAlexander Duyck if (likely(page)) 93b101c962SAlexander Duyck return true; 94b101c962SAlexander Duyck 95b101c962SAlexander Duyck /* alloc new page for storage */ 9642b17f09SAlexander Duyck page = dev_alloc_page(); 97b101c962SAlexander Duyck if (unlikely(!page)) { 98b101c962SAlexander Duyck rx_ring->rx_stats.alloc_failed++; 99b101c962SAlexander Duyck return false; 100b101c962SAlexander Duyck } 101b101c962SAlexander Duyck 102b101c962SAlexander Duyck /* map page for use */ 103b101c962SAlexander Duyck dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 104b101c962SAlexander Duyck 105b101c962SAlexander Duyck /* if mapping failed free memory back to system since 106b101c962SAlexander Duyck * there isn't much point in holding memory we can't use 107b101c962SAlexander Duyck */ 108b101c962SAlexander Duyck if (dma_mapping_error(rx_ring->dev, dma)) { 109b101c962SAlexander Duyck __free_page(page); 110b101c962SAlexander Duyck 111b101c962SAlexander Duyck rx_ring->rx_stats.alloc_failed++; 112b101c962SAlexander Duyck return false; 113b101c962SAlexander Duyck } 114b101c962SAlexander Duyck 115b101c962SAlexander Duyck bi->dma = dma; 116b101c962SAlexander Duyck bi->page = page; 117b101c962SAlexander Duyck bi->page_offset = 0; 118b101c962SAlexander Duyck 119b101c962SAlexander Duyck return true; 120b101c962SAlexander Duyck } 121b101c962SAlexander Duyck 122b101c962SAlexander Duyck /** 123b101c962SAlexander Duyck * fm10k_alloc_rx_buffers - Replace used receive buffers 124b101c962SAlexander Duyck * @rx_ring: ring to place buffers on 125b101c962SAlexander Duyck * @cleaned_count: number 
of buffers to replace 126b101c962SAlexander Duyck **/ 127b101c962SAlexander Duyck void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) 128b101c962SAlexander Duyck { 129b101c962SAlexander Duyck union fm10k_rx_desc *rx_desc; 130b101c962SAlexander Duyck struct fm10k_rx_buffer *bi; 131b101c962SAlexander Duyck u16 i = rx_ring->next_to_use; 132b101c962SAlexander Duyck 133b101c962SAlexander Duyck /* nothing to do */ 134b101c962SAlexander Duyck if (!cleaned_count) 135b101c962SAlexander Duyck return; 136b101c962SAlexander Duyck 137b101c962SAlexander Duyck rx_desc = FM10K_RX_DESC(rx_ring, i); 138b101c962SAlexander Duyck bi = &rx_ring->rx_buffer[i]; 139b101c962SAlexander Duyck i -= rx_ring->count; 140b101c962SAlexander Duyck 141b101c962SAlexander Duyck do { 142b101c962SAlexander Duyck if (!fm10k_alloc_mapped_page(rx_ring, bi)) 143b101c962SAlexander Duyck break; 144b101c962SAlexander Duyck 145b101c962SAlexander Duyck /* Refresh the desc even if buffer_addrs didn't change 146b101c962SAlexander Duyck * because each write-back erases this info. 147b101c962SAlexander Duyck */ 148b101c962SAlexander Duyck rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 149b101c962SAlexander Duyck 150b101c962SAlexander Duyck rx_desc++; 151b101c962SAlexander Duyck bi++; 152b101c962SAlexander Duyck i++; 153b101c962SAlexander Duyck if (unlikely(!i)) { 154b101c962SAlexander Duyck rx_desc = FM10K_RX_DESC(rx_ring, 0); 155b101c962SAlexander Duyck bi = rx_ring->rx_buffer; 156b101c962SAlexander Duyck i -= rx_ring->count; 157b101c962SAlexander Duyck } 158b101c962SAlexander Duyck 159ba5b8dcdSAlexander Duyck /* clear the status bits for the next_to_use descriptor */ 160ba5b8dcdSAlexander Duyck rx_desc->d.staterr = 0; 161b101c962SAlexander Duyck 162b101c962SAlexander Duyck cleaned_count--; 163b101c962SAlexander Duyck } while (cleaned_count); 164b101c962SAlexander Duyck 165b101c962SAlexander Duyck i += rx_ring->count; 166b101c962SAlexander Duyck 167b101c962SAlexander Duyck if (rx_ring->next_to_use != i) { 168b101c962SAlexander Duyck /* record the next descriptor to use */ 169b101c962SAlexander Duyck rx_ring->next_to_use = i; 170b101c962SAlexander Duyck 171b101c962SAlexander Duyck /* update next to alloc since we have filled the ring */ 172b101c962SAlexander Duyck rx_ring->next_to_alloc = i; 173b101c962SAlexander Duyck 174b101c962SAlexander Duyck /* Force memory writes to complete before letting h/w 175b101c962SAlexander Duyck * know there are new descriptors to fetch. (Only 176b101c962SAlexander Duyck * applicable for weak-ordered memory model archs, 177b101c962SAlexander Duyck * such as IA-64). 
178b101c962SAlexander Duyck */ 179b101c962SAlexander Duyck wmb(); 180b101c962SAlexander Duyck 181b101c962SAlexander Duyck /* notify hardware of new descriptors */ 182b101c962SAlexander Duyck writel(i, rx_ring->tail); 183b101c962SAlexander Duyck } 184b101c962SAlexander Duyck } 185b101c962SAlexander Duyck 186b101c962SAlexander Duyck /** 187b101c962SAlexander Duyck * fm10k_reuse_rx_page - page flip buffer and store it back on the ring 188b101c962SAlexander Duyck * @rx_ring: rx descriptor ring to store buffers on 189b101c962SAlexander Duyck * @old_buff: donor buffer to have page reused 190b101c962SAlexander Duyck * 191b101c962SAlexander Duyck * Synchronizes page for reuse by the interface 192b101c962SAlexander Duyck **/ 193b101c962SAlexander Duyck static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, 194b101c962SAlexander Duyck struct fm10k_rx_buffer *old_buff) 195b101c962SAlexander Duyck { 196b101c962SAlexander Duyck struct fm10k_rx_buffer *new_buff; 197b101c962SAlexander Duyck u16 nta = rx_ring->next_to_alloc; 198b101c962SAlexander Duyck 199b101c962SAlexander Duyck new_buff = &rx_ring->rx_buffer[nta]; 200b101c962SAlexander Duyck 201b101c962SAlexander Duyck /* update, and store next to alloc */ 202b101c962SAlexander Duyck nta++; 203b101c962SAlexander Duyck rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 204b101c962SAlexander Duyck 205b101c962SAlexander Duyck /* transfer page from old buffer to new buffer */ 206ba5b8dcdSAlexander Duyck *new_buff = *old_buff; 207b101c962SAlexander Duyck 208b101c962SAlexander Duyck /* sync the buffer for use by the device */ 209b101c962SAlexander Duyck dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, 210b101c962SAlexander Duyck old_buff->page_offset, 211b101c962SAlexander Duyck FM10K_RX_BUFSZ, 212b101c962SAlexander Duyck DMA_FROM_DEVICE); 213b101c962SAlexander Duyck } 214b101c962SAlexander Duyck 215ba5b8dcdSAlexander Duyck static inline bool fm10k_page_is_reserved(struct page *page) 216ba5b8dcdSAlexander Duyck { 2172f064f34SMichal Hocko return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 218ba5b8dcdSAlexander Duyck } 219ba5b8dcdSAlexander Duyck 220b101c962SAlexander Duyck static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 221b101c962SAlexander Duyck struct page *page, 222de445199SJeff Kirsher unsigned int __maybe_unused truesize) 223b101c962SAlexander Duyck { 224b101c962SAlexander Duyck /* avoid re-using remote pages */ 225ba5b8dcdSAlexander Duyck if (unlikely(fm10k_page_is_reserved(page))) 226b101c962SAlexander Duyck return false; 227b101c962SAlexander Duyck 228b101c962SAlexander Duyck #if (PAGE_SIZE < 8192) 229b101c962SAlexander Duyck /* if we are only owner of page we can reuse it */ 230b101c962SAlexander Duyck if (unlikely(page_count(page) != 1)) 231b101c962SAlexander Duyck return false; 232b101c962SAlexander Duyck 233b101c962SAlexander Duyck /* flip page offset to other buffer */ 234b101c962SAlexander Duyck rx_buffer->page_offset ^= FM10K_RX_BUFSZ; 235b101c962SAlexander Duyck #else 236b101c962SAlexander Duyck /* move offset up to the next cache line */ 237b101c962SAlexander Duyck rx_buffer->page_offset += truesize; 238b101c962SAlexander Duyck 239b101c962SAlexander Duyck if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) 240b101c962SAlexander Duyck return false; 241b101c962SAlexander Duyck #endif 242b101c962SAlexander Duyck 243ba5b8dcdSAlexander Duyck /* Even if we own the page, we are not allowed to use atomic_set() 244ba5b8dcdSAlexander Duyck * This would break 
get_page_unless_zero() users. 245ba5b8dcdSAlexander Duyck */ 246fe896d18SJoonsoo Kim page_ref_inc(page); 247ba5b8dcdSAlexander Duyck 248b101c962SAlexander Duyck return true; 249b101c962SAlexander Duyck } 250b101c962SAlexander Duyck 251b101c962SAlexander Duyck /** 252b101c962SAlexander Duyck * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff 253b101c962SAlexander Duyck * @rx_buffer: buffer containing page to add 254b101c962SAlexander Duyck * @rx_desc: descriptor containing length of buffer written by hardware 255b101c962SAlexander Duyck * @skb: sk_buff to place the data into 256b101c962SAlexander Duyck * 257b101c962SAlexander Duyck * This function will add the data contained in rx_buffer->page to the skb. 258b101c962SAlexander Duyck * This is done either through a direct copy if the data in the buffer is 259b101c962SAlexander Duyck * less than the skb header size, otherwise it will just attach the page as 260b101c962SAlexander Duyck * a frag to the skb. 261b101c962SAlexander Duyck * 262b101c962SAlexander Duyck * The function will then update the page offset if necessary and return 263b101c962SAlexander Duyck * true if the buffer can be reused by the interface. 264b101c962SAlexander Duyck **/ 265de445199SJeff Kirsher static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, 266b101c962SAlexander Duyck union fm10k_rx_desc *rx_desc, 267b101c962SAlexander Duyck struct sk_buff *skb) 268b101c962SAlexander Duyck { 269b101c962SAlexander Duyck struct page *page = rx_buffer->page; 2701a8782e5SAlexander Duyck unsigned char *va = page_address(page) + rx_buffer->page_offset; 271b101c962SAlexander Duyck unsigned int size = le16_to_cpu(rx_desc->w.length); 272b101c962SAlexander Duyck #if (PAGE_SIZE < 8192) 273b101c962SAlexander Duyck unsigned int truesize = FM10K_RX_BUFSZ; 274b101c962SAlexander Duyck #else 2751a8782e5SAlexander Duyck unsigned int truesize = SKB_DATA_ALIGN(size); 276b101c962SAlexander Duyck #endif 2771a8782e5SAlexander Duyck unsigned int pull_len; 278b101c962SAlexander Duyck 2791a8782e5SAlexander Duyck if (unlikely(skb_is_nonlinear(skb))) 2801a8782e5SAlexander Duyck goto add_tail_frag; 281b101c962SAlexander Duyck 2821a8782e5SAlexander Duyck if (likely(size <= FM10K_RX_HDR_LEN)) { 283b101c962SAlexander Duyck memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 284b101c962SAlexander Duyck 285ba5b8dcdSAlexander Duyck /* page is not reserved, we can reuse buffer as-is */ 286ba5b8dcdSAlexander Duyck if (likely(!fm10k_page_is_reserved(page))) 287b101c962SAlexander Duyck return true; 288b101c962SAlexander Duyck 289b101c962SAlexander Duyck /* this page cannot be reused so discard it */ 290ba5b8dcdSAlexander Duyck __free_page(page); 291b101c962SAlexander Duyck return false; 292b101c962SAlexander Duyck } 293b101c962SAlexander Duyck 2941a8782e5SAlexander Duyck /* we need the header to contain the greater of either ETH_HLEN or 2951a8782e5SAlexander Duyck * 60 bytes if the skb->len is less than 60 for skb_pad. 
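 * eth_get_headlen() caps the pull length at FM10K_RX_HDR_LEN bytes.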
2961a8782e5SAlexander Duyck */ 2971a8782e5SAlexander Duyck pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); 2981a8782e5SAlexander Duyck 2991a8782e5SAlexander Duyck /* align pull length to size of long to optimize memcpy performance */ 3001a8782e5SAlexander Duyck memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); 3011a8782e5SAlexander Duyck 3021a8782e5SAlexander Duyck /* update all of the pointers */ 3031a8782e5SAlexander Duyck va += pull_len; 3041a8782e5SAlexander Duyck size -= pull_len; 3051a8782e5SAlexander Duyck 3061a8782e5SAlexander Duyck add_tail_frag: 307b101c962SAlexander Duyck skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 3081a8782e5SAlexander Duyck (unsigned long)va & ~PAGE_MASK, size, truesize); 309b101c962SAlexander Duyck 310b101c962SAlexander Duyck return fm10k_can_reuse_rx_page(rx_buffer, page, truesize); 311b101c962SAlexander Duyck } 312b101c962SAlexander Duyck 313b101c962SAlexander Duyck static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, 314b101c962SAlexander Duyck union fm10k_rx_desc *rx_desc, 315b101c962SAlexander Duyck struct sk_buff *skb) 316b101c962SAlexander Duyck { 317b101c962SAlexander Duyck struct fm10k_rx_buffer *rx_buffer; 318b101c962SAlexander Duyck struct page *page; 319b101c962SAlexander Duyck 320b101c962SAlexander Duyck rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; 321b101c962SAlexander Duyck page = rx_buffer->page; 322b101c962SAlexander Duyck prefetchw(page); 323b101c962SAlexander Duyck 324b101c962SAlexander Duyck if (likely(!skb)) { 325b101c962SAlexander Duyck void *page_addr = page_address(page) + 326b101c962SAlexander Duyck rx_buffer->page_offset; 327b101c962SAlexander Duyck 328b101c962SAlexander Duyck /* prefetch first cache line of first page */ 329b101c962SAlexander Duyck prefetch(page_addr); 330b101c962SAlexander Duyck #if L1_CACHE_BYTES < 128 331b101c962SAlexander Duyck prefetch(page_addr + L1_CACHE_BYTES); 332b101c962SAlexander Duyck #endif 333b101c962SAlexander Duyck 334b101c962SAlexander Duyck /* allocate a skb to store the frags */ 33567fd893eSAlexander Duyck skb = napi_alloc_skb(&rx_ring->q_vector->napi, 336b101c962SAlexander Duyck FM10K_RX_HDR_LEN); 337b101c962SAlexander Duyck if (unlikely(!skb)) { 338b101c962SAlexander Duyck rx_ring->rx_stats.alloc_failed++; 339b101c962SAlexander Duyck return NULL; 340b101c962SAlexander Duyck } 341b101c962SAlexander Duyck 342b101c962SAlexander Duyck /* we will be copying header into skb->data in 343b101c962SAlexander Duyck * pskb_may_pull so it is in our interest to prefetch 344b101c962SAlexander Duyck * it now to avoid a possible cache miss 345b101c962SAlexander Duyck */ 346b101c962SAlexander Duyck prefetchw(skb->data); 347b101c962SAlexander Duyck } 348b101c962SAlexander Duyck 349b101c962SAlexander Duyck /* we are reusing so sync this buffer for CPU use */ 350b101c962SAlexander Duyck dma_sync_single_range_for_cpu(rx_ring->dev, 351b101c962SAlexander Duyck rx_buffer->dma, 352b101c962SAlexander Duyck rx_buffer->page_offset, 353b101c962SAlexander Duyck FM10K_RX_BUFSZ, 354b101c962SAlexander Duyck DMA_FROM_DEVICE); 355b101c962SAlexander Duyck 356b101c962SAlexander Duyck /* pull page into skb */ 357de445199SJeff Kirsher if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { 358b101c962SAlexander Duyck /* hand second half of page back to the ring */ 359b101c962SAlexander Duyck fm10k_reuse_rx_page(rx_ring, rx_buffer); 360b101c962SAlexander Duyck } else { 361b101c962SAlexander Duyck /* we are not reusing the buffer so unmap it */ 362b101c962SAlexander Duyck 
dma_unmap_page(rx_ring->dev, rx_buffer->dma, 363b101c962SAlexander Duyck PAGE_SIZE, DMA_FROM_DEVICE); 364b101c962SAlexander Duyck } 365b101c962SAlexander Duyck 366b101c962SAlexander Duyck /* clear contents of rx_buffer */ 367b101c962SAlexander Duyck rx_buffer->page = NULL; 368b101c962SAlexander Duyck 369b101c962SAlexander Duyck return skb; 370b101c962SAlexander Duyck } 371b101c962SAlexander Duyck 37276a540d4SAlexander Duyck static inline void fm10k_rx_checksum(struct fm10k_ring *ring, 37376a540d4SAlexander Duyck union fm10k_rx_desc *rx_desc, 37476a540d4SAlexander Duyck struct sk_buff *skb) 37576a540d4SAlexander Duyck { 37676a540d4SAlexander Duyck skb_checksum_none_assert(skb); 37776a540d4SAlexander Duyck 37876a540d4SAlexander Duyck /* Rx checksum disabled via ethtool */ 37976a540d4SAlexander Duyck if (!(ring->netdev->features & NETIF_F_RXCSUM)) 38076a540d4SAlexander Duyck return; 38176a540d4SAlexander Duyck 38276a540d4SAlexander Duyck /* TCP/UDP checksum error bit is set */ 38376a540d4SAlexander Duyck if (fm10k_test_staterr(rx_desc, 38476a540d4SAlexander Duyck FM10K_RXD_STATUS_L4E | 38576a540d4SAlexander Duyck FM10K_RXD_STATUS_L4E2 | 38676a540d4SAlexander Duyck FM10K_RXD_STATUS_IPE | 38776a540d4SAlexander Duyck FM10K_RXD_STATUS_IPE2)) { 38876a540d4SAlexander Duyck ring->rx_stats.csum_err++; 38976a540d4SAlexander Duyck return; 39076a540d4SAlexander Duyck } 39176a540d4SAlexander Duyck 39276a540d4SAlexander Duyck /* It must be a TCP or UDP packet with a valid checksum */ 39376a540d4SAlexander Duyck if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2)) 39476a540d4SAlexander Duyck skb->encapsulation = true; 39576a540d4SAlexander Duyck else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS)) 39676a540d4SAlexander Duyck return; 39776a540d4SAlexander Duyck 39876a540d4SAlexander Duyck skb->ip_summed = CHECKSUM_UNNECESSARY; 39980043f3bSJacob Keller 40080043f3bSJacob Keller ring->rx_stats.csum_good++; 40176a540d4SAlexander Duyck } 40276a540d4SAlexander Duyck 40376a540d4SAlexander Duyck #define FM10K_RSS_L4_TYPES_MASK \ 404fcdb0a99SBruce Allan (BIT(FM10K_RSSTYPE_IPV4_TCP) | \ 405fcdb0a99SBruce Allan BIT(FM10K_RSSTYPE_IPV4_UDP) | \ 406fcdb0a99SBruce Allan BIT(FM10K_RSSTYPE_IPV6_TCP) | \ 407fcdb0a99SBruce Allan BIT(FM10K_RSSTYPE_IPV6_UDP)) 40876a540d4SAlexander Duyck 40976a540d4SAlexander Duyck static inline void fm10k_rx_hash(struct fm10k_ring *ring, 41076a540d4SAlexander Duyck union fm10k_rx_desc *rx_desc, 41176a540d4SAlexander Duyck struct sk_buff *skb) 41276a540d4SAlexander Duyck { 41376a540d4SAlexander Duyck u16 rss_type; 41476a540d4SAlexander Duyck 41576a540d4SAlexander Duyck if (!(ring->netdev->features & NETIF_F_RXHASH)) 41676a540d4SAlexander Duyck return; 41776a540d4SAlexander Duyck 41876a540d4SAlexander Duyck rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK; 41976a540d4SAlexander Duyck if (!rss_type) 42076a540d4SAlexander Duyck return; 42176a540d4SAlexander Duyck 42276a540d4SAlexander Duyck skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), 423fcdb0a99SBruce Allan (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ? 
42476a540d4SAlexander Duyck PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); 42576a540d4SAlexander Duyck } 42676a540d4SAlexander Duyck 427a211e013SAlexander Duyck static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, 428a211e013SAlexander Duyck union fm10k_rx_desc *rx_desc, 429a211e013SAlexander Duyck struct sk_buff *skb) 430a211e013SAlexander Duyck { 431a211e013SAlexander Duyck struct fm10k_intfc *interface = rx_ring->q_vector->interface; 432a211e013SAlexander Duyck 433a211e013SAlexander Duyck FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; 434a211e013SAlexander Duyck 435a211e013SAlexander Duyck if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED)) 436a211e013SAlexander Duyck fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb), 437a211e013SAlexander Duyck le64_to_cpu(rx_desc->q.timestamp)); 438a211e013SAlexander Duyck } 439a211e013SAlexander Duyck 4405cd5e2e9SAlexander Duyck static void fm10k_type_trans(struct fm10k_ring *rx_ring, 441de445199SJeff Kirsher union fm10k_rx_desc __maybe_unused *rx_desc, 4425cd5e2e9SAlexander Duyck struct sk_buff *skb) 4435cd5e2e9SAlexander Duyck { 4445cd5e2e9SAlexander Duyck struct net_device *dev = rx_ring->netdev; 4455cd5e2e9SAlexander Duyck struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); 4465cd5e2e9SAlexander Duyck 4475cd5e2e9SAlexander Duyck /* check to see if DGLORT belongs to a MACVLAN */ 4485cd5e2e9SAlexander Duyck if (l2_accel) { 4495cd5e2e9SAlexander Duyck u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1; 4505cd5e2e9SAlexander Duyck 4515cd5e2e9SAlexander Duyck idx -= l2_accel->dglort; 4525cd5e2e9SAlexander Duyck if (idx < l2_accel->size && l2_accel->macvlan[idx]) 4535cd5e2e9SAlexander Duyck dev = l2_accel->macvlan[idx]; 4545cd5e2e9SAlexander Duyck else 4555cd5e2e9SAlexander Duyck l2_accel = NULL; 4565cd5e2e9SAlexander Duyck } 4575cd5e2e9SAlexander Duyck 4585cd5e2e9SAlexander Duyck skb->protocol = eth_type_trans(skb, dev); 4595cd5e2e9SAlexander Duyck 4605cd5e2e9SAlexander Duyck if (!l2_accel) 4615cd5e2e9SAlexander Duyck return; 4625cd5e2e9SAlexander Duyck 4635cd5e2e9SAlexander Duyck /* update MACVLAN statistics */ 4645cd5e2e9SAlexander Duyck macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1, 4655cd5e2e9SAlexander Duyck !!(rx_desc->w.hdr_info & 4665cd5e2e9SAlexander Duyck cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK))); 4675cd5e2e9SAlexander Duyck } 4685cd5e2e9SAlexander Duyck 469b101c962SAlexander Duyck /** 470b101c962SAlexander Duyck * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor 471b101c962SAlexander Duyck * @rx_ring: rx descriptor ring packet is being transacted on 472b101c962SAlexander Duyck * @rx_desc: pointer to the EOP Rx descriptor 473b101c962SAlexander Duyck * @skb: pointer to current skb being populated 474b101c962SAlexander Duyck * 475b101c962SAlexander Duyck * This function checks the ring, descriptor, and packet information in 476b101c962SAlexander Duyck * order to populate the hash, checksum, VLAN, timestamp, protocol, and 477b101c962SAlexander Duyck * other fields within the skb. 
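 * It returns the frame length, which the caller adds to its byte counters.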
478b101c962SAlexander Duyck **/ 479b101c962SAlexander Duyck static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, 480b101c962SAlexander Duyck union fm10k_rx_desc *rx_desc, 481b101c962SAlexander Duyck struct sk_buff *skb) 482b101c962SAlexander Duyck { 483b101c962SAlexander Duyck unsigned int len = skb->len; 484b101c962SAlexander Duyck 48576a540d4SAlexander Duyck fm10k_rx_hash(rx_ring, rx_desc, skb); 48676a540d4SAlexander Duyck 48776a540d4SAlexander Duyck fm10k_rx_checksum(rx_ring, rx_desc, skb); 48876a540d4SAlexander Duyck 489a211e013SAlexander Duyck fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); 490a211e013SAlexander Duyck 491b101c962SAlexander Duyck FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; 492b101c962SAlexander Duyck 493b101c962SAlexander Duyck skb_record_rx_queue(skb, rx_ring->queue_index); 494b101c962SAlexander Duyck 495b101c962SAlexander Duyck FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort; 496b101c962SAlexander Duyck 497b101c962SAlexander Duyck if (rx_desc->w.vlan) { 498b101c962SAlexander Duyck u16 vid = le16_to_cpu(rx_desc->w.vlan); 499b101c962SAlexander Duyck 500e71c9318SJacob Keller if ((vid & VLAN_VID_MASK) != rx_ring->vid) 501b101c962SAlexander Duyck __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 502e71c9318SJacob Keller else if (vid & VLAN_PRIO_MASK) 503e71c9318SJacob Keller __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 504e71c9318SJacob Keller vid & VLAN_PRIO_MASK); 505b101c962SAlexander Duyck } 506b101c962SAlexander Duyck 5075cd5e2e9SAlexander Duyck fm10k_type_trans(rx_ring, rx_desc, skb); 508b101c962SAlexander Duyck 509b101c962SAlexander Duyck return len; 510b101c962SAlexander Duyck } 511b101c962SAlexander Duyck 512b101c962SAlexander Duyck /** 513b101c962SAlexander Duyck * fm10k_is_non_eop - process handling of non-EOP buffers 514b101c962SAlexander Duyck * @rx_ring: Rx ring being processed 515b101c962SAlexander Duyck * @rx_desc: Rx descriptor for current buffer 516b101c962SAlexander Duyck * 517b101c962SAlexander Duyck * This function updates next to clean. If the buffer is an EOP buffer 518b101c962SAlexander Duyck * this function exits returning false, otherwise it will place the 519b101c962SAlexander Duyck * sk_buff in the next buffer to be chained and return true indicating 520b101c962SAlexander Duyck * that this is in fact a non-EOP buffer. 521b101c962SAlexander Duyck **/ 522b101c962SAlexander Duyck static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, 523b101c962SAlexander Duyck union fm10k_rx_desc *rx_desc) 524b101c962SAlexander Duyck { 525b101c962SAlexander Duyck u32 ntc = rx_ring->next_to_clean + 1; 526b101c962SAlexander Duyck 527b101c962SAlexander Duyck /* fetch, update, and store next to clean */ 528b101c962SAlexander Duyck ntc = (ntc < rx_ring->count) ? 
ntc : 0; 529b101c962SAlexander Duyck rx_ring->next_to_clean = ntc; 530b101c962SAlexander Duyck 531b101c962SAlexander Duyck prefetch(FM10K_RX_DESC(rx_ring, ntc)); 532b101c962SAlexander Duyck 533b101c962SAlexander Duyck if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP))) 534b101c962SAlexander Duyck return false; 535b101c962SAlexander Duyck 536b101c962SAlexander Duyck return true; 537b101c962SAlexander Duyck } 538b101c962SAlexander Duyck 539b101c962SAlexander Duyck /** 540b101c962SAlexander Duyck * fm10k_cleanup_headers - Correct corrupted or empty headers 541b101c962SAlexander Duyck * @rx_ring: rx descriptor ring packet is being transacted on 542b101c962SAlexander Duyck * @rx_desc: pointer to the EOP Rx descriptor 543b101c962SAlexander Duyck * @skb: pointer to current skb being fixed 544b101c962SAlexander Duyck * 545b101c962SAlexander Duyck * Address the case where we are pulling data in on pages only 546b101c962SAlexander Duyck * and as such no data is present in the skb header. 547b101c962SAlexander Duyck * 548b101c962SAlexander Duyck * In addition if skb is not at least 60 bytes we need to pad it so that 549b101c962SAlexander Duyck * it is large enough to qualify as a valid Ethernet frame. 550b101c962SAlexander Duyck * 551b101c962SAlexander Duyck * Returns true if an error was encountered and skb was freed. 552b101c962SAlexander Duyck **/ 553b101c962SAlexander Duyck static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, 554b101c962SAlexander Duyck union fm10k_rx_desc *rx_desc, 555b101c962SAlexander Duyck struct sk_buff *skb) 556b101c962SAlexander Duyck { 557b101c962SAlexander Duyck if (unlikely((fm10k_test_staterr(rx_desc, 558b101c962SAlexander Duyck FM10K_RXD_STATUS_RXE)))) { 55980043f3bSJacob Keller #define FM10K_TEST_RXD_BIT(rxd, bit) \ 56080043f3bSJacob Keller ((rxd)->w.csum_err & cpu_to_le16(bit)) 56180043f3bSJacob Keller if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR)) 56280043f3bSJacob Keller rx_ring->rx_stats.switch_errors++; 56380043f3bSJacob Keller if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR)) 56480043f3bSJacob Keller rx_ring->rx_stats.drops++; 56580043f3bSJacob Keller if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR)) 56680043f3bSJacob Keller rx_ring->rx_stats.pp_errors++; 56780043f3bSJacob Keller if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY)) 56880043f3bSJacob Keller rx_ring->rx_stats.link_errors++; 56980043f3bSJacob Keller if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG)) 57080043f3bSJacob Keller rx_ring->rx_stats.length_errors++; 571b101c962SAlexander Duyck dev_kfree_skb_any(skb); 572b101c962SAlexander Duyck rx_ring->rx_stats.errors++; 573b101c962SAlexander Duyck return true; 574b101c962SAlexander Duyck } 575b101c962SAlexander Duyck 576a94d9e22SAlexander Duyck /* if eth_skb_pad returns an error the skb was freed */ 577a94d9e22SAlexander Duyck if (eth_skb_pad(skb)) 578b101c962SAlexander Duyck return true; 579b101c962SAlexander Duyck 580b101c962SAlexander Duyck return false; 581b101c962SAlexander Duyck } 582b101c962SAlexander Duyck 583b101c962SAlexander Duyck /** 584b101c962SAlexander Duyck * fm10k_receive_skb - helper function to handle rx indications 585b101c962SAlexander Duyck * @q_vector: structure containing interrupt and ring information 586b101c962SAlexander Duyck * @skb: packet to send up 587b101c962SAlexander Duyck **/ 588b101c962SAlexander Duyck static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, 589b101c962SAlexander Duyck struct sk_buff *skb) 590b101c962SAlexander Duyck { 
591b101c962SAlexander Duyck napi_gro_receive(&q_vector->napi, skb); 592b101c962SAlexander Duyck } 593b101c962SAlexander Duyck 59432b3e08fSJesse Brandeburg static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, 595b101c962SAlexander Duyck struct fm10k_ring *rx_ring, 596b101c962SAlexander Duyck int budget) 597b101c962SAlexander Duyck { 598b101c962SAlexander Duyck struct sk_buff *skb = rx_ring->skb; 599b101c962SAlexander Duyck unsigned int total_bytes = 0, total_packets = 0; 600b101c962SAlexander Duyck u16 cleaned_count = fm10k_desc_unused(rx_ring); 601b101c962SAlexander Duyck 60259486329SAlexander Duyck while (likely(total_packets < budget)) { 603b101c962SAlexander Duyck union fm10k_rx_desc *rx_desc; 604b101c962SAlexander Duyck 605b101c962SAlexander Duyck /* return some buffers to hardware, one at a time is too slow */ 606b101c962SAlexander Duyck if (cleaned_count >= FM10K_RX_BUFFER_WRITE) { 607b101c962SAlexander Duyck fm10k_alloc_rx_buffers(rx_ring, cleaned_count); 608b101c962SAlexander Duyck cleaned_count = 0; 609b101c962SAlexander Duyck } 610b101c962SAlexander Duyck 611b101c962SAlexander Duyck rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); 612b101c962SAlexander Duyck 613124b74c1SAlexander Duyck if (!rx_desc->d.staterr) 614b101c962SAlexander Duyck break; 615b101c962SAlexander Duyck 616b101c962SAlexander Duyck /* This memory barrier is needed to keep us from reading 617b101c962SAlexander Duyck * any other fields out of the rx_desc until we know the 618124b74c1SAlexander Duyck * descriptor has been written back 619b101c962SAlexander Duyck */ 620124b74c1SAlexander Duyck dma_rmb(); 621b101c962SAlexander Duyck 622b101c962SAlexander Duyck /* retrieve a buffer from the ring */ 623b101c962SAlexander Duyck skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); 624b101c962SAlexander Duyck 625b101c962SAlexander Duyck /* exit if we failed to retrieve a buffer */ 626b101c962SAlexander Duyck if (!skb) 627b101c962SAlexander Duyck break; 628b101c962SAlexander Duyck 629b101c962SAlexander Duyck cleaned_count++; 630b101c962SAlexander Duyck 631b101c962SAlexander Duyck /* fetch next buffer in frame if non-eop */ 632b101c962SAlexander Duyck if (fm10k_is_non_eop(rx_ring, rx_desc)) 633b101c962SAlexander Duyck continue; 634b101c962SAlexander Duyck 635b101c962SAlexander Duyck /* verify the packet layout is correct */ 636b101c962SAlexander Duyck if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { 637b101c962SAlexander Duyck skb = NULL; 638b101c962SAlexander Duyck continue; 639b101c962SAlexander Duyck } 640b101c962SAlexander Duyck 641b101c962SAlexander Duyck /* populate checksum, timestamp, VLAN, and protocol */ 642b101c962SAlexander Duyck total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); 643b101c962SAlexander Duyck 644b101c962SAlexander Duyck fm10k_receive_skb(q_vector, skb); 645b101c962SAlexander Duyck 646b101c962SAlexander Duyck /* reset skb pointer */ 647b101c962SAlexander Duyck skb = NULL; 648b101c962SAlexander Duyck 649b101c962SAlexander Duyck /* update budget accounting */ 650b101c962SAlexander Duyck total_packets++; 65159486329SAlexander Duyck } 652b101c962SAlexander Duyck 653b101c962SAlexander Duyck /* place incomplete frames back on ring for completion */ 654b101c962SAlexander Duyck rx_ring->skb = skb; 655b101c962SAlexander Duyck 656b101c962SAlexander Duyck u64_stats_update_begin(&rx_ring->syncp); 657b101c962SAlexander Duyck rx_ring->stats.packets += total_packets; 658b101c962SAlexander Duyck rx_ring->stats.bytes += total_bytes; 659b101c962SAlexander Duyck 
u64_stats_update_end(&rx_ring->syncp); 660b101c962SAlexander Duyck q_vector->rx.total_packets += total_packets; 661b101c962SAlexander Duyck q_vector->rx.total_bytes += total_bytes; 662b101c962SAlexander Duyck 66332b3e08fSJesse Brandeburg return total_packets; 664b101c962SAlexander Duyck } 665b101c962SAlexander Duyck 66676a540d4SAlexander Duyck #define VXLAN_HLEN (sizeof(struct udphdr) + 8) 66776a540d4SAlexander Duyck static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb) 66876a540d4SAlexander Duyck { 66976a540d4SAlexander Duyck struct fm10k_intfc *interface = netdev_priv(skb->dev); 67076a540d4SAlexander Duyck struct fm10k_vxlan_port *vxlan_port; 67176a540d4SAlexander Duyck 67276a540d4SAlexander Duyck /* we can only offload a vxlan if we recognize it as such */ 67376a540d4SAlexander Duyck vxlan_port = list_first_entry_or_null(&interface->vxlan_port, 67476a540d4SAlexander Duyck struct fm10k_vxlan_port, list); 67576a540d4SAlexander Duyck 67676a540d4SAlexander Duyck if (!vxlan_port) 67776a540d4SAlexander Duyck return NULL; 67876a540d4SAlexander Duyck if (vxlan_port->port != udp_hdr(skb)->dest) 67976a540d4SAlexander Duyck return NULL; 68076a540d4SAlexander Duyck 68176a540d4SAlexander Duyck /* return offset of udp_hdr plus 8 bytes for VXLAN header */ 68276a540d4SAlexander Duyck return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN); 68376a540d4SAlexander Duyck } 68476a540d4SAlexander Duyck 68576a540d4SAlexander Duyck #define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF) 68676a540d4SAlexander Duyck #define NVGRE_TNI htons(0x2000) 68776a540d4SAlexander Duyck struct fm10k_nvgre_hdr { 68876a540d4SAlexander Duyck __be16 flags; 68976a540d4SAlexander Duyck __be16 proto; 69076a540d4SAlexander Duyck __be32 tni; 69176a540d4SAlexander Duyck }; 69276a540d4SAlexander Duyck 69376a540d4SAlexander Duyck static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) 69476a540d4SAlexander Duyck { 69576a540d4SAlexander Duyck struct fm10k_nvgre_hdr *nvgre_hdr; 69676a540d4SAlexander Duyck int hlen = ip_hdrlen(skb); 69776a540d4SAlexander Duyck 69876a540d4SAlexander Duyck /* currently only IPv4 is supported due to hlen above */ 69976a540d4SAlexander Duyck if (vlan_get_protocol(skb) != htons(ETH_P_IP)) 70076a540d4SAlexander Duyck return NULL; 70176a540d4SAlexander Duyck 70276a540d4SAlexander Duyck /* our transport header should be NVGRE */ 70376a540d4SAlexander Duyck nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen); 70476a540d4SAlexander Duyck 70576a540d4SAlexander Duyck /* verify all reserved flags are 0 */ 70676a540d4SAlexander Duyck if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) 70776a540d4SAlexander Duyck return NULL; 70876a540d4SAlexander Duyck 70976a540d4SAlexander Duyck /* report start of ethernet header */ 71076a540d4SAlexander Duyck if (nvgre_hdr->flags & NVGRE_TNI) 71176a540d4SAlexander Duyck return (struct ethhdr *)(nvgre_hdr + 1); 71276a540d4SAlexander Duyck 71376a540d4SAlexander Duyck return (struct ethhdr *)(&nvgre_hdr->tni); 71476a540d4SAlexander Duyck } 71576a540d4SAlexander Duyck 7165bf33dc6SMatthew Vick __be16 fm10k_tx_encap_offload(struct sk_buff *skb) 71776a540d4SAlexander Duyck { 7188c1a90aaSMatthew Vick u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; 71976a540d4SAlexander Duyck struct ethhdr *eth_hdr; 72076a540d4SAlexander Duyck 7218c1a90aaSMatthew Vick if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 7228c1a90aaSMatthew Vick skb->inner_protocol != htons(ETH_P_TEB)) 723b66b6d9fSJoe Stringer return 0; 724b66b6d9fSJoe Stringer 72576a540d4SAlexander Duyck 
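/* determine the outer L4 protocol so we know which tunnel header to look for */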
switch (vlan_get_protocol(skb)) { 72676a540d4SAlexander Duyck case htons(ETH_P_IP): 72776a540d4SAlexander Duyck l4_hdr = ip_hdr(skb)->protocol; 72876a540d4SAlexander Duyck break; 72976a540d4SAlexander Duyck case htons(ETH_P_IPV6): 73076a540d4SAlexander Duyck l4_hdr = ipv6_hdr(skb)->nexthdr; 73176a540d4SAlexander Duyck break; 73276a540d4SAlexander Duyck default: 73376a540d4SAlexander Duyck return 0; 73476a540d4SAlexander Duyck } 73576a540d4SAlexander Duyck 73676a540d4SAlexander Duyck switch (l4_hdr) { 73776a540d4SAlexander Duyck case IPPROTO_UDP: 73876a540d4SAlexander Duyck eth_hdr = fm10k_port_is_vxlan(skb); 73976a540d4SAlexander Duyck break; 74076a540d4SAlexander Duyck case IPPROTO_GRE: 74176a540d4SAlexander Duyck eth_hdr = fm10k_gre_is_nvgre(skb); 74276a540d4SAlexander Duyck break; 74376a540d4SAlexander Duyck default: 74476a540d4SAlexander Duyck return 0; 74576a540d4SAlexander Duyck } 74676a540d4SAlexander Duyck 74776a540d4SAlexander Duyck if (!eth_hdr) 74876a540d4SAlexander Duyck return 0; 74976a540d4SAlexander Duyck 75076a540d4SAlexander Duyck switch (eth_hdr->h_proto) { 75176a540d4SAlexander Duyck case htons(ETH_P_IP): 7528c1a90aaSMatthew Vick inner_l4_hdr = inner_ip_hdr(skb)->protocol; 7538c1a90aaSMatthew Vick break; 75476a540d4SAlexander Duyck case htons(ETH_P_IPV6): 7558c1a90aaSMatthew Vick inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; 75676a540d4SAlexander Duyck break; 75776a540d4SAlexander Duyck default: 75876a540d4SAlexander Duyck return 0; 75976a540d4SAlexander Duyck } 76076a540d4SAlexander Duyck 7618c1a90aaSMatthew Vick switch (inner_l4_hdr) { 7628c1a90aaSMatthew Vick case IPPROTO_TCP: 7638c1a90aaSMatthew Vick inner_l4_hlen = inner_tcp_hdrlen(skb); 7648c1a90aaSMatthew Vick break; 7658c1a90aaSMatthew Vick case IPPROTO_UDP: 7668c1a90aaSMatthew Vick inner_l4_hlen = 8; 7678c1a90aaSMatthew Vick break; 7688c1a90aaSMatthew Vick default: 7698c1a90aaSMatthew Vick return 0; 7708c1a90aaSMatthew Vick } 7718c1a90aaSMatthew Vick 7728c1a90aaSMatthew Vick /* The hardware allows tunnel offloads only if the combined inner and 7738c1a90aaSMatthew Vick * outer header is 184 bytes or less 7748c1a90aaSMatthew Vick */ 7758c1a90aaSMatthew Vick if (skb_inner_transport_header(skb) + inner_l4_hlen - 7768c1a90aaSMatthew Vick skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) 7778c1a90aaSMatthew Vick return 0; 7788c1a90aaSMatthew Vick 77976a540d4SAlexander Duyck return eth_hdr->h_proto; 78076a540d4SAlexander Duyck } 78176a540d4SAlexander Duyck 78276a540d4SAlexander Duyck static int fm10k_tso(struct fm10k_ring *tx_ring, 78376a540d4SAlexander Duyck struct fm10k_tx_buffer *first) 78476a540d4SAlexander Duyck { 78576a540d4SAlexander Duyck struct sk_buff *skb = first->skb; 78676a540d4SAlexander Duyck struct fm10k_tx_desc *tx_desc; 78776a540d4SAlexander Duyck unsigned char *th; 78876a540d4SAlexander Duyck u8 hdrlen; 78976a540d4SAlexander Duyck 79076a540d4SAlexander Duyck if (skb->ip_summed != CHECKSUM_PARTIAL) 79176a540d4SAlexander Duyck return 0; 79276a540d4SAlexander Duyck 79376a540d4SAlexander Duyck if (!skb_is_gso(skb)) 79476a540d4SAlexander Duyck return 0; 79576a540d4SAlexander Duyck 79676a540d4SAlexander Duyck /* compute header lengths */ 79776a540d4SAlexander Duyck if (skb->encapsulation) { 79876a540d4SAlexander Duyck if (!fm10k_tx_encap_offload(skb)) 79976a540d4SAlexander Duyck goto err_vxlan; 80076a540d4SAlexander Duyck th = skb_inner_transport_header(skb); 80176a540d4SAlexander Duyck } else { 80276a540d4SAlexander Duyck th = skb_transport_header(skb); 80376a540d4SAlexander Duyck } 80476a540d4SAlexander 
Duyck 80576a540d4SAlexander Duyck /* compute offset from SOF to transport header and add header len */ 80676a540d4SAlexander Duyck hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2); 80776a540d4SAlexander Duyck 80876a540d4SAlexander Duyck first->tx_flags |= FM10K_TX_FLAGS_CSUM; 80976a540d4SAlexander Duyck 81076a540d4SAlexander Duyck /* update gso size and bytecount with header size */ 81176a540d4SAlexander Duyck first->gso_segs = skb_shinfo(skb)->gso_segs; 81276a540d4SAlexander Duyck first->bytecount += (first->gso_segs - 1) * hdrlen; 81376a540d4SAlexander Duyck 81476a540d4SAlexander Duyck /* populate Tx descriptor header size and mss */ 81576a540d4SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); 81676a540d4SAlexander Duyck tx_desc->hdrlen = hdrlen; 81776a540d4SAlexander Duyck tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 81876a540d4SAlexander Duyck 81976a540d4SAlexander Duyck return 1; 82076a540d4SAlexander Duyck err_vxlan: 82176a540d4SAlexander Duyck tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; 82276a540d4SAlexander Duyck if (net_ratelimit()) 82376a540d4SAlexander Duyck netdev_err(tx_ring->netdev, 82476a540d4SAlexander Duyck "TSO requested for unsupported tunnel, disabling offload\n"); 82576a540d4SAlexander Duyck return -1; 82676a540d4SAlexander Duyck } 82776a540d4SAlexander Duyck 82876a540d4SAlexander Duyck static void fm10k_tx_csum(struct fm10k_ring *tx_ring, 82976a540d4SAlexander Duyck struct fm10k_tx_buffer *first) 83076a540d4SAlexander Duyck { 83176a540d4SAlexander Duyck struct sk_buff *skb = first->skb; 83276a540d4SAlexander Duyck struct fm10k_tx_desc *tx_desc; 83376a540d4SAlexander Duyck union { 83476a540d4SAlexander Duyck struct iphdr *ipv4; 83576a540d4SAlexander Duyck struct ipv6hdr *ipv6; 83676a540d4SAlexander Duyck u8 *raw; 83776a540d4SAlexander Duyck } network_hdr; 83876a540d4SAlexander Duyck __be16 protocol; 83976a540d4SAlexander Duyck u8 l4_hdr = 0; 84076a540d4SAlexander Duyck 84176a540d4SAlexander Duyck if (skb->ip_summed != CHECKSUM_PARTIAL) 84276a540d4SAlexander Duyck goto no_csum; 84376a540d4SAlexander Duyck 84476a540d4SAlexander Duyck if (skb->encapsulation) { 84576a540d4SAlexander Duyck protocol = fm10k_tx_encap_offload(skb); 84676a540d4SAlexander Duyck if (!protocol) { 84776a540d4SAlexander Duyck if (skb_checksum_help(skb)) { 84876a540d4SAlexander Duyck dev_warn(tx_ring->dev, 84976a540d4SAlexander Duyck "failed to offload encap csum!\n"); 85076a540d4SAlexander Duyck tx_ring->tx_stats.csum_err++; 85176a540d4SAlexander Duyck } 85276a540d4SAlexander Duyck goto no_csum; 85376a540d4SAlexander Duyck } 85476a540d4SAlexander Duyck network_hdr.raw = skb_inner_network_header(skb); 85576a540d4SAlexander Duyck } else { 85676a540d4SAlexander Duyck protocol = vlan_get_protocol(skb); 85776a540d4SAlexander Duyck network_hdr.raw = skb_network_header(skb); 85876a540d4SAlexander Duyck } 85976a540d4SAlexander Duyck 86076a540d4SAlexander Duyck switch (protocol) { 86176a540d4SAlexander Duyck case htons(ETH_P_IP): 86276a540d4SAlexander Duyck l4_hdr = network_hdr.ipv4->protocol; 86376a540d4SAlexander Duyck break; 86476a540d4SAlexander Duyck case htons(ETH_P_IPV6): 86576a540d4SAlexander Duyck l4_hdr = network_hdr.ipv6->nexthdr; 86676a540d4SAlexander Duyck break; 86776a540d4SAlexander Duyck default: 86876a540d4SAlexander Duyck if (unlikely(net_ratelimit())) { 86976a540d4SAlexander Duyck dev_warn(tx_ring->dev, 87076a540d4SAlexander Duyck "partial checksum but ip version=%x!\n", 87176a540d4SAlexander Duyck protocol); 87276a540d4SAlexander
Duyck } 87376a540d4SAlexander Duyck tx_ring->tx_stats.csum_err++; 87476a540d4SAlexander Duyck goto no_csum; 87576a540d4SAlexander Duyck } 87676a540d4SAlexander Duyck 87776a540d4SAlexander Duyck switch (l4_hdr) { 87876a540d4SAlexander Duyck case IPPROTO_TCP: 87976a540d4SAlexander Duyck case IPPROTO_UDP: 88076a540d4SAlexander Duyck break; 88176a540d4SAlexander Duyck case IPPROTO_GRE: 88276a540d4SAlexander Duyck if (skb->encapsulation) 88376a540d4SAlexander Duyck break; 88476a540d4SAlexander Duyck default: 88576a540d4SAlexander Duyck if (unlikely(net_ratelimit())) { 88676a540d4SAlexander Duyck dev_warn(tx_ring->dev, 88776a540d4SAlexander Duyck "partial checksum but l4 proto=%x!\n", 88876a540d4SAlexander Duyck l4_hdr); 88976a540d4SAlexander Duyck } 89076a540d4SAlexander Duyck tx_ring->tx_stats.csum_err++; 89176a540d4SAlexander Duyck goto no_csum; 89276a540d4SAlexander Duyck } 89376a540d4SAlexander Duyck 89476a540d4SAlexander Duyck /* update TX checksum flag */ 89576a540d4SAlexander Duyck first->tx_flags |= FM10K_TX_FLAGS_CSUM; 89680043f3bSJacob Keller tx_ring->tx_stats.csum_good++; 89776a540d4SAlexander Duyck 89876a540d4SAlexander Duyck no_csum: 89976a540d4SAlexander Duyck /* populate Tx descriptor header size and mss */ 90076a540d4SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); 90176a540d4SAlexander Duyck tx_desc->hdrlen = 0; 90276a540d4SAlexander Duyck tx_desc->mss = 0; 90376a540d4SAlexander Duyck } 90476a540d4SAlexander Duyck 90576a540d4SAlexander Duyck #define FM10K_SET_FLAG(_input, _flag, _result) \ 90676a540d4SAlexander Duyck ((_flag <= _result) ? \ 90776a540d4SAlexander Duyck ((u32)(_input & _flag) * (_result / _flag)) : \ 90876a540d4SAlexander Duyck ((u32)(_input & _flag) / (_flag / _result))) 90976a540d4SAlexander Duyck 91076a540d4SAlexander Duyck static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) 91176a540d4SAlexander Duyck { 91276a540d4SAlexander Duyck /* set type for advanced descriptor with frame checksum insertion */ 91376a540d4SAlexander Duyck u32 desc_flags = 0; 91476a540d4SAlexander Duyck 915a211e013SAlexander Duyck /* set timestamping bits */ 916a211e013SAlexander Duyck if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 917a211e013SAlexander Duyck likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 918a211e013SAlexander Duyck desc_flags |= FM10K_TXD_FLAG_TIME; 919a211e013SAlexander Duyck 92076a540d4SAlexander Duyck /* set checksum offload bits */ 92176a540d4SAlexander Duyck desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, 92276a540d4SAlexander Duyck FM10K_TXD_FLAG_CSUM); 92376a540d4SAlexander Duyck 92476a540d4SAlexander Duyck return desc_flags; 92576a540d4SAlexander Duyck } 92676a540d4SAlexander Duyck 927b101c962SAlexander Duyck static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, 928b101c962SAlexander Duyck struct fm10k_tx_desc *tx_desc, u16 i, 929b101c962SAlexander Duyck dma_addr_t dma, unsigned int size, u8 desc_flags) 930b101c962SAlexander Duyck { 931b101c962SAlexander Duyck /* set RS and INT for last frame in a cache line */ 932b101c962SAlexander Duyck if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0) 933b101c962SAlexander Duyck desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT; 934b101c962SAlexander Duyck 935b101c962SAlexander Duyck /* record values to descriptor */ 936b101c962SAlexander Duyck tx_desc->buffer_addr = cpu_to_le64(dma); 937b101c962SAlexander Duyck tx_desc->flags = desc_flags; 938b101c962SAlexander Duyck tx_desc->buflen = cpu_to_le16(size); 939b101c962SAlexander Duyck 940b101c962SAlexander 
Duyck /* return true if we just wrapped the ring */ 941b101c962SAlexander Duyck return i == tx_ring->count; 942b101c962SAlexander Duyck } 943b101c962SAlexander Duyck 9442c2b2f0cSAlexander Duyck static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) 9452c2b2f0cSAlexander Duyck { 9462c2b2f0cSAlexander Duyck netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 9472c2b2f0cSAlexander Duyck 948eca32047SMatthew Vick /* Memory barrier before checking head and tail */ 9492c2b2f0cSAlexander Duyck smp_mb(); 9502c2b2f0cSAlexander Duyck 951eca32047SMatthew Vick /* Check again in case another CPU has just made room available */ 9522c2b2f0cSAlexander Duyck if (likely(fm10k_desc_unused(tx_ring) < size)) 9532c2b2f0cSAlexander Duyck return -EBUSY; 9542c2b2f0cSAlexander Duyck 9552c2b2f0cSAlexander Duyck /* A reprieve! - use start_queue because it doesn't call schedule */ 9562c2b2f0cSAlexander Duyck netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 9572c2b2f0cSAlexander Duyck ++tx_ring->tx_stats.restart_queue; 9582c2b2f0cSAlexander Duyck return 0; 9592c2b2f0cSAlexander Duyck } 9602c2b2f0cSAlexander Duyck 9612c2b2f0cSAlexander Duyck static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) 9622c2b2f0cSAlexander Duyck { 9632c2b2f0cSAlexander Duyck if (likely(fm10k_desc_unused(tx_ring) >= size)) 9642c2b2f0cSAlexander Duyck return 0; 9652c2b2f0cSAlexander Duyck return __fm10k_maybe_stop_tx(tx_ring, size); 9662c2b2f0cSAlexander Duyck } 9672c2b2f0cSAlexander Duyck 968b101c962SAlexander Duyck static void fm10k_tx_map(struct fm10k_ring *tx_ring, 969b101c962SAlexander Duyck struct fm10k_tx_buffer *first) 970b101c962SAlexander Duyck { 971b101c962SAlexander Duyck struct sk_buff *skb = first->skb; 972b101c962SAlexander Duyck struct fm10k_tx_buffer *tx_buffer; 973b101c962SAlexander Duyck struct fm10k_tx_desc *tx_desc; 974b101c962SAlexander Duyck struct skb_frag_struct *frag; 975b101c962SAlexander Duyck unsigned char *data; 976b101c962SAlexander Duyck dma_addr_t dma; 977b101c962SAlexander Duyck unsigned int data_len, size; 97876a540d4SAlexander Duyck u32 tx_flags = first->tx_flags; 979b101c962SAlexander Duyck u16 i = tx_ring->next_to_use; 98076a540d4SAlexander Duyck u8 flags = fm10k_tx_desc_flags(skb, tx_flags); 981b101c962SAlexander Duyck 982b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, i); 983b101c962SAlexander Duyck 984b101c962SAlexander Duyck /* add HW VLAN tag */ 985df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 986df8a39deSJiri Pirko tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); 987b101c962SAlexander Duyck else 988b101c962SAlexander Duyck tx_desc->vlan = 0; 989b101c962SAlexander Duyck 990b101c962SAlexander Duyck size = skb_headlen(skb); 991b101c962SAlexander Duyck data = skb->data; 992b101c962SAlexander Duyck 993b101c962SAlexander Duyck dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); 994b101c962SAlexander Duyck 995b101c962SAlexander Duyck data_len = skb->data_len; 996b101c962SAlexander Duyck tx_buffer = first; 997b101c962SAlexander Duyck 998b101c962SAlexander Duyck for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 999b101c962SAlexander Duyck if (dma_mapping_error(tx_ring->dev, dma)) 1000b101c962SAlexander Duyck goto dma_error; 1001b101c962SAlexander Duyck 1002b101c962SAlexander Duyck /* record length, and DMA address */ 1003b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, size); 1004b101c962SAlexander Duyck dma_unmap_addr_set(tx_buffer, dma, dma); 1005b101c962SAlexander Duyck 1006b101c962SAlexander Duyck while
(unlikely(size > FM10K_MAX_DATA_PER_TXD)) { 1007b101c962SAlexander Duyck if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, 1008b101c962SAlexander Duyck FM10K_MAX_DATA_PER_TXD, flags)) { 1009b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1010b101c962SAlexander Duyck i = 0; 1011b101c962SAlexander Duyck } 1012b101c962SAlexander Duyck 1013b101c962SAlexander Duyck dma += FM10K_MAX_DATA_PER_TXD; 1014b101c962SAlexander Duyck size -= FM10K_MAX_DATA_PER_TXD; 1015b101c962SAlexander Duyck } 1016b101c962SAlexander Duyck 1017b101c962SAlexander Duyck if (likely(!data_len)) 1018b101c962SAlexander Duyck break; 1019b101c962SAlexander Duyck 1020b101c962SAlexander Duyck if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, 1021b101c962SAlexander Duyck dma, size, flags)) { 1022b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1023b101c962SAlexander Duyck i = 0; 1024b101c962SAlexander Duyck } 1025b101c962SAlexander Duyck 1026b101c962SAlexander Duyck size = skb_frag_size(frag); 1027b101c962SAlexander Duyck data_len -= size; 1028b101c962SAlexander Duyck 1029b101c962SAlexander Duyck dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 1030b101c962SAlexander Duyck DMA_TO_DEVICE); 1031b101c962SAlexander Duyck 1032b101c962SAlexander Duyck tx_buffer = &tx_ring->tx_buffer[i]; 1033b101c962SAlexander Duyck } 1034b101c962SAlexander Duyck 1035b101c962SAlexander Duyck /* write last descriptor with LAST bit set */ 1036b101c962SAlexander Duyck flags |= FM10K_TXD_FLAG_LAST; 1037b101c962SAlexander Duyck 1038b101c962SAlexander Duyck if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) 1039b101c962SAlexander Duyck i = 0; 1040b101c962SAlexander Duyck 1041b101c962SAlexander Duyck /* record bytecount for BQL */ 1042b101c962SAlexander Duyck netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 1043b101c962SAlexander Duyck 1044b101c962SAlexander Duyck /* record SW timestamp if HW timestamp is not available */ 1045b101c962SAlexander Duyck skb_tx_timestamp(first->skb); 1046b101c962SAlexander Duyck 1047b101c962SAlexander Duyck /* Force memory writes to complete before letting h/w know there 1048b101c962SAlexander Duyck * are new descriptors to fetch. (Only applicable for weak-ordered 1049b101c962SAlexander Duyck * memory model archs, such as IA-64). 1050b101c962SAlexander Duyck * 1051b101c962SAlexander Duyck * We also need this memory barrier to make certain all of the 1052b101c962SAlexander Duyck * status bits have been updated before next_to_watch is written. 1053b101c962SAlexander Duyck */ 1054b101c962SAlexander Duyck wmb(); 1055b101c962SAlexander Duyck 1056b101c962SAlexander Duyck /* set next_to_watch value indicating a packet is present */ 1057b101c962SAlexander Duyck first->next_to_watch = tx_desc; 1058b101c962SAlexander Duyck 1059b101c962SAlexander Duyck tx_ring->next_to_use = i; 1060b101c962SAlexander Duyck 10612c2b2f0cSAlexander Duyck /* Make sure there is space in the ring for the next send. 
*/ 10622c2b2f0cSAlexander Duyck fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); 10632c2b2f0cSAlexander Duyck 1064b101c962SAlexander Duyck /* notify HW of packet */ 10652c2b2f0cSAlexander Duyck if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { 1066b101c962SAlexander Duyck writel(i, tx_ring->tail); 1067b101c962SAlexander Duyck 1068b101c962SAlexander Duyck /* we need this if more than one processor can write to our tail 1069b101c962SAlexander Duyck * at a time, it synchronizes IO on IA64/Altix systems 1070b101c962SAlexander Duyck */ 1071b101c962SAlexander Duyck mmiowb(); 10722c2b2f0cSAlexander Duyck } 1073b101c962SAlexander Duyck 1074b101c962SAlexander Duyck return; 1075b101c962SAlexander Duyck dma_error: 1076b101c962SAlexander Duyck dev_err(tx_ring->dev, "TX DMA map failed\n"); 1077b101c962SAlexander Duyck 1078b101c962SAlexander Duyck /* clear dma mappings for failed tx_buffer map */ 1079b101c962SAlexander Duyck for (;;) { 1080b101c962SAlexander Duyck tx_buffer = &tx_ring->tx_buffer[i]; 1081b101c962SAlexander Duyck fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); 1082b101c962SAlexander Duyck if (tx_buffer == first) 1083b101c962SAlexander Duyck break; 1084b101c962SAlexander Duyck if (i == 0) 1085b101c962SAlexander Duyck i = tx_ring->count; 1086b101c962SAlexander Duyck i--; 1087b101c962SAlexander Duyck } 1088b101c962SAlexander Duyck 1089b101c962SAlexander Duyck tx_ring->next_to_use = i; 1090b101c962SAlexander Duyck } 1091b101c962SAlexander Duyck 1092b101c962SAlexander Duyck netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, 1093b101c962SAlexander Duyck struct fm10k_ring *tx_ring) 1094b101c962SAlexander Duyck { 1095b101c962SAlexander Duyck u16 count = TXD_USE_COUNT(skb_headlen(skb)); 109603d13a51SJacob Keller struct fm10k_tx_buffer *first; 109703d13a51SJacob Keller unsigned short f; 109803d13a51SJacob Keller u32 tx_flags = 0; 109903d13a51SJacob Keller int tso; 1100b101c962SAlexander Duyck 1101b101c962SAlexander Duyck /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, 1102b101c962SAlexander Duyck * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, 1103b101c962SAlexander Duyck * + 2 desc gap to keep tail from touching head 1104b101c962SAlexander Duyck * otherwise try next time 1105b101c962SAlexander Duyck */ 1106b101c962SAlexander Duyck for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 1107b101c962SAlexander Duyck count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 1108aae072e3SAlexander Duyck 1109b101c962SAlexander Duyck if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { 1110b101c962SAlexander Duyck tx_ring->tx_stats.tx_busy++; 1111b101c962SAlexander Duyck return NETDEV_TX_BUSY; 1112b101c962SAlexander Duyck } 1113b101c962SAlexander Duyck 1114b101c962SAlexander Duyck /* record the location of the first descriptor for this packet */ 1115b101c962SAlexander Duyck first = &tx_ring->tx_buffer[tx_ring->next_to_use]; 1116b101c962SAlexander Duyck first->skb = skb; 1117b101c962SAlexander Duyck first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 1118b101c962SAlexander Duyck first->gso_segs = 1; 1119b101c962SAlexander Duyck 1120b101c962SAlexander Duyck /* record initial flags and protocol */ 1121b101c962SAlexander Duyck first->tx_flags = tx_flags; 1122b101c962SAlexander Duyck 112376a540d4SAlexander Duyck tso = fm10k_tso(tx_ring, first); 112476a540d4SAlexander Duyck if (tso < 0) 112576a540d4SAlexander Duyck goto out_drop; 112676a540d4SAlexander Duyck else if (!tso) 112776a540d4SAlexander Duyck fm10k_tx_csum(tx_ring, first); 112876a540d4SAlexander Duyck 
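/* map the buffers and write the descriptors out to the ring */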
1129b101c962SAlexander Duyck fm10k_tx_map(tx_ring, first); 1130b101c962SAlexander Duyck 1131b101c962SAlexander Duyck return NETDEV_TX_OK; 113276a540d4SAlexander Duyck 113376a540d4SAlexander Duyck out_drop: 113476a540d4SAlexander Duyck dev_kfree_skb_any(first->skb); 113576a540d4SAlexander Duyck first->skb = NULL; 113676a540d4SAlexander Duyck 113776a540d4SAlexander Duyck return NETDEV_TX_OK; 1138b101c962SAlexander Duyck } 1139b101c962SAlexander Duyck 1140b101c962SAlexander Duyck static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) 1141b101c962SAlexander Duyck { 1142b101c962SAlexander Duyck return ring->stats.packets; 1143b101c962SAlexander Duyck } 1144b101c962SAlexander Duyck 1145b101c962SAlexander Duyck static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) 1146b101c962SAlexander Duyck { 1147b101c962SAlexander Duyck /* use SW head and tail until we have real hardware */ 1148b101c962SAlexander Duyck u32 head = ring->next_to_clean; 1149b101c962SAlexander Duyck u32 tail = ring->next_to_use; 1150b101c962SAlexander Duyck 1151b101c962SAlexander Duyck return ((head <= tail) ? tail : tail + ring->count) - head; 1152b101c962SAlexander Duyck } 1153b101c962SAlexander Duyck 1154b101c962SAlexander Duyck bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) 1155b101c962SAlexander Duyck { 1156b101c962SAlexander Duyck u32 tx_done = fm10k_get_tx_completed(tx_ring); 1157b101c962SAlexander Duyck u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 1158b101c962SAlexander Duyck u32 tx_pending = fm10k_get_tx_pending(tx_ring); 1159b101c962SAlexander Duyck 1160b101c962SAlexander Duyck clear_check_for_tx_hang(tx_ring); 1161b101c962SAlexander Duyck 1162b101c962SAlexander Duyck /* Check for a hung queue, but be thorough. This verifies 1163b101c962SAlexander Duyck * that a transmit has been completed since the previous 1164b101c962SAlexander Duyck * check AND there is at least one packet pending. By 1165b101c962SAlexander Duyck * requiring this to fail twice we avoid races with 1166b101c962SAlexander Duyck * clearing the ARMED bit and conditions where we 1167b101c962SAlexander Duyck * run the check_tx_hang logic with a transmit completion 1168b101c962SAlexander Duyck * pending but without time to complete it yet. 
1169b101c962SAlexander Duyck */ 1170b101c962SAlexander Duyck if (!tx_pending || (tx_done_old != tx_done)) { 1171b101c962SAlexander Duyck /* update completed stats and continue */ 1172b101c962SAlexander Duyck tx_ring->tx_stats.tx_done_old = tx_done; 1173b101c962SAlexander Duyck /* reset the countdown */ 1174b101c962SAlexander Duyck clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); 1175b101c962SAlexander Duyck 1176b101c962SAlexander Duyck return false; 1177b101c962SAlexander Duyck } 1178b101c962SAlexander Duyck 1179b101c962SAlexander Duyck /* make sure it is true for two checks in a row */ 1180b101c962SAlexander Duyck return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); 1181b101c962SAlexander Duyck } 1182b101c962SAlexander Duyck 1183b101c962SAlexander Duyck /** 1184b101c962SAlexander Duyck * fm10k_tx_timeout_reset - initiate reset due to Tx timeout 1185b101c962SAlexander Duyck * @interface: driver private struct 1186b101c962SAlexander Duyck **/ 1187b101c962SAlexander Duyck void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) 1188b101c962SAlexander Duyck { 1189b101c962SAlexander Duyck /* Do the reset outside of interrupt context */ 1190b101c962SAlexander Duyck if (!test_bit(__FM10K_DOWN, &interface->state)) { 1191b101c962SAlexander Duyck interface->tx_timeout_count++; 1192b101c962SAlexander Duyck interface->flags |= FM10K_FLAG_RESET_REQUESTED; 1193b101c962SAlexander Duyck fm10k_service_event_schedule(interface); 1194b101c962SAlexander Duyck } 1195b101c962SAlexander Duyck } 1196b101c962SAlexander Duyck 1197b101c962SAlexander Duyck /** 1198b101c962SAlexander Duyck * fm10k_clean_tx_irq - Reclaim resources after transmit completes 1199b101c962SAlexander Duyck * @q_vector: structure containing interrupt and ring information 1200b101c962SAlexander Duyck * @tx_ring: tx ring to clean 1201b101c962SAlexander Duyck **/ 1202b101c962SAlexander Duyck static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, 1203b101c962SAlexander Duyck struct fm10k_ring *tx_ring) 1204b101c962SAlexander Duyck { 1205b101c962SAlexander Duyck struct fm10k_intfc *interface = q_vector->interface; 1206b101c962SAlexander Duyck struct fm10k_tx_buffer *tx_buffer; 1207b101c962SAlexander Duyck struct fm10k_tx_desc *tx_desc; 1208b101c962SAlexander Duyck unsigned int total_bytes = 0, total_packets = 0; 1209b101c962SAlexander Duyck unsigned int budget = q_vector->tx.work_limit; 1210b101c962SAlexander Duyck unsigned int i = tx_ring->next_to_clean; 1211b101c962SAlexander Duyck 1212b101c962SAlexander Duyck if (test_bit(__FM10K_DOWN, &interface->state)) 1213b101c962SAlexander Duyck return true; 1214b101c962SAlexander Duyck 1215b101c962SAlexander Duyck tx_buffer = &tx_ring->tx_buffer[i]; 1216b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, i); 1217b101c962SAlexander Duyck i -= tx_ring->count; 1218b101c962SAlexander Duyck 1219b101c962SAlexander Duyck do { 1220b101c962SAlexander Duyck struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; 1221b101c962SAlexander Duyck 1222b101c962SAlexander Duyck /* if next_to_watch is not set then there is no work pending */ 1223b101c962SAlexander Duyck if (!eop_desc) 1224b101c962SAlexander Duyck break; 1225b101c962SAlexander Duyck 1226b101c962SAlexander Duyck /* prevent any other reads prior to eop_desc */ 1227b101c962SAlexander Duyck read_barrier_depends(); 1228b101c962SAlexander Duyck 1229b101c962SAlexander Duyck /* if DD is not set pending work has not been completed */ 1230b101c962SAlexander Duyck if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) 
1231b101c962SAlexander Duyck break; 1232b101c962SAlexander Duyck 1233b101c962SAlexander Duyck /* clear next_to_watch to prevent false hangs */ 1234b101c962SAlexander Duyck tx_buffer->next_to_watch = NULL; 1235b101c962SAlexander Duyck 1236b101c962SAlexander Duyck /* update the statistics for this packet */ 1237b101c962SAlexander Duyck total_bytes += tx_buffer->bytecount; 1238b101c962SAlexander Duyck total_packets += tx_buffer->gso_segs; 1239b101c962SAlexander Duyck 1240b101c962SAlexander Duyck /* free the skb */ 1241b101c962SAlexander Duyck dev_consume_skb_any(tx_buffer->skb); 1242b101c962SAlexander Duyck 1243b101c962SAlexander Duyck /* unmap skb header data */ 1244b101c962SAlexander Duyck dma_unmap_single(tx_ring->dev, 1245b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1246b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1247b101c962SAlexander Duyck DMA_TO_DEVICE); 1248b101c962SAlexander Duyck 1249b101c962SAlexander Duyck /* clear tx_buffer data */ 1250b101c962SAlexander Duyck tx_buffer->skb = NULL; 1251b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1252b101c962SAlexander Duyck 1253b101c962SAlexander Duyck /* unmap remaining buffers */ 1254b101c962SAlexander Duyck while (tx_desc != eop_desc) { 1255b101c962SAlexander Duyck tx_buffer++; 1256b101c962SAlexander Duyck tx_desc++; 1257b101c962SAlexander Duyck i++; 1258b101c962SAlexander Duyck if (unlikely(!i)) { 1259b101c962SAlexander Duyck i -= tx_ring->count; 1260b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1261b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1262b101c962SAlexander Duyck } 1263b101c962SAlexander Duyck 1264b101c962SAlexander Duyck /* unmap any remaining paged data */ 1265b101c962SAlexander Duyck if (dma_unmap_len(tx_buffer, len)) { 1266b101c962SAlexander Duyck dma_unmap_page(tx_ring->dev, 1267b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1268b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1269b101c962SAlexander Duyck DMA_TO_DEVICE); 1270b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1271b101c962SAlexander Duyck } 1272b101c962SAlexander Duyck } 1273b101c962SAlexander Duyck 1274b101c962SAlexander Duyck /* move us one more past the eop_desc for start of next pkt */ 1275b101c962SAlexander Duyck tx_buffer++; 1276b101c962SAlexander Duyck tx_desc++; 1277b101c962SAlexander Duyck i++; 1278b101c962SAlexander Duyck if (unlikely(!i)) { 1279b101c962SAlexander Duyck i -= tx_ring->count; 1280b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1281b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1282b101c962SAlexander Duyck } 1283b101c962SAlexander Duyck 1284b101c962SAlexander Duyck /* issue prefetch for next Tx descriptor */ 1285b101c962SAlexander Duyck prefetch(tx_desc); 1286b101c962SAlexander Duyck 1287b101c962SAlexander Duyck /* update budget accounting */ 1288b101c962SAlexander Duyck budget--; 1289b101c962SAlexander Duyck } while (likely(budget)); 1290b101c962SAlexander Duyck 1291b101c962SAlexander Duyck i += tx_ring->count; 1292b101c962SAlexander Duyck tx_ring->next_to_clean = i; 1293b101c962SAlexander Duyck u64_stats_update_begin(&tx_ring->syncp); 1294b101c962SAlexander Duyck tx_ring->stats.bytes += total_bytes; 1295b101c962SAlexander Duyck tx_ring->stats.packets += total_packets; 1296b101c962SAlexander Duyck u64_stats_update_end(&tx_ring->syncp); 1297b101c962SAlexander Duyck q_vector->tx.total_bytes += total_bytes; 1298b101c962SAlexander Duyck q_vector->tx.total_packets += total_packets; 1299b101c962SAlexander Duyck 
1300b101c962SAlexander Duyck if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
1301b101c962SAlexander Duyck /* schedule immediate reset if we believe we hung */
1302b101c962SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
1303b101c962SAlexander Duyck 
1304b101c962SAlexander Duyck netif_err(interface, drv, tx_ring->netdev,
1305b101c962SAlexander Duyck "Detected Tx Unit Hang\n"
1306b101c962SAlexander Duyck " Tx Queue <%d>\n"
1307b101c962SAlexander Duyck " TDH, TDT <%x>, <%x>\n"
1308b101c962SAlexander Duyck " next_to_use <%x>\n"
1309b101c962SAlexander Duyck " next_to_clean <%x>\n",
1310b101c962SAlexander Duyck tx_ring->queue_index,
1311b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
1312b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
1313b101c962SAlexander Duyck tx_ring->next_to_use, i);
1314b101c962SAlexander Duyck 
1315b101c962SAlexander Duyck netif_stop_subqueue(tx_ring->netdev,
1316b101c962SAlexander Duyck tx_ring->queue_index);
1317b101c962SAlexander Duyck 
1318b101c962SAlexander Duyck netif_info(interface, probe, tx_ring->netdev,
1319b101c962SAlexander Duyck "tx hang %d detected on queue %d, resetting interface\n",
1320b101c962SAlexander Duyck interface->tx_timeout_count + 1,
1321b101c962SAlexander Duyck tx_ring->queue_index);
1322b101c962SAlexander Duyck 
1323b101c962SAlexander Duyck fm10k_tx_timeout_reset(interface);
1324b101c962SAlexander Duyck 
1325b101c962SAlexander Duyck /* the netdev is about to reset, no point in enabling stuff */
1326b101c962SAlexander Duyck return true;
1327b101c962SAlexander Duyck }
1328b101c962SAlexander Duyck 
1329b101c962SAlexander Duyck /* notify netdev of completed buffers */
1330b101c962SAlexander Duyck netdev_tx_completed_queue(txring_txq(tx_ring),
1331b101c962SAlexander Duyck total_packets, total_bytes);
1332b101c962SAlexander Duyck 
1333b101c962SAlexander Duyck #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
1334b101c962SAlexander Duyck if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1335b101c962SAlexander Duyck (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1336b101c962SAlexander Duyck /* Make sure that anybody stopping the queue after this
1337b101c962SAlexander Duyck * sees the new next_to_clean.
1338b101c962SAlexander Duyck */
1339b101c962SAlexander Duyck smp_mb();
1340b101c962SAlexander Duyck if (__netif_subqueue_stopped(tx_ring->netdev,
1341b101c962SAlexander Duyck tx_ring->queue_index) &&
1342b101c962SAlexander Duyck !test_bit(__FM10K_DOWN, &interface->state)) {
1343b101c962SAlexander Duyck netif_wake_subqueue(tx_ring->netdev,
1344b101c962SAlexander Duyck tx_ring->queue_index);
1345b101c962SAlexander Duyck ++tx_ring->tx_stats.restart_queue;
1346b101c962SAlexander Duyck }
1347b101c962SAlexander Duyck }
1348b101c962SAlexander Duyck 
1349b101c962SAlexander Duyck return !!budget;
1350b101c962SAlexander Duyck }
1351b101c962SAlexander Duyck 
135218283cadSAlexander Duyck /**
135318283cadSAlexander Duyck * fm10k_update_itr - update the dynamic ITR value based on packet size
135418283cadSAlexander Duyck *
135518283cadSAlexander Duyck * Stores a new ITR value based strictly on packet size. The
135618283cadSAlexander Duyck * divisors and thresholds used by this function were determined based
135718283cadSAlexander Duyck * on theoretical maximum wire speed and testing data, in order to
135818283cadSAlexander Duyck * minimize response time while increasing bulk throughput.
135918283cadSAlexander Duyck * 136018283cadSAlexander Duyck * @ring_container: Container for rings to have ITR updated 136118283cadSAlexander Duyck **/ 136218283cadSAlexander Duyck static void fm10k_update_itr(struct fm10k_ring_container *ring_container) 136318283cadSAlexander Duyck { 1364242722ddSJacob Keller unsigned int avg_wire_size, packets, itr_round; 136518283cadSAlexander Duyck 136618283cadSAlexander Duyck /* Only update ITR if we are using adaptive setting */ 1367584373f5SJacob Keller if (!ITR_IS_ADAPTIVE(ring_container->itr)) 136818283cadSAlexander Duyck goto clear_counts; 136918283cadSAlexander Duyck 137018283cadSAlexander Duyck packets = ring_container->total_packets; 137118283cadSAlexander Duyck if (!packets) 137218283cadSAlexander Duyck goto clear_counts; 137318283cadSAlexander Duyck 137418283cadSAlexander Duyck avg_wire_size = ring_container->total_bytes / packets; 137518283cadSAlexander Duyck 1376242722ddSJacob Keller /* The following is a crude approximation of: 1377242722ddSJacob Keller * wmem_default / (size + overhead) = desired_pkts_per_int 1378242722ddSJacob Keller * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 1379242722ddSJacob Keller * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1380242722ddSJacob Keller * 1381242722ddSJacob Keller * Assuming wmem_default is 212992 and overhead is 640 bytes per 1382242722ddSJacob Keller * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1383242722ddSJacob Keller * formula down to 1384242722ddSJacob Keller * 1385242722ddSJacob Keller * (34 * (size + 24)) / (size + 640) = ITR 1386242722ddSJacob Keller * 1387242722ddSJacob Keller * We first do some math on the packet size and then finally bitshift 1388242722ddSJacob Keller * by 8 after rounding up. We also have to account for PCIe link speed 1389242722ddSJacob Keller * difference as ITR scales based on this. 1390242722ddSJacob Keller */ 1391242722ddSJacob Keller if (avg_wire_size <= 360) { 1392242722ddSJacob Keller /* Start at 250K ints/sec and gradually drop to 77K ints/sec */ 1393242722ddSJacob Keller avg_wire_size *= 8; 1394242722ddSJacob Keller avg_wire_size += 376; 1395242722ddSJacob Keller } else if (avg_wire_size <= 1152) { 1396242722ddSJacob Keller /* 77K ints/sec to 45K ints/sec */ 1397242722ddSJacob Keller avg_wire_size *= 3; 1398242722ddSJacob Keller avg_wire_size += 2176; 1399242722ddSJacob Keller } else if (avg_wire_size <= 1920) { 1400242722ddSJacob Keller /* 45K ints/sec to 38K ints/sec */ 1401242722ddSJacob Keller avg_wire_size += 4480; 1402242722ddSJacob Keller } else { 1403242722ddSJacob Keller /* plateau at a limit of 38K ints/sec */ 1404242722ddSJacob Keller avg_wire_size = 6656; 1405242722ddSJacob Keller } 140618283cadSAlexander Duyck 1407242722ddSJacob Keller /* Perform final bitshift for division after rounding up to ensure 1408242722ddSJacob Keller * that the calculation will never get below a 1. The bit shift 1409242722ddSJacob Keller * accounts for changes in the ITR due to PCIe link speed. 
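 *
 * Worked example (illustrative only, assuming an itr_scale of 0): for
 * 64-byte packets the first branch gives 64 * 8 + 376 = 888, and
 * (888 + 255) >> 8 = 4, i.e. roughly 250K interrupts/sec.  At 360 bytes
 * the same math gives (3256 + 255) >> 8 = 13, i.e. roughly 77K
 * interrupts/sec, matching the range noted above.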
1410242722ddSJacob Keller */ 1411242722ddSJacob Keller itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; 1412fcdb0a99SBruce Allan avg_wire_size += BIT(itr_round) - 1; 1413242722ddSJacob Keller avg_wire_size >>= itr_round; 141418283cadSAlexander Duyck 141518283cadSAlexander Duyck /* write back value and retain adaptive flag */ 141618283cadSAlexander Duyck ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; 141718283cadSAlexander Duyck 141818283cadSAlexander Duyck clear_counts: 141918283cadSAlexander Duyck ring_container->total_bytes = 0; 142018283cadSAlexander Duyck ring_container->total_packets = 0; 142118283cadSAlexander Duyck } 142218283cadSAlexander Duyck 142318283cadSAlexander Duyck static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) 142418283cadSAlexander Duyck { 142518283cadSAlexander Duyck /* Enable auto-mask and clear the current mask */ 142618283cadSAlexander Duyck u32 itr = FM10K_ITR_ENABLE; 142718283cadSAlexander Duyck 142818283cadSAlexander Duyck /* Update Tx ITR */ 142918283cadSAlexander Duyck fm10k_update_itr(&q_vector->tx); 143018283cadSAlexander Duyck 143118283cadSAlexander Duyck /* Update Rx ITR */ 143218283cadSAlexander Duyck fm10k_update_itr(&q_vector->rx); 143318283cadSAlexander Duyck 143418283cadSAlexander Duyck /* Store Tx itr in timer slot 0 */ 143518283cadSAlexander Duyck itr |= (q_vector->tx.itr & FM10K_ITR_MAX); 143618283cadSAlexander Duyck 143718283cadSAlexander Duyck /* Shift Rx itr to timer slot 1 */ 143818283cadSAlexander Duyck itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; 143918283cadSAlexander Duyck 144018283cadSAlexander Duyck /* Write the final value to the ITR register */ 144118283cadSAlexander Duyck writel(itr, q_vector->itr); 144218283cadSAlexander Duyck } 144318283cadSAlexander Duyck 144418283cadSAlexander Duyck static int fm10k_poll(struct napi_struct *napi, int budget) 144518283cadSAlexander Duyck { 144618283cadSAlexander Duyck struct fm10k_q_vector *q_vector = 144718283cadSAlexander Duyck container_of(napi, struct fm10k_q_vector, napi); 1448b101c962SAlexander Duyck struct fm10k_ring *ring; 144932b3e08fSJesse Brandeburg int per_ring_budget, work_done = 0; 1450b101c962SAlexander Duyck bool clean_complete = true; 1451b101c962SAlexander Duyck 1452b101c962SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) 1453b101c962SAlexander Duyck clean_complete &= fm10k_clean_tx_irq(q_vector, ring); 1454b101c962SAlexander Duyck 14559f872986SAlexander Duyck /* Handle case where we are called by netpoll with a budget of 0 */ 14569f872986SAlexander Duyck if (budget <= 0) 14579f872986SAlexander Duyck return budget; 14589f872986SAlexander Duyck 1459b101c962SAlexander Duyck /* attempt to distribute budget to each queue fairly, but don't 1460b101c962SAlexander Duyck * allow the budget to go below 1 because we'll exit polling 1461b101c962SAlexander Duyck */ 1462b101c962SAlexander Duyck if (q_vector->rx.count > 1) 1463b101c962SAlexander Duyck per_ring_budget = max(budget / q_vector->rx.count, 1); 1464b101c962SAlexander Duyck else 1465b101c962SAlexander Duyck per_ring_budget = budget; 1466b101c962SAlexander Duyck 146732b3e08fSJesse Brandeburg fm10k_for_each_ring(ring, q_vector->rx) { 146832b3e08fSJesse Brandeburg int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); 146932b3e08fSJesse Brandeburg 147032b3e08fSJesse Brandeburg work_done += work; 147132b3e08fSJesse Brandeburg clean_complete &= !!(work < per_ring_budget); 147232b3e08fSJesse Brandeburg } 1473b101c962SAlexander Duyck 1474b101c962SAlexander Duyck /* If 
all work not completed, return budget and keep polling */
1475b101c962SAlexander Duyck if (!clean_complete)
1476b101c962SAlexander Duyck return budget;
147718283cadSAlexander Duyck 
147818283cadSAlexander Duyck /* all work done, exit the polling mode */
147932b3e08fSJesse Brandeburg napi_complete_done(napi, work_done);
148018283cadSAlexander Duyck 
148118283cadSAlexander Duyck /* re-enable the q_vector */
148218283cadSAlexander Duyck fm10k_qv_enable(q_vector);
148318283cadSAlexander Duyck 
148418283cadSAlexander Duyck return 0;
148518283cadSAlexander Duyck }
148618283cadSAlexander Duyck 
148718283cadSAlexander Duyck /**
1488aa3ac822SAlexander Duyck * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device
1489aa3ac822SAlexander Duyck * @interface: board private structure to initialize
1490aa3ac822SAlexander Duyck *
1491aa3ac822SAlexander Duyck * When QoS (Quality of Service) is enabled, allocate queues for
1492aa3ac822SAlexander Duyck * each traffic class. If multiqueue isn't available, then abort QoS
1493aa3ac822SAlexander Duyck * initialization.
1494aa3ac822SAlexander Duyck *
1495aa3ac822SAlexander Duyck * This function handles all combinations of QoS and RSS.
1496aa3ac822SAlexander Duyck *
1497aa3ac822SAlexander Duyck **/
1498aa3ac822SAlexander Duyck static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
1499aa3ac822SAlexander Duyck {
1500aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev;
1501aa3ac822SAlexander Duyck struct fm10k_ring_feature *f;
1502aa3ac822SAlexander Duyck int rss_i, i;
1503aa3ac822SAlexander Duyck int pcs;
1504aa3ac822SAlexander Duyck 
1505aa3ac822SAlexander Duyck /* Map queue offset and counts onto allocated tx queues */
1506aa3ac822SAlexander Duyck pcs = netdev_get_num_tc(dev);
1507aa3ac822SAlexander Duyck 
1508aa3ac822SAlexander Duyck if (pcs <= 1)
1509aa3ac822SAlexander Duyck return false;
1510aa3ac822SAlexander Duyck 
1511aa3ac822SAlexander Duyck /* set QoS mask and indices */
1512aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_QOS];
1513aa3ac822SAlexander Duyck f->indices = pcs;
1514fcdb0a99SBruce Allan f->mask = BIT(fls(pcs - 1)) - 1;
1515aa3ac822SAlexander Duyck 
1516aa3ac822SAlexander Duyck /* determine the upper limit for our current DCB mode */
1517aa3ac822SAlexander Duyck rss_i = interface->hw.mac.max_queues / pcs;
1518fcdb0a99SBruce Allan rss_i = BIT(fls(rss_i) - 1);
1519aa3ac822SAlexander Duyck 
1520aa3ac822SAlexander Duyck /* set RSS mask and indices */
1521aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS];
1522aa3ac822SAlexander Duyck rss_i = min_t(u16, rss_i, f->limit);
1523aa3ac822SAlexander Duyck f->indices = rss_i;
1524fcdb0a99SBruce Allan f->mask = BIT(fls(rss_i - 1)) - 1;
1525aa3ac822SAlexander Duyck 
1526aa3ac822SAlexander Duyck /* configure pause class to queue mapping */
1527aa3ac822SAlexander Duyck for (i = 0; i < pcs; i++)
1528aa3ac822SAlexander Duyck netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
1529aa3ac822SAlexander Duyck 
1530aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i * pcs;
1531aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i * pcs;
1532aa3ac822SAlexander Duyck 
1533aa3ac822SAlexander Duyck return true;
1534aa3ac822SAlexander Duyck }
1535aa3ac822SAlexander Duyck 
1536aa3ac822SAlexander Duyck /**
1537aa3ac822SAlexander Duyck * fm10k_set_rss_queues: Allocate queues for RSS
1538aa3ac822SAlexander Duyck * @interface: board private structure to initialize
1539aa3ac822SAlexander Duyck *
1540aa3ac822SAlexander Duyck * This is our "base" multiqueue mode.
RSS (Receive Side Scaling) will try 1541aa3ac822SAlexander Duyck * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 1542aa3ac822SAlexander Duyck * 1543aa3ac822SAlexander Duyck **/ 1544aa3ac822SAlexander Duyck static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) 1545aa3ac822SAlexander Duyck { 1546aa3ac822SAlexander Duyck struct fm10k_ring_feature *f; 1547aa3ac822SAlexander Duyck u16 rss_i; 1548aa3ac822SAlexander Duyck 1549aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS]; 1550aa3ac822SAlexander Duyck rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); 1551aa3ac822SAlexander Duyck 1552aa3ac822SAlexander Duyck /* record indices and power of 2 mask for RSS */ 1553aa3ac822SAlexander Duyck f->indices = rss_i; 1554fcdb0a99SBruce Allan f->mask = BIT(fls(rss_i - 1)) - 1; 1555aa3ac822SAlexander Duyck 1556aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i; 1557aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i; 1558aa3ac822SAlexander Duyck 1559aa3ac822SAlexander Duyck return true; 1560aa3ac822SAlexander Duyck } 1561aa3ac822SAlexander Duyck 1562aa3ac822SAlexander Duyck /** 156318283cadSAlexander Duyck * fm10k_set_num_queues: Allocate queues for device, feature dependent 156418283cadSAlexander Duyck * @interface: board private structure to initialize 156518283cadSAlexander Duyck * 156618283cadSAlexander Duyck * This is the top level queue allocation routine. The order here is very 156718283cadSAlexander Duyck * important, starting with the "most" number of features turned on at once, 156818283cadSAlexander Duyck * and ending with the smallest set of features. This way large combinations 156918283cadSAlexander Duyck * can be allocated if they're turned on, and smaller combinations are the 157018283cadSAlexander Duyck * fallthrough conditions. 157118283cadSAlexander Duyck * 157218283cadSAlexander Duyck **/ 157318283cadSAlexander Duyck static void fm10k_set_num_queues(struct fm10k_intfc *interface) 157418283cadSAlexander Duyck { 1575b3525696SJacob Keller /* Attempt to setup QoS and RSS first */ 1576aa3ac822SAlexander Duyck if (fm10k_set_qos_queues(interface)) 1577aa3ac822SAlexander Duyck return; 1578aa3ac822SAlexander Duyck 1579b3525696SJacob Keller /* If we don't have QoS, just fallback to only RSS. */ 1580aa3ac822SAlexander Duyck fm10k_set_rss_queues(interface); 158118283cadSAlexander Duyck } 158218283cadSAlexander Duyck 158318283cadSAlexander Duyck /** 15844be37c42SJacob Keller * fm10k_reset_num_queues - Reset the number of queues to zero 15854be37c42SJacob Keller * @interface: board private structure 15864be37c42SJacob Keller * 15874be37c42SJacob Keller * This function should be called whenever we need to reset the number of 15884be37c42SJacob Keller * queues after an error condition. 
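 * Failed MSI-X initialization and failed q_vector allocation are examples
 * of such conditions (see fm10k_init_queueing_scheme()).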
15894be37c42SJacob Keller */ 15904be37c42SJacob Keller static void fm10k_reset_num_queues(struct fm10k_intfc *interface) 15914be37c42SJacob Keller { 15924be37c42SJacob Keller interface->num_tx_queues = 0; 15934be37c42SJacob Keller interface->num_rx_queues = 0; 15944be37c42SJacob Keller interface->num_q_vectors = 0; 15954be37c42SJacob Keller } 15964be37c42SJacob Keller 15974be37c42SJacob Keller /** 159818283cadSAlexander Duyck * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector 159918283cadSAlexander Duyck * @interface: board private structure to initialize 160018283cadSAlexander Duyck * @v_count: q_vectors allocated on interface, used for ring interleaving 160118283cadSAlexander Duyck * @v_idx: index of vector in interface struct 160218283cadSAlexander Duyck * @txr_count: total number of Tx rings to allocate 160318283cadSAlexander Duyck * @txr_idx: index of first Tx ring to allocate 160418283cadSAlexander Duyck * @rxr_count: total number of Rx rings to allocate 160518283cadSAlexander Duyck * @rxr_idx: index of first Rx ring to allocate 160618283cadSAlexander Duyck * 160718283cadSAlexander Duyck * We allocate one q_vector. If allocation fails we return -ENOMEM. 160818283cadSAlexander Duyck **/ 160918283cadSAlexander Duyck static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, 161018283cadSAlexander Duyck unsigned int v_count, unsigned int v_idx, 161118283cadSAlexander Duyck unsigned int txr_count, unsigned int txr_idx, 161218283cadSAlexander Duyck unsigned int rxr_count, unsigned int rxr_idx) 161318283cadSAlexander Duyck { 161418283cadSAlexander Duyck struct fm10k_q_vector *q_vector; 1615e27ef599SAlexander Duyck struct fm10k_ring *ring; 161618283cadSAlexander Duyck int ring_count, size; 161718283cadSAlexander Duyck 161818283cadSAlexander Duyck ring_count = txr_count + rxr_count; 1619e27ef599SAlexander Duyck size = sizeof(struct fm10k_q_vector) + 1620e27ef599SAlexander Duyck (sizeof(struct fm10k_ring) * ring_count); 162118283cadSAlexander Duyck 162218283cadSAlexander Duyck /* allocate q_vector and rings */ 162318283cadSAlexander Duyck q_vector = kzalloc(size, GFP_KERNEL); 162418283cadSAlexander Duyck if (!q_vector) 162518283cadSAlexander Duyck return -ENOMEM; 162618283cadSAlexander Duyck 162718283cadSAlexander Duyck /* initialize NAPI */ 162818283cadSAlexander Duyck netif_napi_add(interface->netdev, &q_vector->napi, 162918283cadSAlexander Duyck fm10k_poll, NAPI_POLL_WEIGHT); 163018283cadSAlexander Duyck 163118283cadSAlexander Duyck /* tie q_vector and interface together */ 163218283cadSAlexander Duyck interface->q_vector[v_idx] = q_vector; 163318283cadSAlexander Duyck q_vector->interface = interface; 163418283cadSAlexander Duyck q_vector->v_idx = v_idx; 163518283cadSAlexander Duyck 1636e27ef599SAlexander Duyck /* initialize pointer to rings */ 1637e27ef599SAlexander Duyck ring = q_vector->ring; 1638e27ef599SAlexander Duyck 163918283cadSAlexander Duyck /* save Tx ring container info */ 1640e27ef599SAlexander Duyck q_vector->tx.ring = ring; 1641e27ef599SAlexander Duyck q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; 164218283cadSAlexander Duyck q_vector->tx.itr = interface->tx_itr; 1643242722ddSJacob Keller q_vector->tx.itr_scale = interface->hw.mac.itr_scale; 164418283cadSAlexander Duyck q_vector->tx.count = txr_count; 164518283cadSAlexander Duyck 1646e27ef599SAlexander Duyck while (txr_count) { 1647e27ef599SAlexander Duyck /* assign generic ring traits */ 1648e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1649e27ef599SAlexander Duyck ring->netdev = 
interface->netdev; 1650e27ef599SAlexander Duyck 1651e27ef599SAlexander Duyck /* configure backlink on ring */ 1652e27ef599SAlexander Duyck ring->q_vector = q_vector; 1653e27ef599SAlexander Duyck 1654e27ef599SAlexander Duyck /* apply Tx specific ring traits */ 1655e27ef599SAlexander Duyck ring->count = interface->tx_ring_count; 1656e27ef599SAlexander Duyck ring->queue_index = txr_idx; 1657e27ef599SAlexander Duyck 1658e27ef599SAlexander Duyck /* assign ring to interface */ 1659e27ef599SAlexander Duyck interface->tx_ring[txr_idx] = ring; 1660e27ef599SAlexander Duyck 1661e27ef599SAlexander Duyck /* update count and index */ 1662e27ef599SAlexander Duyck txr_count--; 1663e27ef599SAlexander Duyck txr_idx += v_count; 1664e27ef599SAlexander Duyck 1665e27ef599SAlexander Duyck /* push pointer to next ring */ 1666e27ef599SAlexander Duyck ring++; 1667e27ef599SAlexander Duyck } 1668e27ef599SAlexander Duyck 166918283cadSAlexander Duyck /* save Rx ring container info */ 1670e27ef599SAlexander Duyck q_vector->rx.ring = ring; 167118283cadSAlexander Duyck q_vector->rx.itr = interface->rx_itr; 1672242722ddSJacob Keller q_vector->rx.itr_scale = interface->hw.mac.itr_scale; 167318283cadSAlexander Duyck q_vector->rx.count = rxr_count; 167418283cadSAlexander Duyck 1675e27ef599SAlexander Duyck while (rxr_count) { 1676e27ef599SAlexander Duyck /* assign generic ring traits */ 1677e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1678e27ef599SAlexander Duyck ring->netdev = interface->netdev; 16795cd5e2e9SAlexander Duyck rcu_assign_pointer(ring->l2_accel, interface->l2_accel); 1680e27ef599SAlexander Duyck 1681e27ef599SAlexander Duyck /* configure backlink on ring */ 1682e27ef599SAlexander Duyck ring->q_vector = q_vector; 1683e27ef599SAlexander Duyck 1684e27ef599SAlexander Duyck /* apply Rx specific ring traits */ 1685e27ef599SAlexander Duyck ring->count = interface->rx_ring_count; 1686e27ef599SAlexander Duyck ring->queue_index = rxr_idx; 1687e27ef599SAlexander Duyck 1688e27ef599SAlexander Duyck /* assign ring to interface */ 1689e27ef599SAlexander Duyck interface->rx_ring[rxr_idx] = ring; 1690e27ef599SAlexander Duyck 1691e27ef599SAlexander Duyck /* update count and index */ 1692e27ef599SAlexander Duyck rxr_count--; 1693e27ef599SAlexander Duyck rxr_idx += v_count; 1694e27ef599SAlexander Duyck 1695e27ef599SAlexander Duyck /* push pointer to next ring */ 1696e27ef599SAlexander Duyck ring++; 1697e27ef599SAlexander Duyck } 1698e27ef599SAlexander Duyck 16997461fd91SAlexander Duyck fm10k_dbg_q_vector_init(q_vector); 17007461fd91SAlexander Duyck 170118283cadSAlexander Duyck return 0; 170218283cadSAlexander Duyck } 170318283cadSAlexander Duyck 170418283cadSAlexander Duyck /** 170518283cadSAlexander Duyck * fm10k_free_q_vector - Free memory allocated for specific interrupt vector 170618283cadSAlexander Duyck * @interface: board private structure to initialize 170718283cadSAlexander Duyck * @v_idx: Index of vector to be freed 170818283cadSAlexander Duyck * 170918283cadSAlexander Duyck * This function frees the memory allocated to the q_vector. In addition if 171018283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior 171118283cadSAlexander Duyck * to freeing the q_vector. 
171218283cadSAlexander Duyck **/ 171318283cadSAlexander Duyck static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) 171418283cadSAlexander Duyck { 171518283cadSAlexander Duyck struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; 1716e27ef599SAlexander Duyck struct fm10k_ring *ring; 1717e27ef599SAlexander Duyck 17187461fd91SAlexander Duyck fm10k_dbg_q_vector_exit(q_vector); 17197461fd91SAlexander Duyck 1720e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) 1721e27ef599SAlexander Duyck interface->tx_ring[ring->queue_index] = NULL; 1722e27ef599SAlexander Duyck 1723e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->rx) 1724e27ef599SAlexander Duyck interface->rx_ring[ring->queue_index] = NULL; 172518283cadSAlexander Duyck 172618283cadSAlexander Duyck interface->q_vector[v_idx] = NULL; 172718283cadSAlexander Duyck netif_napi_del(&q_vector->napi); 172818283cadSAlexander Duyck kfree_rcu(q_vector, rcu); 172918283cadSAlexander Duyck } 173018283cadSAlexander Duyck 173118283cadSAlexander Duyck /** 173218283cadSAlexander Duyck * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors 173318283cadSAlexander Duyck * @interface: board private structure to initialize 173418283cadSAlexander Duyck * 173518283cadSAlexander Duyck * We allocate one q_vector per queue interrupt. If allocation fails we 173618283cadSAlexander Duyck * return -ENOMEM. 173718283cadSAlexander Duyck **/ 173818283cadSAlexander Duyck static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) 173918283cadSAlexander Duyck { 174018283cadSAlexander Duyck unsigned int q_vectors = interface->num_q_vectors; 174118283cadSAlexander Duyck unsigned int rxr_remaining = interface->num_rx_queues; 174218283cadSAlexander Duyck unsigned int txr_remaining = interface->num_tx_queues; 174318283cadSAlexander Duyck unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; 174418283cadSAlexander Duyck int err; 174518283cadSAlexander Duyck 174618283cadSAlexander Duyck if (q_vectors >= (rxr_remaining + txr_remaining)) { 174718283cadSAlexander Duyck for (; rxr_remaining; v_idx++) { 174818283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 174918283cadSAlexander Duyck 0, 0, 1, rxr_idx); 175018283cadSAlexander Duyck if (err) 175118283cadSAlexander Duyck goto err_out; 175218283cadSAlexander Duyck 175318283cadSAlexander Duyck /* update counts and index */ 175418283cadSAlexander Duyck rxr_remaining--; 175518283cadSAlexander Duyck rxr_idx++; 175618283cadSAlexander Duyck } 175718283cadSAlexander Duyck } 175818283cadSAlexander Duyck 175918283cadSAlexander Duyck for (; v_idx < q_vectors; v_idx++) { 176018283cadSAlexander Duyck int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 176118283cadSAlexander Duyck int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 176218283cadSAlexander Duyck 176318283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 176418283cadSAlexander Duyck tqpv, txr_idx, 176518283cadSAlexander Duyck rqpv, rxr_idx); 176618283cadSAlexander Duyck 176718283cadSAlexander Duyck if (err) 176818283cadSAlexander Duyck goto err_out; 176918283cadSAlexander Duyck 177018283cadSAlexander Duyck /* update counts and index */ 177118283cadSAlexander Duyck rxr_remaining -= rqpv; 177218283cadSAlexander Duyck txr_remaining -= tqpv; 177318283cadSAlexander Duyck rxr_idx++; 177418283cadSAlexander Duyck txr_idx++; 177518283cadSAlexander Duyck } 177618283cadSAlexander Duyck 177718283cadSAlexander Duyck return 0; 177818283cadSAlexander Duyck 
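/* Unwind on failure: reset the queue counts, then free each q_vector that
 * was successfully allocated, walking backwards from the failing index.
 */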
177918283cadSAlexander Duyck err_out:
17804be37c42SJacob Keller fm10k_reset_num_queues(interface);
178118283cadSAlexander Duyck 
178218283cadSAlexander Duyck while (v_idx--)
178318283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx);
178418283cadSAlexander Duyck 
178518283cadSAlexander Duyck return -ENOMEM;
178618283cadSAlexander Duyck }
178718283cadSAlexander Duyck 
178818283cadSAlexander Duyck /**
178918283cadSAlexander Duyck * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
179018283cadSAlexander Duyck * @interface: board private structure to initialize
179118283cadSAlexander Duyck *
179218283cadSAlexander Duyck * This function frees the memory allocated to the q_vectors. In addition if
179318283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior
179418283cadSAlexander Duyck * to freeing the q_vector.
179518283cadSAlexander Duyck **/
179618283cadSAlexander Duyck static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
179718283cadSAlexander Duyck {
179818283cadSAlexander Duyck int v_idx = interface->num_q_vectors;
179918283cadSAlexander Duyck 
18004be37c42SJacob Keller fm10k_reset_num_queues(interface);
180118283cadSAlexander Duyck 
180218283cadSAlexander Duyck while (v_idx--)
180318283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx);
180418283cadSAlexander Duyck }
180518283cadSAlexander Duyck 
180618283cadSAlexander Duyck /**
180718283cadSAlexander Duyck * fm10k_reset_msix_capability - reset MSI-X capability
180818283cadSAlexander Duyck * @interface: board private structure to initialize
180918283cadSAlexander Duyck *
181018283cadSAlexander Duyck * Reset the MSI-X capability back to its starting state
181118283cadSAlexander Duyck **/
181218283cadSAlexander Duyck static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
181318283cadSAlexander Duyck {
181418283cadSAlexander Duyck pci_disable_msix(interface->pdev);
181518283cadSAlexander Duyck kfree(interface->msix_entries);
181618283cadSAlexander Duyck interface->msix_entries = NULL;
181718283cadSAlexander Duyck }
181818283cadSAlexander Duyck 
181918283cadSAlexander Duyck /**
182018283cadSAlexander Duyck * fm10k_init_msix_capability - configure MSI-X capability
182118283cadSAlexander Duyck * @interface: board private structure to initialize
182218283cadSAlexander Duyck *
182318283cadSAlexander Duyck * Attempt to configure the interrupts using the best available
182418283cadSAlexander Duyck * capabilities of the hardware and the kernel.
182518283cadSAlexander Duyck **/
182618283cadSAlexander Duyck static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
182718283cadSAlexander Duyck {
182818283cadSAlexander Duyck struct fm10k_hw *hw = &interface->hw;
182918283cadSAlexander Duyck int v_budget, vector;
183018283cadSAlexander Duyck 
183118283cadSAlexander Duyck /* It's easy to be greedy for MSI-X vectors, but it really
183218283cadSAlexander Duyck * doesn't do us much good if we have a lot more vectors
183318283cadSAlexander Duyck * than CPUs. So let's be conservative and only ask for
183418283cadSAlexander Duyck * (roughly) the same number of vectors as there are CPUs.
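 * For example (illustrative numbers, not taken from this driver): with
 * 16 Rx queues, 16 Tx queues and 8 online CPUs, the budget below works
 * out to min(max(16, 16), 8) = 8 queue vectors before the non-queue
 * vectors are added;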
183518283cadSAlexander Duyck * the default is to use pairs of vectors
183618283cadSAlexander Duyck */
183718283cadSAlexander Duyck v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
183818283cadSAlexander Duyck v_budget = min_t(u16, v_budget, num_online_cpus());
183918283cadSAlexander Duyck 
184018283cadSAlexander Duyck /* account for vectors not related to queues */
184118283cadSAlexander Duyck v_budget += NON_Q_VECTORS(hw);
184218283cadSAlexander Duyck 
184318283cadSAlexander Duyck /* At the same time, hardware can only support a maximum of
184418283cadSAlexander Duyck * hw.mac->max_msix_vectors vectors. With features
184518283cadSAlexander Duyck * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
184618283cadSAlexander Duyck * descriptor queues supported by our device. Thus, we cap it off in
184718283cadSAlexander Duyck * those rare cases where the cpu count also exceeds our vector limit.
184818283cadSAlexander Duyck */
184918283cadSAlexander Duyck v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
185018283cadSAlexander Duyck 
185118283cadSAlexander Duyck /* A failure in MSI-X entry allocation is fatal. */
185218283cadSAlexander Duyck interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
185318283cadSAlexander Duyck GFP_KERNEL);
185418283cadSAlexander Duyck if (!interface->msix_entries)
185518283cadSAlexander Duyck return -ENOMEM;
185618283cadSAlexander Duyck 
185718283cadSAlexander Duyck /* populate entry values */
185818283cadSAlexander Duyck for (vector = 0; vector < v_budget; vector++)
185918283cadSAlexander Duyck interface->msix_entries[vector].entry = vector;
186018283cadSAlexander Duyck 
186118283cadSAlexander Duyck /* Attempt to enable MSI-X with requested value */
186218283cadSAlexander Duyck v_budget = pci_enable_msix_range(interface->pdev,
186318283cadSAlexander Duyck interface->msix_entries,
186418283cadSAlexander Duyck MIN_MSIX_COUNT(hw),
186518283cadSAlexander Duyck v_budget);
186618283cadSAlexander Duyck if (v_budget < 0) {
186718283cadSAlexander Duyck kfree(interface->msix_entries);
186818283cadSAlexander Duyck interface->msix_entries = NULL;
186918283cadSAlexander Duyck return -ENOMEM;
187018283cadSAlexander Duyck }
187118283cadSAlexander Duyck 
187218283cadSAlexander Duyck /* record the number of queues available for q_vectors */
187318283cadSAlexander Duyck interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
187418283cadSAlexander Duyck 
187518283cadSAlexander Duyck return 0;
187618283cadSAlexander Duyck }
187718283cadSAlexander Duyck 
1878aa3ac822SAlexander Duyck /**
1879aa3ac822SAlexander Duyck * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
1880aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices
1881aa3ac822SAlexander Duyck *
1882aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for QoS
1883aa3ac822SAlexander Duyck **/
1884aa3ac822SAlexander Duyck static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
1885aa3ac822SAlexander Duyck {
1886aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev;
1887aa3ac822SAlexander Duyck int pc, offset, rss_i, i, q_idx;
1888aa3ac822SAlexander Duyck u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
1889aa3ac822SAlexander Duyck u8 num_pcs = netdev_get_num_tc(dev);
1890aa3ac822SAlexander Duyck 
1891aa3ac822SAlexander Duyck if (num_pcs <= 1)
1892aa3ac822SAlexander Duyck return false;
1893aa3ac822SAlexander Duyck 
1894aa3ac822SAlexander Duyck rss_i = interface->ring_feature[RING_F_RSS].indices;
1895aa3ac822SAlexander Duyck 
1896aa3ac822SAlexander Duyck for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
1897aa3ac822SAlexander Duyck q_idx = pc;
1898aa3ac822SAlexander Duyck for (i = 0; i < rss_i; i++) {
1899aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->reg_idx = q_idx;
1900aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->qos_pc = pc;
1901aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->reg_idx = q_idx;
1902aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->qos_pc = pc;
1903aa3ac822SAlexander Duyck q_idx += pc_stride;
1904aa3ac822SAlexander Duyck }
1905aa3ac822SAlexander Duyck }
1906aa3ac822SAlexander Duyck 
1907aa3ac822SAlexander Duyck return true;
1908aa3ac822SAlexander Duyck }
1909aa3ac822SAlexander Duyck 
1910aa3ac822SAlexander Duyck /**
1911aa3ac822SAlexander Duyck * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
1912aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices
1913aa3ac822SAlexander Duyck *
1914aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for RSS
1915aa3ac822SAlexander Duyck **/
1916aa3ac822SAlexander Duyck static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
1917aa3ac822SAlexander Duyck {
1918aa3ac822SAlexander Duyck int i;
1919aa3ac822SAlexander Duyck 
1920aa3ac822SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++)
1921aa3ac822SAlexander Duyck interface->rx_ring[i]->reg_idx = i;
1922aa3ac822SAlexander Duyck 
1923aa3ac822SAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++)
1924aa3ac822SAlexander Duyck interface->tx_ring[i]->reg_idx = i;
1925aa3ac822SAlexander Duyck }
1926aa3ac822SAlexander Duyck 
1927aa3ac822SAlexander Duyck /**
1928aa3ac822SAlexander Duyck * fm10k_assign_rings - Map rings to network devices
1929aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices
1930aa3ac822SAlexander Duyck *
1931aa3ac822SAlexander Duyck * This function is meant to go through and configure both the network
1932aa3ac822SAlexander Duyck * devices so that they contain rings, and configure the rings so that
1933aa3ac822SAlexander Duyck * they function with their network devices.
1934aa3ac822SAlexander Duyck **/
1935aa3ac822SAlexander Duyck static void fm10k_assign_rings(struct fm10k_intfc *interface)
1936aa3ac822SAlexander Duyck {
1937aa3ac822SAlexander Duyck if (fm10k_cache_ring_qos(interface))
1938aa3ac822SAlexander Duyck return;
1939aa3ac822SAlexander Duyck 
1940aa3ac822SAlexander Duyck fm10k_cache_ring_rss(interface);
1941aa3ac822SAlexander Duyck }
1942aa3ac822SAlexander Duyck 
194318283cadSAlexander Duyck static void fm10k_init_reta(struct fm10k_intfc *interface)
194418283cadSAlexander Duyck {
194518283cadSAlexander Duyck u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
19460ea7fae4SJacob Keller struct net_device *netdev = interface->netdev;
19470ea7fae4SJacob Keller u32 reta, *indir;
194818283cadSAlexander Duyck 
19491012014eSKeller, Jacob E /* If the Rx flow indirection table has been configured manually, we
19501012014eSKeller, Jacob E * need to maintain it when possible.
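 * Each 32-bit reta word packs four 8-bit queue indices, which is why the
 * check below extracts and range-checks each byte against rss_i.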
19511012014eSKeller, Jacob E */ 19521012014eSKeller, Jacob E if (netif_is_rxfh_configured(interface->netdev)) { 195318283cadSAlexander Duyck for (i = FM10K_RETA_SIZE; i--;) { 195418283cadSAlexander Duyck reta = interface->reta[i]; 195518283cadSAlexander Duyck if ((((reta << 24) >> 24) < rss_i) && 195618283cadSAlexander Duyck (((reta << 16) >> 24) < rss_i) && 195718283cadSAlexander Duyck (((reta << 8) >> 24) < rss_i) && 195818283cadSAlexander Duyck (((reta) >> 24) < rss_i)) 195918283cadSAlexander Duyck continue; 19601012014eSKeller, Jacob E 19611012014eSKeller, Jacob E /* this should never happen */ 19621012014eSKeller, Jacob E dev_err(&interface->pdev->dev, 19631012014eSKeller, Jacob E "RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n"); 196418283cadSAlexander Duyck goto repopulate_reta; 196518283cadSAlexander Duyck } 196618283cadSAlexander Duyck 196718283cadSAlexander Duyck /* do nothing if all of the elements are in bounds */ 196818283cadSAlexander Duyck return; 196918283cadSAlexander Duyck } 197018283cadSAlexander Duyck 197118283cadSAlexander Duyck repopulate_reta: 19720ea7fae4SJacob Keller indir = kcalloc(fm10k_get_reta_size(netdev), 19730ea7fae4SJacob Keller sizeof(indir[0]), GFP_KERNEL); 197418283cadSAlexander Duyck 19750ea7fae4SJacob Keller /* generate redirection table using the default kernel policy */ 19760ea7fae4SJacob Keller for (i = 0; i < fm10k_get_reta_size(netdev); i++) 19770ea7fae4SJacob Keller indir[i] = ethtool_rxfh_indir_default(i, rss_i); 197818283cadSAlexander Duyck 19790ea7fae4SJacob Keller fm10k_write_reta(interface, indir); 19800ea7fae4SJacob Keller 19810ea7fae4SJacob Keller kfree(indir); 198218283cadSAlexander Duyck } 198318283cadSAlexander Duyck 198418283cadSAlexander Duyck /** 198518283cadSAlexander Duyck * fm10k_init_queueing_scheme - Determine proper queueing scheme 198618283cadSAlexander Duyck * @interface: board private structure to initialize 198718283cadSAlexander Duyck * 198818283cadSAlexander Duyck * We determine which queueing scheme to use based on... 198918283cadSAlexander Duyck * - Hardware queue count (num_*_queues) 199018283cadSAlexander Duyck * - defined by miscellaneous hardware support/features (RSS, etc.) 
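 *
 * The resulting order of operations is: pick the queue counts, set up the
 * MSI-X vectors, allocate the q_vectors and rings, map the rings onto
 * hardware queue registers, and finally seed the RSS redirection table.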
199118283cadSAlexander Duyck **/ 199218283cadSAlexander Duyck int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) 199318283cadSAlexander Duyck { 199418283cadSAlexander Duyck int err; 199518283cadSAlexander Duyck 199618283cadSAlexander Duyck /* Number of supported queues */ 199718283cadSAlexander Duyck fm10k_set_num_queues(interface); 199818283cadSAlexander Duyck 199918283cadSAlexander Duyck /* Configure MSI-X capability */ 200018283cadSAlexander Duyck err = fm10k_init_msix_capability(interface); 200118283cadSAlexander Duyck if (err) { 200218283cadSAlexander Duyck dev_err(&interface->pdev->dev, 200318283cadSAlexander Duyck "Unable to initialize MSI-X capability\n"); 20044be37c42SJacob Keller goto err_init_msix; 200518283cadSAlexander Duyck } 200618283cadSAlexander Duyck 200718283cadSAlexander Duyck /* Allocate memory for queues */ 200818283cadSAlexander Duyck err = fm10k_alloc_q_vectors(interface); 2009587731e6SAlexander Duyck if (err) { 20104be37c42SJacob Keller dev_err(&interface->pdev->dev, 20114be37c42SJacob Keller "Unable to allocate queue vectors\n"); 20124be37c42SJacob Keller goto err_alloc_q_vectors; 2013587731e6SAlexander Duyck } 201418283cadSAlexander Duyck 2015aa3ac822SAlexander Duyck /* Map rings to devices, and map devices to physical queues */ 2016aa3ac822SAlexander Duyck fm10k_assign_rings(interface); 2017aa3ac822SAlexander Duyck 201818283cadSAlexander Duyck /* Initialize RSS redirection table */ 201918283cadSAlexander Duyck fm10k_init_reta(interface); 202018283cadSAlexander Duyck 202118283cadSAlexander Duyck return 0; 20224be37c42SJacob Keller 20234be37c42SJacob Keller err_alloc_q_vectors: 20244be37c42SJacob Keller fm10k_reset_msix_capability(interface); 20254be37c42SJacob Keller err_init_msix: 20264be37c42SJacob Keller fm10k_reset_num_queues(interface); 20274be37c42SJacob Keller return err; 202818283cadSAlexander Duyck } 202918283cadSAlexander Duyck 203018283cadSAlexander Duyck /** 203118283cadSAlexander Duyck * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings 203218283cadSAlexander Duyck * @interface: board private structure to clear queueing scheme on 203318283cadSAlexander Duyck * 203418283cadSAlexander Duyck * We go through and clear queueing specific resources and reset the structure 203518283cadSAlexander Duyck * to pre-load conditions 203618283cadSAlexander Duyck **/ 203718283cadSAlexander Duyck void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) 203818283cadSAlexander Duyck { 203918283cadSAlexander Duyck fm10k_free_q_vectors(interface); 204018283cadSAlexander Duyck fm10k_reset_msix_capability(interface); 204118283cadSAlexander Duyck } 2042