/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION	"0.12.2-k"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] =
	"Intel(R) Ethernet Switch Host Interface Driver";
static const char fm10k_copyright[] =
	"Copyright (c) 2013 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	fm10k_dbg_init();

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);
		bi->page = NULL;

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

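/* Rx buffer handling: each fm10k_rx_buffer owns a whole page that is DMA
 * mapped once in fm10k_alloc_mapped_page() and then handed to hardware in
 * FM10K_RX_BUFSZ sized slices via page_offset.  fm10k_alloc_rx_buffers()
 * below therefore only has to refresh the packet address in each
 * descriptor and batch the tail update; the allocation and mapping work
 * is skipped whenever a previously used page can be recycled.
 */
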
/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->q.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer));

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_mem_id()))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}

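/* Summary of the reuse policy implemented above: on systems with 4K pages
 * the page is split into two FM10K_RX_BUFSZ halves and page_offset simply
 * flips between them, which is only safe while the driver holds the sole
 * reference (page_count() == 1).  With larger pages the offset instead
 * walks forward by truesize until there is no longer room for another
 * buffer.  In both cases a reference is taken before the current half is
 * handed to the stack so the recycled half stays valid.
 */
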
/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
			      struct fm10k_rx_buffer *rx_buffer,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

	if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(page) == numa_mem_id()))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}

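/* Note: the copy path above acts as a small-packet "copybreak".  Frames no
 * larger than FM10K_RX_HDR_LEN that start a new skb are copied straight
 * into the linear area, which lets the page be recycled without taking an
 * extra reference; everything else is attached as a page fragment and the
 * reuse decision is deferred to fm10k_can_reuse_rx_page().
 */
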
static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];

	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

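/* Only the RSS types listed below hash over the L4 ports.  When the
 * descriptor reports one of them the hash handed to the stack is marked
 * PKT_HASH_TYPE_L4; any other non-zero rss_type is marked
 * PKT_HASH_TYPE_L3, and a zero rss_type means no hash was computed.
 */
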
#define FM10K_RSS_L4_TYPES_MASK \
	((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct fm10k_intfc *interface = rx_ring->q_vector->interface;

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
		fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
					  le64_to_cpu(rx_desc->q.timestamp));
}

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (!l2_accel)
		return;

	/* update MACVLAN statistics */
	macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
			 !!(rx_desc->w.hdr_info &
			    cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	skb_record_rx_queue(skb, rx_ring->queue_index);

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if (vid != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

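/* Frames that span several descriptors are assembled across poll
 * iterations: fm10k_is_non_eop() advances next_to_clean and the partially
 * built skb is carried in rx_ring->skb until the EOP descriptor arrives,
 * at which point the headers are pulled into the linear area below.
 */
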
/**
 * fm10k_pull_tail - fm10k specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being adjusted
 *
 * This function is an fm10k specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void fm10k_pull_tail(struct fm10k_ring *rx_ring,
			    union fm10k_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		fm10k_pull_tail(rx_ring, rx_desc, skb);

	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

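/* Main Rx service routine.  It walks the descriptor ring until either the
 * NAPI budget is spent or a descriptor without the DD bit is found,
 * replenishing buffers in batches of FM10K_RX_BUFFER_WRITE along the way.
 * The rmb() after the DD test keeps the rest of the descriptor from being
 * read before the done bit itself has been observed.
 */
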
static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *rx_ring,
			       int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	do {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STATUS_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	} while (likely(total_packets < budget));

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets < budget;
}

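/* Tx tunnel offload support.  The two helpers below decide whether an
 * outgoing encapsulated frame is something the hardware understands:
 * fm10k_port_is_vxlan() matches the UDP destination port against the
 * first VXLAN port registered with the driver, and fm10k_gre_is_nvgre()
 * accepts only NVGRE (GRE carrying transparent Ethernet bridging with no
 * reserved flag bits set).  Both return a pointer to the inner Ethernet
 * header on success so the caller can inspect the inner protocol.
 */
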
#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_vxlan_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_vxlan_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* verify protocol is transparent Ethernet bridging */
	if (nvgre_hdr->proto != htons(ETH_P_TEB))
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}

static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u8 l4_hdr = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		break;
	default:
		return 0;
	}

	return eth_hdr->h_proto;
}

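/* Note on the TSO accounting below: gso_segs copies of the protocol
 * headers end up on the wire but only one copy is present in the skb, so
 * bytecount is grown by (gso_segs - 1) * hdrlen to keep the BQL byte
 * counts accurate.  A GSO frame whose tunnel type cannot be offloaded
 * takes the err_vxlan path, which clears NETIF_F_GSO_UDP_TUNNEL for the
 * device and causes the frame to be dropped by the caller.
 */
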
static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;
err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (!net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but ip version=%x!\n",
				 protocol);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but l4 proto=%x!\n",
				 l4_hdr);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}

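/* FM10K_SET_FLAG() below translates a single flag bit from the software
 * tx_flags word into the corresponding bit of the descriptor flags field:
 * when _flag is less than or equal to _result the masked value is
 * multiplied by (_result / _flag), otherwise it is divided by
 * (_flag / _result).  Because both flag constants are compile-time
 * values the compiler can reduce the whole expression to a mask and a
 * shift, avoiding a conditional in the hot path.
 */
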
#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set timestamping bits */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		desc_flags |= FM10K_TXD_FLAG_TIME;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}

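/* fm10k_tx_map() below walks the skb: the linear area first, then each
 * page fragment, splitting any chunk larger than FM10K_MAX_DATA_PER_TXD
 * across several descriptors and wrapping the ring index back to zero as
 * needed.  The unmap length and address are stashed in the tx_buffer
 * entries so a DMA mapping failure can unwind every descriptor written so
 * far via the dma_error path.
 */
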
static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (vlan_tx_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}

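/* Tx queue flow control: fm10k_xmit_frame_ring() checks up front that the
 * worst-case number of descriptors for the frame is available and stops
 * the subqueue otherwise; after the frame is queued it checks again with
 * DESC_NEEDED.  In the slow path above, the smp_mb() after stopping the
 * queue makes the stopped state visible before the free-descriptor count
 * is re-read, which closes the race against a completion freeing
 * descriptors at the same moment.
 */
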
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u16 count = TXD_USE_COUNT(skb_headlen(skb));

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
1128b101c962SAlexander Duyck } 1129b101c962SAlexander Duyck 1130b101c962SAlexander Duyck static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) 1131b101c962SAlexander Duyck { 1132b101c962SAlexander Duyck return ring->stats.packets; 1133b101c962SAlexander Duyck } 1134b101c962SAlexander Duyck 1135b101c962SAlexander Duyck static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) 1136b101c962SAlexander Duyck { 1137b101c962SAlexander Duyck /* use SW head and tail until we have real hardware */ 1138b101c962SAlexander Duyck u32 head = ring->next_to_clean; 1139b101c962SAlexander Duyck u32 tail = ring->next_to_use; 1140b101c962SAlexander Duyck 1141b101c962SAlexander Duyck return ((head <= tail) ? tail : tail + ring->count) - head; 1142b101c962SAlexander Duyck } 1143b101c962SAlexander Duyck 1144b101c962SAlexander Duyck bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) 1145b101c962SAlexander Duyck { 1146b101c962SAlexander Duyck u32 tx_done = fm10k_get_tx_completed(tx_ring); 1147b101c962SAlexander Duyck u32 tx_done_old = tx_ring->tx_stats.tx_done_old; 1148b101c962SAlexander Duyck u32 tx_pending = fm10k_get_tx_pending(tx_ring); 1149b101c962SAlexander Duyck 1150b101c962SAlexander Duyck clear_check_for_tx_hang(tx_ring); 1151b101c962SAlexander Duyck 1152b101c962SAlexander Duyck /* Check for a hung queue, but be thorough. This verifies 1153b101c962SAlexander Duyck * that a transmit has been completed since the previous 1154b101c962SAlexander Duyck * check AND there is at least one packet pending. By 1155b101c962SAlexander Duyck * requiring this to fail twice we avoid races with 1156b101c962SAlexander Duyck * clearing the ARMED bit and conditions where we 1157b101c962SAlexander Duyck * run the check_tx_hang logic with a transmit completion 1158b101c962SAlexander Duyck * pending but without time to complete it yet. 
1159b101c962SAlexander Duyck */ 1160b101c962SAlexander Duyck if (!tx_pending || (tx_done_old != tx_done)) { 1161b101c962SAlexander Duyck /* update completed stats and continue */ 1162b101c962SAlexander Duyck tx_ring->tx_stats.tx_done_old = tx_done; 1163b101c962SAlexander Duyck /* reset the countdown */ 1164b101c962SAlexander Duyck clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); 1165b101c962SAlexander Duyck 1166b101c962SAlexander Duyck return false; 1167b101c962SAlexander Duyck } 1168b101c962SAlexander Duyck 1169b101c962SAlexander Duyck /* make sure it is true for two checks in a row */ 1170b101c962SAlexander Duyck return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); 1171b101c962SAlexander Duyck } 1172b101c962SAlexander Duyck 1173b101c962SAlexander Duyck /** 1174b101c962SAlexander Duyck * fm10k_tx_timeout_reset - initiate reset due to Tx timeout 1175b101c962SAlexander Duyck * @interface: driver private struct 1176b101c962SAlexander Duyck **/ 1177b101c962SAlexander Duyck void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) 1178b101c962SAlexander Duyck { 1179b101c962SAlexander Duyck /* Do the reset outside of interrupt context */ 1180b101c962SAlexander Duyck if (!test_bit(__FM10K_DOWN, &interface->state)) { 1181b101c962SAlexander Duyck netdev_err(interface->netdev, "Reset interface\n"); 1182b101c962SAlexander Duyck interface->tx_timeout_count++; 1183b101c962SAlexander Duyck interface->flags |= FM10K_FLAG_RESET_REQUESTED; 1184b101c962SAlexander Duyck fm10k_service_event_schedule(interface); 1185b101c962SAlexander Duyck } 1186b101c962SAlexander Duyck } 1187b101c962SAlexander Duyck 1188b101c962SAlexander Duyck /** 1189b101c962SAlexander Duyck * fm10k_clean_tx_irq - Reclaim resources after transmit completes 1190b101c962SAlexander Duyck * @q_vector: structure containing interrupt and ring information 1191b101c962SAlexander Duyck * @tx_ring: tx ring to clean 1192b101c962SAlexander Duyck **/ 1193b101c962SAlexander Duyck static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, 1194b101c962SAlexander Duyck struct fm10k_ring *tx_ring) 1195b101c962SAlexander Duyck { 1196b101c962SAlexander Duyck struct fm10k_intfc *interface = q_vector->interface; 1197b101c962SAlexander Duyck struct fm10k_tx_buffer *tx_buffer; 1198b101c962SAlexander Duyck struct fm10k_tx_desc *tx_desc; 1199b101c962SAlexander Duyck unsigned int total_bytes = 0, total_packets = 0; 1200b101c962SAlexander Duyck unsigned int budget = q_vector->tx.work_limit; 1201b101c962SAlexander Duyck unsigned int i = tx_ring->next_to_clean; 1202b101c962SAlexander Duyck 1203b101c962SAlexander Duyck if (test_bit(__FM10K_DOWN, &interface->state)) 1204b101c962SAlexander Duyck return true; 1205b101c962SAlexander Duyck 1206b101c962SAlexander Duyck tx_buffer = &tx_ring->tx_buffer[i]; 1207b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, i); 1208b101c962SAlexander Duyck i -= tx_ring->count; 1209b101c962SAlexander Duyck 1210b101c962SAlexander Duyck do { 1211b101c962SAlexander Duyck struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; 1212b101c962SAlexander Duyck 1213b101c962SAlexander Duyck /* if next_to_watch is not set then there is no work pending */ 1214b101c962SAlexander Duyck if (!eop_desc) 1215b101c962SAlexander Duyck break; 1216b101c962SAlexander Duyck 1217b101c962SAlexander Duyck /* prevent any other reads prior to eop_desc */ 1218b101c962SAlexander Duyck read_barrier_depends(); 1219b101c962SAlexander Duyck 1220b101c962SAlexander Duyck /* if DD is not set pending work has not been completed */ 
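/* Once the DONE flag is observed on the last descriptor of a packet
 * (eop_desc), every buffer from next_to_clean up to and including
 * eop_desc is complete and can be unmapped and reclaimed by the loop
 * below.
 */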
1221b101c962SAlexander Duyck if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) 1222b101c962SAlexander Duyck break; 1223b101c962SAlexander Duyck 1224b101c962SAlexander Duyck /* clear next_to_watch to prevent false hangs */ 1225b101c962SAlexander Duyck tx_buffer->next_to_watch = NULL; 1226b101c962SAlexander Duyck 1227b101c962SAlexander Duyck /* update the statistics for this packet */ 1228b101c962SAlexander Duyck total_bytes += tx_buffer->bytecount; 1229b101c962SAlexander Duyck total_packets += tx_buffer->gso_segs; 1230b101c962SAlexander Duyck 1231b101c962SAlexander Duyck /* free the skb */ 1232b101c962SAlexander Duyck dev_consume_skb_any(tx_buffer->skb); 1233b101c962SAlexander Duyck 1234b101c962SAlexander Duyck /* unmap skb header data */ 1235b101c962SAlexander Duyck dma_unmap_single(tx_ring->dev, 1236b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1237b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1238b101c962SAlexander Duyck DMA_TO_DEVICE); 1239b101c962SAlexander Duyck 1240b101c962SAlexander Duyck /* clear tx_buffer data */ 1241b101c962SAlexander Duyck tx_buffer->skb = NULL; 1242b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1243b101c962SAlexander Duyck 1244b101c962SAlexander Duyck /* unmap remaining buffers */ 1245b101c962SAlexander Duyck while (tx_desc != eop_desc) { 1246b101c962SAlexander Duyck tx_buffer++; 1247b101c962SAlexander Duyck tx_desc++; 1248b101c962SAlexander Duyck i++; 1249b101c962SAlexander Duyck if (unlikely(!i)) { 1250b101c962SAlexander Duyck i -= tx_ring->count; 1251b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1252b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1253b101c962SAlexander Duyck } 1254b101c962SAlexander Duyck 1255b101c962SAlexander Duyck /* unmap any remaining paged data */ 1256b101c962SAlexander Duyck if (dma_unmap_len(tx_buffer, len)) { 1257b101c962SAlexander Duyck dma_unmap_page(tx_ring->dev, 1258b101c962SAlexander Duyck dma_unmap_addr(tx_buffer, dma), 1259b101c962SAlexander Duyck dma_unmap_len(tx_buffer, len), 1260b101c962SAlexander Duyck DMA_TO_DEVICE); 1261b101c962SAlexander Duyck dma_unmap_len_set(tx_buffer, len, 0); 1262b101c962SAlexander Duyck } 1263b101c962SAlexander Duyck } 1264b101c962SAlexander Duyck 1265b101c962SAlexander Duyck /* move us one more past the eop_desc for start of next pkt */ 1266b101c962SAlexander Duyck tx_buffer++; 1267b101c962SAlexander Duyck tx_desc++; 1268b101c962SAlexander Duyck i++; 1269b101c962SAlexander Duyck if (unlikely(!i)) { 1270b101c962SAlexander Duyck i -= tx_ring->count; 1271b101c962SAlexander Duyck tx_buffer = tx_ring->tx_buffer; 1272b101c962SAlexander Duyck tx_desc = FM10K_TX_DESC(tx_ring, 0); 1273b101c962SAlexander Duyck } 1274b101c962SAlexander Duyck 1275b101c962SAlexander Duyck /* issue prefetch for next Tx descriptor */ 1276b101c962SAlexander Duyck prefetch(tx_desc); 1277b101c962SAlexander Duyck 1278b101c962SAlexander Duyck /* update budget accounting */ 1279b101c962SAlexander Duyck budget--; 1280b101c962SAlexander Duyck } while (likely(budget)); 1281b101c962SAlexander Duyck 1282b101c962SAlexander Duyck i += tx_ring->count; 1283b101c962SAlexander Duyck tx_ring->next_to_clean = i; 1284b101c962SAlexander Duyck u64_stats_update_begin(&tx_ring->syncp); 1285b101c962SAlexander Duyck tx_ring->stats.bytes += total_bytes; 1286b101c962SAlexander Duyck tx_ring->stats.packets += total_packets; 1287b101c962SAlexander Duyck u64_stats_update_end(&tx_ring->syncp); 1288b101c962SAlexander Duyck q_vector->tx.total_bytes += total_bytes; 1289b101c962SAlexander Duyck 
q_vector->tx.total_packets += total_packets; 1290b101c962SAlexander Duyck 1291b101c962SAlexander Duyck if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { 1292b101c962SAlexander Duyck /* schedule immediate reset if we believe we hung */ 1293b101c962SAlexander Duyck struct fm10k_hw *hw = &interface->hw; 1294b101c962SAlexander Duyck 1295b101c962SAlexander Duyck netif_err(interface, drv, tx_ring->netdev, 1296b101c962SAlexander Duyck "Detected Tx Unit Hang\n" 1297b101c962SAlexander Duyck " Tx Queue <%d>\n" 1298b101c962SAlexander Duyck " TDH, TDT <%x>, <%x>\n" 1299b101c962SAlexander Duyck " next_to_use <%x>\n" 1300b101c962SAlexander Duyck " next_to_clean <%x>\n", 1301b101c962SAlexander Duyck tx_ring->queue_index, 1302b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), 1303b101c962SAlexander Duyck fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), 1304b101c962SAlexander Duyck tx_ring->next_to_use, i); 1305b101c962SAlexander Duyck 1306b101c962SAlexander Duyck netif_stop_subqueue(tx_ring->netdev, 1307b101c962SAlexander Duyck tx_ring->queue_index); 1308b101c962SAlexander Duyck 1309b101c962SAlexander Duyck netif_info(interface, probe, tx_ring->netdev, 1310b101c962SAlexander Duyck "tx hang %d detected on queue %d, resetting interface\n", 1311b101c962SAlexander Duyck interface->tx_timeout_count + 1, 1312b101c962SAlexander Duyck tx_ring->queue_index); 1313b101c962SAlexander Duyck 1314b101c962SAlexander Duyck fm10k_tx_timeout_reset(interface); 1315b101c962SAlexander Duyck 1316b101c962SAlexander Duyck /* the netdev is about to reset, no point in enabling stuff */ 1317b101c962SAlexander Duyck return true; 1318b101c962SAlexander Duyck } 1319b101c962SAlexander Duyck 1320b101c962SAlexander Duyck /* notify netdev of completed buffers */ 1321b101c962SAlexander Duyck netdev_tx_completed_queue(txring_txq(tx_ring), 1322b101c962SAlexander Duyck total_packets, total_bytes); 1323b101c962SAlexander Duyck 1324b101c962SAlexander Duyck #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2) 1325b101c962SAlexander Duyck if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 1326b101c962SAlexander Duyck (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 1327b101c962SAlexander Duyck /* Make sure that anybody stopping the queue after this 1328b101c962SAlexander Duyck * sees the new next_to_clean. 1329b101c962SAlexander Duyck */ 1330b101c962SAlexander Duyck smp_mb(); 1331b101c962SAlexander Duyck if (__netif_subqueue_stopped(tx_ring->netdev, 1332b101c962SAlexander Duyck tx_ring->queue_index) && 1333b101c962SAlexander Duyck !test_bit(__FM10K_DOWN, &interface->state)) { 1334b101c962SAlexander Duyck netif_wake_subqueue(tx_ring->netdev, 1335b101c962SAlexander Duyck tx_ring->queue_index); 1336b101c962SAlexander Duyck ++tx_ring->tx_stats.restart_queue; 1337b101c962SAlexander Duyck } 1338b101c962SAlexander Duyck } 1339b101c962SAlexander Duyck 1340b101c962SAlexander Duyck return !!budget; 1341b101c962SAlexander Duyck } 1342b101c962SAlexander Duyck 134318283cadSAlexander Duyck /** 134418283cadSAlexander Duyck * fm10k_update_itr - update the dynamic ITR value based on packet size 134518283cadSAlexander Duyck * 134618283cadSAlexander Duyck * Stores a new ITR value based strictly on packet size.
The 134718283cadSAlexander Duyck * divisors and thresholds used by this function were determined based 134818283cadSAlexander Duyck * on theoretical maximum wire speed and testing data, in order to 134918283cadSAlexander Duyck * minimize response time while increasing bulk throughput. 135018283cadSAlexander Duyck * 135118283cadSAlexander Duyck * @ring_container: Container for rings to have ITR updated 135218283cadSAlexander Duyck **/ 135318283cadSAlexander Duyck static void fm10k_update_itr(struct fm10k_ring_container *ring_container) 135418283cadSAlexander Duyck { 135518283cadSAlexander Duyck unsigned int avg_wire_size, packets; 135618283cadSAlexander Duyck 135718283cadSAlexander Duyck /* Only update ITR if we are using adaptive setting */ 135818283cadSAlexander Duyck if (!(ring_container->itr & FM10K_ITR_ADAPTIVE)) 135918283cadSAlexander Duyck goto clear_counts; 136018283cadSAlexander Duyck 136118283cadSAlexander Duyck packets = ring_container->total_packets; 136218283cadSAlexander Duyck if (!packets) 136318283cadSAlexander Duyck goto clear_counts; 136418283cadSAlexander Duyck 136518283cadSAlexander Duyck avg_wire_size = ring_container->total_bytes / packets; 136618283cadSAlexander Duyck 136718283cadSAlexander Duyck /* Add 24 bytes to size to account for CRC, preamble, and gap */ 136818283cadSAlexander Duyck avg_wire_size += 24; 136918283cadSAlexander Duyck 137018283cadSAlexander Duyck /* Don't starve jumbo frames */ 137118283cadSAlexander Duyck if (avg_wire_size > 3000) 137218283cadSAlexander Duyck avg_wire_size = 3000; 137318283cadSAlexander Duyck 137418283cadSAlexander Duyck /* Give a little boost to mid-size frames */ 137518283cadSAlexander Duyck if ((avg_wire_size > 300) && (avg_wire_size < 1200)) 137618283cadSAlexander Duyck avg_wire_size /= 3; 137718283cadSAlexander Duyck else 137818283cadSAlexander Duyck avg_wire_size /= 2; 137918283cadSAlexander Duyck 138018283cadSAlexander Duyck /* write back value and retain adaptive flag */ 138118283cadSAlexander Duyck ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; 138218283cadSAlexander Duyck 138318283cadSAlexander Duyck clear_counts: 138418283cadSAlexander Duyck ring_container->total_bytes = 0; 138518283cadSAlexander Duyck ring_container->total_packets = 0; 138618283cadSAlexander Duyck } 138718283cadSAlexander Duyck 138818283cadSAlexander Duyck static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) 138918283cadSAlexander Duyck { 139018283cadSAlexander Duyck /* Enable auto-mask and clear the current mask */ 139118283cadSAlexander Duyck u32 itr = FM10K_ITR_ENABLE; 139218283cadSAlexander Duyck 139318283cadSAlexander Duyck /* Update Tx ITR */ 139418283cadSAlexander Duyck fm10k_update_itr(&q_vector->tx); 139518283cadSAlexander Duyck 139618283cadSAlexander Duyck /* Update Rx ITR */ 139718283cadSAlexander Duyck fm10k_update_itr(&q_vector->rx); 139818283cadSAlexander Duyck 139918283cadSAlexander Duyck /* Store Tx itr in timer slot 0 */ 140018283cadSAlexander Duyck itr |= (q_vector->tx.itr & FM10K_ITR_MAX); 140118283cadSAlexander Duyck 140218283cadSAlexander Duyck /* Shift Rx itr to timer slot 1 */ 140318283cadSAlexander Duyck itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; 140418283cadSAlexander Duyck 140518283cadSAlexander Duyck /* Write the final value to the ITR register */ 140618283cadSAlexander Duyck writel(itr, q_vector->itr); 140718283cadSAlexander Duyck } 140818283cadSAlexander Duyck 140918283cadSAlexander Duyck static int fm10k_poll(struct napi_struct *napi, int budget) 141018283cadSAlexander Duyck 
{ 141118283cadSAlexander Duyck struct fm10k_q_vector *q_vector = 141218283cadSAlexander Duyck container_of(napi, struct fm10k_q_vector, napi); 1413b101c962SAlexander Duyck struct fm10k_ring *ring; 1414b101c962SAlexander Duyck int per_ring_budget; 1415b101c962SAlexander Duyck bool clean_complete = true; 1416b101c962SAlexander Duyck 1417b101c962SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) 1418b101c962SAlexander Duyck clean_complete &= fm10k_clean_tx_irq(q_vector, ring); 1419b101c962SAlexander Duyck 1420b101c962SAlexander Duyck /* attempt to distribute budget to each queue fairly, but don't 1421b101c962SAlexander Duyck * allow the budget to go below 1 because we'll exit polling 1422b101c962SAlexander Duyck */ 1423b101c962SAlexander Duyck if (q_vector->rx.count > 1) 1424b101c962SAlexander Duyck per_ring_budget = max(budget/q_vector->rx.count, 1); 1425b101c962SAlexander Duyck else 1426b101c962SAlexander Duyck per_ring_budget = budget; 1427b101c962SAlexander Duyck 1428b101c962SAlexander Duyck fm10k_for_each_ring(ring, q_vector->rx) 1429b101c962SAlexander Duyck clean_complete &= fm10k_clean_rx_irq(q_vector, ring, 1430b101c962SAlexander Duyck per_ring_budget); 1431b101c962SAlexander Duyck 1432b101c962SAlexander Duyck /* If all work not completed, return budget and keep polling */ 1433b101c962SAlexander Duyck if (!clean_complete) 1434b101c962SAlexander Duyck return budget; 143518283cadSAlexander Duyck 143618283cadSAlexander Duyck /* all work done, exit the polling mode */ 143718283cadSAlexander Duyck napi_complete(napi); 143818283cadSAlexander Duyck 143918283cadSAlexander Duyck /* re-enable the q_vector */ 144018283cadSAlexander Duyck fm10k_qv_enable(q_vector); 144118283cadSAlexander Duyck 144218283cadSAlexander Duyck return 0; 144318283cadSAlexander Duyck } 144418283cadSAlexander Duyck 144518283cadSAlexander Duyck /** 1446aa3ac822SAlexander Duyck * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device 1447aa3ac822SAlexander Duyck * @interface: board private structure to initialize 1448aa3ac822SAlexander Duyck * 1449aa3ac822SAlexander Duyck * When QoS (Quality of Service) is enabled, allocate queues for 1450aa3ac822SAlexander Duyck * each traffic class. If multiqueue isn't available, then abort QoS 1451aa3ac822SAlexander Duyck * initialization. 1452aa3ac822SAlexander Duyck * 1453aa3ac822SAlexander Duyck * This function handles all combinations of QoS and RSS.
1454aa3ac822SAlexander Duyck * 1455aa3ac822SAlexander Duyck **/ 1456aa3ac822SAlexander Duyck static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) 1457aa3ac822SAlexander Duyck { 1458aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev; 1459aa3ac822SAlexander Duyck struct fm10k_ring_feature *f; 1460aa3ac822SAlexander Duyck int rss_i, i; 1461aa3ac822SAlexander Duyck int pcs; 1462aa3ac822SAlexander Duyck 1463aa3ac822SAlexander Duyck /* Map queue offset and counts onto allocated tx queues */ 1464aa3ac822SAlexander Duyck pcs = netdev_get_num_tc(dev); 1465aa3ac822SAlexander Duyck 1466aa3ac822SAlexander Duyck if (pcs <= 1) 1467aa3ac822SAlexander Duyck return false; 1468aa3ac822SAlexander Duyck 1469aa3ac822SAlexander Duyck /* set QoS mask and indices */ 1470aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_QOS]; 1471aa3ac822SAlexander Duyck f->indices = pcs; 1472aa3ac822SAlexander Duyck f->mask = (1 << fls(pcs - 1)) - 1; 1473aa3ac822SAlexander Duyck 1474aa3ac822SAlexander Duyck /* determine the upper limit for our current DCB mode */ 1475aa3ac822SAlexander Duyck rss_i = interface->hw.mac.max_queues / pcs; 1476aa3ac822SAlexander Duyck rss_i = 1 << (fls(rss_i) - 1); 1477aa3ac822SAlexander Duyck 1478aa3ac822SAlexander Duyck /* set RSS mask and indices */ 1479aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS]; 1480aa3ac822SAlexander Duyck rss_i = min_t(u16, rss_i, f->limit); 1481aa3ac822SAlexander Duyck f->indices = rss_i; 1482aa3ac822SAlexander Duyck f->mask = (1 << fls(rss_i - 1)) - 1; 1483aa3ac822SAlexander Duyck 1484aa3ac822SAlexander Duyck /* configure pause class to queue mapping */ 1485aa3ac822SAlexander Duyck for (i = 0; i < pcs; i++) 1486aa3ac822SAlexander Duyck netdev_set_tc_queue(dev, i, rss_i, rss_i * i); 1487aa3ac822SAlexander Duyck 1488aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i * pcs; 1489aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i * pcs; 1490aa3ac822SAlexander Duyck 1491aa3ac822SAlexander Duyck return true; 1492aa3ac822SAlexander Duyck } 1493aa3ac822SAlexander Duyck 1494aa3ac822SAlexander Duyck /** 1495aa3ac822SAlexander Duyck * fm10k_set_rss_queues: Allocate queues for RSS 1496aa3ac822SAlexander Duyck * @interface: board private structure to initialize 1497aa3ac822SAlexander Duyck * 1498aa3ac822SAlexander Duyck * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 1499aa3ac822SAlexander Duyck * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 
1500aa3ac822SAlexander Duyck * 1501aa3ac822SAlexander Duyck **/ 1502aa3ac822SAlexander Duyck static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) 1503aa3ac822SAlexander Duyck { 1504aa3ac822SAlexander Duyck struct fm10k_ring_feature *f; 1505aa3ac822SAlexander Duyck u16 rss_i; 1506aa3ac822SAlexander Duyck 1507aa3ac822SAlexander Duyck f = &interface->ring_feature[RING_F_RSS]; 1508aa3ac822SAlexander Duyck rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); 1509aa3ac822SAlexander Duyck 1510aa3ac822SAlexander Duyck /* record indices and power of 2 mask for RSS */ 1511aa3ac822SAlexander Duyck f->indices = rss_i; 1512aa3ac822SAlexander Duyck f->mask = (1 << fls(rss_i - 1)) - 1; 1513aa3ac822SAlexander Duyck 1514aa3ac822SAlexander Duyck interface->num_rx_queues = rss_i; 1515aa3ac822SAlexander Duyck interface->num_tx_queues = rss_i; 1516aa3ac822SAlexander Duyck 1517aa3ac822SAlexander Duyck return true; 1518aa3ac822SAlexander Duyck } 1519aa3ac822SAlexander Duyck 1520aa3ac822SAlexander Duyck /** 152118283cadSAlexander Duyck * fm10k_set_num_queues: Allocate queues for device, feature dependent 152218283cadSAlexander Duyck * @interface: board private structure to initialize 152318283cadSAlexander Duyck * 152418283cadSAlexander Duyck * This is the top level queue allocation routine. The order here is very 152518283cadSAlexander Duyck * important, starting with the "most" number of features turned on at once, 152618283cadSAlexander Duyck * and ending with the smallest set of features. This way large combinations 152718283cadSAlexander Duyck * can be allocated if they're turned on, and smaller combinations are the 152818283cadSAlexander Duyck * fallthrough conditions. 152918283cadSAlexander Duyck * 153018283cadSAlexander Duyck **/ 153118283cadSAlexander Duyck static void fm10k_set_num_queues(struct fm10k_intfc *interface) 153218283cadSAlexander Duyck { 153318283cadSAlexander Duyck /* Start with base case */ 153418283cadSAlexander Duyck interface->num_rx_queues = 1; 153518283cadSAlexander Duyck interface->num_tx_queues = 1; 1536aa3ac822SAlexander Duyck 1537aa3ac822SAlexander Duyck if (fm10k_set_qos_queues(interface)) 1538aa3ac822SAlexander Duyck return; 1539aa3ac822SAlexander Duyck 1540aa3ac822SAlexander Duyck fm10k_set_rss_queues(interface); 154118283cadSAlexander Duyck } 154218283cadSAlexander Duyck 154318283cadSAlexander Duyck /** 154418283cadSAlexander Duyck * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector 154518283cadSAlexander Duyck * @interface: board private structure to initialize 154618283cadSAlexander Duyck * @v_count: q_vectors allocated on interface, used for ring interleaving 154718283cadSAlexander Duyck * @v_idx: index of vector in interface struct 154818283cadSAlexander Duyck * @txr_count: total number of Tx rings to allocate 154918283cadSAlexander Duyck * @txr_idx: index of first Tx ring to allocate 155018283cadSAlexander Duyck * @rxr_count: total number of Rx rings to allocate 155118283cadSAlexander Duyck * @rxr_idx: index of first Rx ring to allocate 155218283cadSAlexander Duyck * 155318283cadSAlexander Duyck * We allocate one q_vector. If allocation fails we return -ENOMEM. 
155418283cadSAlexander Duyck **/ 155518283cadSAlexander Duyck static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, 155618283cadSAlexander Duyck unsigned int v_count, unsigned int v_idx, 155718283cadSAlexander Duyck unsigned int txr_count, unsigned int txr_idx, 155818283cadSAlexander Duyck unsigned int rxr_count, unsigned int rxr_idx) 155918283cadSAlexander Duyck { 156018283cadSAlexander Duyck struct fm10k_q_vector *q_vector; 1561e27ef599SAlexander Duyck struct fm10k_ring *ring; 156218283cadSAlexander Duyck int ring_count, size; 156318283cadSAlexander Duyck 156418283cadSAlexander Duyck ring_count = txr_count + rxr_count; 1565e27ef599SAlexander Duyck size = sizeof(struct fm10k_q_vector) + 1566e27ef599SAlexander Duyck (sizeof(struct fm10k_ring) * ring_count); 156718283cadSAlexander Duyck 156818283cadSAlexander Duyck /* allocate q_vector and rings */ 156918283cadSAlexander Duyck q_vector = kzalloc(size, GFP_KERNEL); 157018283cadSAlexander Duyck if (!q_vector) 157118283cadSAlexander Duyck return -ENOMEM; 157218283cadSAlexander Duyck 157318283cadSAlexander Duyck /* initialize NAPI */ 157418283cadSAlexander Duyck netif_napi_add(interface->netdev, &q_vector->napi, 157518283cadSAlexander Duyck fm10k_poll, NAPI_POLL_WEIGHT); 157618283cadSAlexander Duyck 157718283cadSAlexander Duyck /* tie q_vector and interface together */ 157818283cadSAlexander Duyck interface->q_vector[v_idx] = q_vector; 157918283cadSAlexander Duyck q_vector->interface = interface; 158018283cadSAlexander Duyck q_vector->v_idx = v_idx; 158118283cadSAlexander Duyck 1582e27ef599SAlexander Duyck /* initialize pointer to rings */ 1583e27ef599SAlexander Duyck ring = q_vector->ring; 1584e27ef599SAlexander Duyck 158518283cadSAlexander Duyck /* save Tx ring container info */ 1586e27ef599SAlexander Duyck q_vector->tx.ring = ring; 1587e27ef599SAlexander Duyck q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; 158818283cadSAlexander Duyck q_vector->tx.itr = interface->tx_itr; 158918283cadSAlexander Duyck q_vector->tx.count = txr_count; 159018283cadSAlexander Duyck 1591e27ef599SAlexander Duyck while (txr_count) { 1592e27ef599SAlexander Duyck /* assign generic ring traits */ 1593e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1594e27ef599SAlexander Duyck ring->netdev = interface->netdev; 1595e27ef599SAlexander Duyck 1596e27ef599SAlexander Duyck /* configure backlink on ring */ 1597e27ef599SAlexander Duyck ring->q_vector = q_vector; 1598e27ef599SAlexander Duyck 1599e27ef599SAlexander Duyck /* apply Tx specific ring traits */ 1600e27ef599SAlexander Duyck ring->count = interface->tx_ring_count; 1601e27ef599SAlexander Duyck ring->queue_index = txr_idx; 1602e27ef599SAlexander Duyck 1603e27ef599SAlexander Duyck /* assign ring to interface */ 1604e27ef599SAlexander Duyck interface->tx_ring[txr_idx] = ring; 1605e27ef599SAlexander Duyck 1606e27ef599SAlexander Duyck /* update count and index */ 1607e27ef599SAlexander Duyck txr_count--; 1608e27ef599SAlexander Duyck txr_idx += v_count; 1609e27ef599SAlexander Duyck 1610e27ef599SAlexander Duyck /* push pointer to next ring */ 1611e27ef599SAlexander Duyck ring++; 1612e27ef599SAlexander Duyck } 1613e27ef599SAlexander Duyck 161418283cadSAlexander Duyck /* save Rx ring container info */ 1615e27ef599SAlexander Duyck q_vector->rx.ring = ring; 161618283cadSAlexander Duyck q_vector->rx.itr = interface->rx_itr; 161718283cadSAlexander Duyck q_vector->rx.count = rxr_count; 161818283cadSAlexander Duyck 1619e27ef599SAlexander Duyck while (rxr_count) { 1620e27ef599SAlexander Duyck /* assign generic 
ring traits */ 1621e27ef599SAlexander Duyck ring->dev = &interface->pdev->dev; 1622e27ef599SAlexander Duyck ring->netdev = interface->netdev; 16235cd5e2e9SAlexander Duyck rcu_assign_pointer(ring->l2_accel, interface->l2_accel); 1624e27ef599SAlexander Duyck 1625e27ef599SAlexander Duyck /* configure backlink on ring */ 1626e27ef599SAlexander Duyck ring->q_vector = q_vector; 1627e27ef599SAlexander Duyck 1628e27ef599SAlexander Duyck /* apply Rx specific ring traits */ 1629e27ef599SAlexander Duyck ring->count = interface->rx_ring_count; 1630e27ef599SAlexander Duyck ring->queue_index = rxr_idx; 1631e27ef599SAlexander Duyck 1632e27ef599SAlexander Duyck /* assign ring to interface */ 1633e27ef599SAlexander Duyck interface->rx_ring[rxr_idx] = ring; 1634e27ef599SAlexander Duyck 1635e27ef599SAlexander Duyck /* update count and index */ 1636e27ef599SAlexander Duyck rxr_count--; 1637e27ef599SAlexander Duyck rxr_idx += v_count; 1638e27ef599SAlexander Duyck 1639e27ef599SAlexander Duyck /* push pointer to next ring */ 1640e27ef599SAlexander Duyck ring++; 1641e27ef599SAlexander Duyck } 1642e27ef599SAlexander Duyck 16437461fd91SAlexander Duyck fm10k_dbg_q_vector_init(q_vector); 16447461fd91SAlexander Duyck 164518283cadSAlexander Duyck return 0; 164618283cadSAlexander Duyck } 164718283cadSAlexander Duyck 164818283cadSAlexander Duyck /** 164918283cadSAlexander Duyck * fm10k_free_q_vector - Free memory allocated for specific interrupt vector 165018283cadSAlexander Duyck * @interface: board private structure to initialize 165118283cadSAlexander Duyck * @v_idx: Index of vector to be freed 165218283cadSAlexander Duyck * 165318283cadSAlexander Duyck * This function frees the memory allocated to the q_vector. In addition if 165418283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior 165518283cadSAlexander Duyck * to freeing the q_vector. 165618283cadSAlexander Duyck **/ 165718283cadSAlexander Duyck static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) 165818283cadSAlexander Duyck { 165918283cadSAlexander Duyck struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; 1660e27ef599SAlexander Duyck struct fm10k_ring *ring; 1661e27ef599SAlexander Duyck 16627461fd91SAlexander Duyck fm10k_dbg_q_vector_exit(q_vector); 16637461fd91SAlexander Duyck 1664e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->tx) 1665e27ef599SAlexander Duyck interface->tx_ring[ring->queue_index] = NULL; 1666e27ef599SAlexander Duyck 1667e27ef599SAlexander Duyck fm10k_for_each_ring(ring, q_vector->rx) 1668e27ef599SAlexander Duyck interface->rx_ring[ring->queue_index] = NULL; 166918283cadSAlexander Duyck 167018283cadSAlexander Duyck interface->q_vector[v_idx] = NULL; 167118283cadSAlexander Duyck netif_napi_del(&q_vector->napi); 167218283cadSAlexander Duyck kfree_rcu(q_vector, rcu); 167318283cadSAlexander Duyck } 167418283cadSAlexander Duyck 167518283cadSAlexander Duyck /** 167618283cadSAlexander Duyck * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors 167718283cadSAlexander Duyck * @interface: board private structure to initialize 167818283cadSAlexander Duyck * 167918283cadSAlexander Duyck * We allocate one q_vector per queue interrupt. If allocation fails we 168018283cadSAlexander Duyck * return -ENOMEM. 
168118283cadSAlexander Duyck **/ 168218283cadSAlexander Duyck static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) 168318283cadSAlexander Duyck { 168418283cadSAlexander Duyck unsigned int q_vectors = interface->num_q_vectors; 168518283cadSAlexander Duyck unsigned int rxr_remaining = interface->num_rx_queues; 168618283cadSAlexander Duyck unsigned int txr_remaining = interface->num_tx_queues; 168718283cadSAlexander Duyck unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; 168818283cadSAlexander Duyck int err; 168918283cadSAlexander Duyck 169018283cadSAlexander Duyck if (q_vectors >= (rxr_remaining + txr_remaining)) { 169118283cadSAlexander Duyck for (; rxr_remaining; v_idx++) { 169218283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 169318283cadSAlexander Duyck 0, 0, 1, rxr_idx); 169418283cadSAlexander Duyck if (err) 169518283cadSAlexander Duyck goto err_out; 169618283cadSAlexander Duyck 169718283cadSAlexander Duyck /* update counts and index */ 169818283cadSAlexander Duyck rxr_remaining--; 169918283cadSAlexander Duyck rxr_idx++; 170018283cadSAlexander Duyck } 170118283cadSAlexander Duyck } 170218283cadSAlexander Duyck 170318283cadSAlexander Duyck for (; v_idx < q_vectors; v_idx++) { 170418283cadSAlexander Duyck int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 170518283cadSAlexander Duyck int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 170618283cadSAlexander Duyck 170718283cadSAlexander Duyck err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, 170818283cadSAlexander Duyck tqpv, txr_idx, 170918283cadSAlexander Duyck rqpv, rxr_idx); 171018283cadSAlexander Duyck 171118283cadSAlexander Duyck if (err) 171218283cadSAlexander Duyck goto err_out; 171318283cadSAlexander Duyck 171418283cadSAlexander Duyck /* update counts and index */ 171518283cadSAlexander Duyck rxr_remaining -= rqpv; 171618283cadSAlexander Duyck txr_remaining -= tqpv; 171718283cadSAlexander Duyck rxr_idx++; 171818283cadSAlexander Duyck txr_idx++; 171918283cadSAlexander Duyck } 172018283cadSAlexander Duyck 172118283cadSAlexander Duyck return 0; 172218283cadSAlexander Duyck 172318283cadSAlexander Duyck err_out: 172418283cadSAlexander Duyck interface->num_tx_queues = 0; 172518283cadSAlexander Duyck interface->num_rx_queues = 0; 172618283cadSAlexander Duyck interface->num_q_vectors = 0; 172718283cadSAlexander Duyck 172818283cadSAlexander Duyck while (v_idx--) 172918283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx); 173018283cadSAlexander Duyck 173118283cadSAlexander Duyck return -ENOMEM; 173218283cadSAlexander Duyck } 173318283cadSAlexander Duyck 173418283cadSAlexander Duyck /** 173518283cadSAlexander Duyck * fm10k_free_q_vectors - Free memory allocated for interrupt vectors 173618283cadSAlexander Duyck * @interface: board private structure to initialize 173718283cadSAlexander Duyck * 173818283cadSAlexander Duyck * This function frees the memory allocated to the q_vectors. In addition if 173918283cadSAlexander Duyck * NAPI is enabled it will delete any references to the NAPI struct prior 174018283cadSAlexander Duyck * to freeing the q_vector. 
174118283cadSAlexander Duyck **/ 174218283cadSAlexander Duyck static void fm10k_free_q_vectors(struct fm10k_intfc *interface) 174318283cadSAlexander Duyck { 174418283cadSAlexander Duyck int v_idx = interface->num_q_vectors; 174518283cadSAlexander Duyck 174618283cadSAlexander Duyck interface->num_tx_queues = 0; 174718283cadSAlexander Duyck interface->num_rx_queues = 0; 174818283cadSAlexander Duyck interface->num_q_vectors = 0; 174918283cadSAlexander Duyck 175018283cadSAlexander Duyck while (v_idx--) 175118283cadSAlexander Duyck fm10k_free_q_vector(interface, v_idx); 175218283cadSAlexander Duyck } 175318283cadSAlexander Duyck 175418283cadSAlexander Duyck /** 175518283cadSAlexander Duyck * fm10k_reset_msix_capability - reset MSI-X capability 175618283cadSAlexander Duyck * @interface: board private structure to initialize 175718283cadSAlexander Duyck * 175818283cadSAlexander Duyck * Reset the MSI-X capability back to its starting state 175918283cadSAlexander Duyck **/ 176018283cadSAlexander Duyck static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) 176118283cadSAlexander Duyck { 176218283cadSAlexander Duyck pci_disable_msix(interface->pdev); 176318283cadSAlexander Duyck kfree(interface->msix_entries); 176418283cadSAlexander Duyck interface->msix_entries = NULL; 176518283cadSAlexander Duyck } 176618283cadSAlexander Duyck 176718283cadSAlexander Duyck /** 176818283cadSAlexander Duyck * fm10k_init_msix_capability - configure MSI-X capability 176918283cadSAlexander Duyck * @interface: board private structure to initialize 177018283cadSAlexander Duyck * 177118283cadSAlexander Duyck * Attempt to configure the interrupts using the best available 177218283cadSAlexander Duyck * capabilities of the hardware and the kernel. 177318283cadSAlexander Duyck **/ 177418283cadSAlexander Duyck static int fm10k_init_msix_capability(struct fm10k_intfc *interface) 177518283cadSAlexander Duyck { 177618283cadSAlexander Duyck struct fm10k_hw *hw = &interface->hw; 177718283cadSAlexander Duyck int v_budget, vector; 177818283cadSAlexander Duyck 177918283cadSAlexander Duyck /* It's easy to be greedy for MSI-X vectors, but it really 178018283cadSAlexander Duyck * doesn't do us much good if we have a lot more vectors 178118283cadSAlexander Duyck * than CPUs. So let's be conservative and only ask for 178218283cadSAlexander Duyck * (roughly) the same number of vectors as there are CPUs. 178318283cadSAlexander Duyck * The default is to use pairs of vectors. 178418283cadSAlexander Duyck */ 178518283cadSAlexander Duyck v_budget = max(interface->num_rx_queues, interface->num_tx_queues); 178618283cadSAlexander Duyck v_budget = min_t(u16, v_budget, num_online_cpus()); 178718283cadSAlexander Duyck 178818283cadSAlexander Duyck /* account for vectors not related to queues */ 178918283cadSAlexander Duyck v_budget += NON_Q_VECTORS(hw); 179018283cadSAlexander Duyck 179118283cadSAlexander Duyck /* At the same time, hardware can only support a maximum of 179218283cadSAlexander Duyck * hw->mac.max_msix_vectors vectors. With features 179318283cadSAlexander Duyck * such as RSS and VMDq, we can easily surpass the number of Rx and Tx 179418283cadSAlexander Duyck * descriptor queues supported by our device. Thus, we cap it off in 179518283cadSAlexander Duyck * those rare cases where the CPU count also exceeds our vector limit.
179618283cadSAlexander Duyck */ 179718283cadSAlexander Duyck v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); 179818283cadSAlexander Duyck 179918283cadSAlexander Duyck /* A failure in MSI-X entry allocation is fatal. */ 180018283cadSAlexander Duyck interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 180118283cadSAlexander Duyck GFP_KERNEL); 180218283cadSAlexander Duyck if (!interface->msix_entries) 180318283cadSAlexander Duyck return -ENOMEM; 180418283cadSAlexander Duyck 180518283cadSAlexander Duyck /* populate entry values */ 180618283cadSAlexander Duyck for (vector = 0; vector < v_budget; vector++) 180718283cadSAlexander Duyck interface->msix_entries[vector].entry = vector; 180818283cadSAlexander Duyck 180918283cadSAlexander Duyck /* Attempt to enable MSI-X with requested value */ 181018283cadSAlexander Duyck v_budget = pci_enable_msix_range(interface->pdev, 181118283cadSAlexander Duyck interface->msix_entries, 181218283cadSAlexander Duyck MIN_MSIX_COUNT(hw), 181318283cadSAlexander Duyck v_budget); 181418283cadSAlexander Duyck if (v_budget < 0) { 181518283cadSAlexander Duyck kfree(interface->msix_entries); 181618283cadSAlexander Duyck interface->msix_entries = NULL; 181718283cadSAlexander Duyck return -ENOMEM; 181818283cadSAlexander Duyck } 181918283cadSAlexander Duyck 182018283cadSAlexander Duyck /* record the number of queues available for q_vectors */ 182118283cadSAlexander Duyck interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); 182218283cadSAlexander Duyck 182318283cadSAlexander Duyck return 0; 182418283cadSAlexander Duyck } 182518283cadSAlexander Duyck 1826aa3ac822SAlexander Duyck /** 1827aa3ac822SAlexander Duyck * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS 1828aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1829aa3ac822SAlexander Duyck * 1830aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for QoS 1831aa3ac822SAlexander Duyck **/ 1832aa3ac822SAlexander Duyck static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) 1833aa3ac822SAlexander Duyck { 1834aa3ac822SAlexander Duyck struct net_device *dev = interface->netdev; 1835aa3ac822SAlexander Duyck int pc, offset, rss_i, i, q_idx; 1836aa3ac822SAlexander Duyck u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; 1837aa3ac822SAlexander Duyck u8 num_pcs = netdev_get_num_tc(dev); 1838aa3ac822SAlexander Duyck 1839aa3ac822SAlexander Duyck if (num_pcs <= 1) 1840aa3ac822SAlexander Duyck return false; 1841aa3ac822SAlexander Duyck 1842aa3ac822SAlexander Duyck rss_i = interface->ring_feature[RING_F_RSS].indices; 1843aa3ac822SAlexander Duyck 1844aa3ac822SAlexander Duyck for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { 1845aa3ac822SAlexander Duyck q_idx = pc; 1846aa3ac822SAlexander Duyck for (i = 0; i < rss_i; i++) { 1847aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->reg_idx = q_idx; 1848aa3ac822SAlexander Duyck interface->tx_ring[offset + i]->qos_pc = pc; 1849aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->reg_idx = q_idx; 1850aa3ac822SAlexander Duyck interface->rx_ring[offset + i]->qos_pc = pc; 1851aa3ac822SAlexander Duyck q_idx += pc_stride; 1852aa3ac822SAlexander Duyck } 1853aa3ac822SAlexander Duyck } 1854aa3ac822SAlexander Duyck 1855aa3ac822SAlexander Duyck return true; 1856aa3ac822SAlexander Duyck } 1857aa3ac822SAlexander Duyck 1858aa3ac822SAlexander Duyck /** 1859aa3ac822SAlexander Duyck * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS 1860aa3ac822SAlexander
Duyck * @interface: Interface structure containing rings and devices 1861aa3ac822SAlexander Duyck * 1862aa3ac822SAlexander Duyck * Cache the descriptor ring offsets for RSS 1863aa3ac822SAlexander Duyck **/ 1864aa3ac822SAlexander Duyck static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) 1865aa3ac822SAlexander Duyck { 1866aa3ac822SAlexander Duyck int i; 1867aa3ac822SAlexander Duyck 1868aa3ac822SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++) 1869aa3ac822SAlexander Duyck interface->rx_ring[i]->reg_idx = i; 1870aa3ac822SAlexander Duyck 1871aa3ac822SAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++) 1872aa3ac822SAlexander Duyck interface->tx_ring[i]->reg_idx = i; 1873aa3ac822SAlexander Duyck } 1874aa3ac822SAlexander Duyck 1875aa3ac822SAlexander Duyck /** 1876aa3ac822SAlexander Duyck * fm10k_assign_rings - Map rings to network devices 1877aa3ac822SAlexander Duyck * @interface: Interface structure containing rings and devices 1878aa3ac822SAlexander Duyck * 1879aa3ac822SAlexander Duyck * This function is meant to go through and configure both the network 1880aa3ac822SAlexander Duyck * devices so that they contain rings, and configure the rings so that 1881aa3ac822SAlexander Duyck * they function with their network devices. 1882aa3ac822SAlexander Duyck **/ 1883aa3ac822SAlexander Duyck static void fm10k_assign_rings(struct fm10k_intfc *interface) 1884aa3ac822SAlexander Duyck { 1885aa3ac822SAlexander Duyck if (fm10k_cache_ring_qos(interface)) 1886aa3ac822SAlexander Duyck return; 1887aa3ac822SAlexander Duyck 1888aa3ac822SAlexander Duyck fm10k_cache_ring_rss(interface); 1889aa3ac822SAlexander Duyck } 1890aa3ac822SAlexander Duyck 189118283cadSAlexander Duyck static void fm10k_init_reta(struct fm10k_intfc *interface) 189218283cadSAlexander Duyck { 189318283cadSAlexander Duyck u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; 189418283cadSAlexander Duyck u32 reta, base; 189518283cadSAlexander Duyck 189618283cadSAlexander Duyck /* If the netdev is initialized we have to maintain the table if possible */ 189718283cadSAlexander Duyck if (interface->netdev->reg_state) { 189818283cadSAlexander Duyck for (i = FM10K_RETA_SIZE; i--;) { 189918283cadSAlexander Duyck reta = interface->reta[i]; 190018283cadSAlexander Duyck if ((((reta << 24) >> 24) < rss_i) && 190118283cadSAlexander Duyck (((reta << 16) >> 24) < rss_i) && 190218283cadSAlexander Duyck (((reta << 8) >> 24) < rss_i) && 190318283cadSAlexander Duyck (((reta) >> 24) < rss_i)) 190418283cadSAlexander Duyck continue; 190518283cadSAlexander Duyck goto repopulate_reta; 190618283cadSAlexander Duyck } 190718283cadSAlexander Duyck 190818283cadSAlexander Duyck /* do nothing if all of the elements are in bounds */ 190918283cadSAlexander Duyck return; 191018283cadSAlexander Duyck } 191118283cadSAlexander Duyck 191218283cadSAlexander Duyck repopulate_reta: 191318283cadSAlexander Duyck /* Populate the redirection table 4 entries at a time. To do this 191418283cadSAlexander Duyck * we are generating the results for n and n+2 and then interleaving 191518283cadSAlexander Duyck * those with the results for n+1 and n+3; a worked example of this packing appears in the note at the end of this file.
191618283cadSAlexander Duyck */ 191718283cadSAlexander Duyck for (i = FM10K_RETA_SIZE; i--;) { 191818283cadSAlexander Duyck /* first pass generates n and n+2 */ 191918283cadSAlexander Duyck base = ((i * 0x00040004) + 0x00020000) * rss_i; 192018283cadSAlexander Duyck reta = (base & 0x3F803F80) >> 7; 192118283cadSAlexander Duyck 192218283cadSAlexander Duyck /* second pass generates n+1 and n+3 */ 192318283cadSAlexander Duyck base += 0x00010001 * rss_i; 192418283cadSAlexander Duyck reta |= (base & 0x3F803F80) << 1; 192518283cadSAlexander Duyck 192618283cadSAlexander Duyck interface->reta[i] = reta; 192718283cadSAlexander Duyck } 192818283cadSAlexander Duyck } 192918283cadSAlexander Duyck 193018283cadSAlexander Duyck /** 193118283cadSAlexander Duyck * fm10k_init_queueing_scheme - Determine proper queueing scheme 193218283cadSAlexander Duyck * @interface: board private structure to initialize 193318283cadSAlexander Duyck * 193418283cadSAlexander Duyck * We determine which queueing scheme to use based on... 193518283cadSAlexander Duyck * - Hardware queue count (num_*_queues) 193618283cadSAlexander Duyck * - defined by miscellaneous hardware support/features (RSS, etc.) 193718283cadSAlexander Duyck **/ 193818283cadSAlexander Duyck int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) 193918283cadSAlexander Duyck { 194018283cadSAlexander Duyck int err; 194118283cadSAlexander Duyck 194218283cadSAlexander Duyck /* Number of supported queues */ 194318283cadSAlexander Duyck fm10k_set_num_queues(interface); 194418283cadSAlexander Duyck 194518283cadSAlexander Duyck /* Configure MSI-X capability */ 194618283cadSAlexander Duyck err = fm10k_init_msix_capability(interface); 194718283cadSAlexander Duyck if (err) { 194818283cadSAlexander Duyck dev_err(&interface->pdev->dev, 194918283cadSAlexander Duyck "Unable to initialize MSI-X capability\n"); 195018283cadSAlexander Duyck return err; 195118283cadSAlexander Duyck } 195218283cadSAlexander Duyck 195318283cadSAlexander Duyck /* Allocate memory for queues */ 195418283cadSAlexander Duyck err = fm10k_alloc_q_vectors(interface); 195518283cadSAlexander Duyck if (err) 195618283cadSAlexander Duyck return err; 195718283cadSAlexander Duyck 1958aa3ac822SAlexander Duyck /* Map rings to devices, and map devices to physical queues */ 1959aa3ac822SAlexander Duyck fm10k_assign_rings(interface); 1960aa3ac822SAlexander Duyck 196118283cadSAlexander Duyck /* Initialize RSS redirection table */ 196218283cadSAlexander Duyck fm10k_init_reta(interface); 196318283cadSAlexander Duyck 196418283cadSAlexander Duyck return 0; 196518283cadSAlexander Duyck } 196618283cadSAlexander Duyck 196718283cadSAlexander Duyck /** 196818283cadSAlexander Duyck * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings 196918283cadSAlexander Duyck * @interface: board private structure to clear queueing scheme on 197018283cadSAlexander Duyck * 197118283cadSAlexander Duyck * We go through and clear queueing specific resources and reset the structure 197218283cadSAlexander Duyck * to pre-load conditions 197318283cadSAlexander Duyck **/ 197418283cadSAlexander Duyck void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) 197518283cadSAlexander Duyck { 197618283cadSAlexander Duyck fm10k_free_q_vectors(interface); 197718283cadSAlexander Duyck fm10k_reset_msix_capability(interface); 197818283cadSAlexander Duyck } 1979
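/* Worked example of the RETA packing in fm10k_init_reta() above
 * (illustrative only; assumes the 128-entry table implied by the
 * divide-by-128 in the masks, i.e. (value & 0x3F80) >> 7).
 *
 * Each 32-bit word interface->reta[i] holds the queue assignment for
 * table entries 4*i .. 4*i+3, one per byte, and entry n is assigned
 * queue (n * rss_i) >> 7, so the 128 entries are split into rss_i
 * (roughly) equal contiguous blocks.
 *
 * For rss_i = 4 and i = 31 (entries 124..127):
 *   base = (31 * 0x00040004 + 0x00020000) * 4 = 0x01F801F0
 *   reta = (base & 0x3F803F80) >> 7      -> queue 3 in bytes 0 and 2
 *   base += 0x00010001 * 4               -> 0x01FC01F4
 *   reta |= (base & 0x3F803F80) << 1     -> queue 3 in bytes 1 and 3
 * giving reta[31] = 0x03030303, so entries 96..127 all map to queue 3.
 */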