// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
#include <linux/if_macvlan.h>

/**
 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer = vzalloc(size);
	if (!tx_ring->tx_buffer)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_tx_resources - allocate all queues Tx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_tx_queues; i++) {
		err = fm10k_setup_tx_resources(interface->tx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
	return err;
}

/**
 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer = vzalloc(size);
	if (!rx_ring->rx_buffer)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_rx_resources - allocate all queues Rx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_rx_queues; i++) {
		err = fm10k_setup_rx_resources(interface->rx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
	return err;
}

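/**
 * fm10k_unmap_and_free_tx_resource - unmap and free one Tx buffer
 * @ring: ring the buffer belongs to
 * @tx_buffer: buffer info structure to clean up
 *
 * Free the skb (if any) and undo the DMA mapping: an skb head entry is
 * unmapped with dma_unmap_single(), while a fragment entry is unmapped
 * as a page. The buffer info is then reset for reuse.
 **/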
void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * fm10k_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];

		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
	}

	/* reset BQL values */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * fm10k_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void fm10k_free_tx_resources(struct fm10k_ring *tx_ring)
{
	fm10k_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_clean_tx_ring(interface->tx_ring[i]);
}

/**
 * fm10k_free_all_tx_resources - Free Tx Resources for All Queues
 * @interface: board private structure
 *
 * Free all transmit software resources
 **/
static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_tx_queues;

	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
}

/**
 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer)
		return;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i];
		/* clean-up will only set page pointer to NULL */
		if (!buffer->page)
			continue;

		dma_unmap_page(rx_ring->dev, buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buffer->page);

		buffer->page = NULL;
	}

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * fm10k_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void fm10k_free_rx_resources(struct fm10k_ring *rx_ring)
{
	fm10k_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_clean_rx_ring(interface->rx_ring[i]);
}

/**
 * fm10k_free_all_rx_resources - Free Rx Resources for All Queues
 * @interface: board private structure
 *
 * Free all receive software resources
 **/
static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_rx_queues;

	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
}

/**
 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
 * @interface: board private structure
 *
 * This function allocates a range of glorts for this interface to use.
 **/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT;

	/* establish GLORT base */
	interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	interface->glort_count = 0;

	/* nothing we can do until mask is allocated */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		return;

	/* we support 3 possible GLORT configurations.
	 * 1: VFs consume all but the last 1
	 * 2: VFs and PF split glorts with possible gap between
	 * 3: VFs allocated first 64, all others belong to PF
	 */
	if (mask <= hw->iov.total_vfs) {
		interface->glort_count = 1;
		interface->glort += mask;
	} else if (mask < 64) {
		interface->glort_count = (mask + 1) / 2;
		interface->glort += interface->glort_count;
	} else {
		interface->glort_count = mask - 63;
		interface->glort += 64;
	}
}

/**
 * fm10k_restore_udp_port_info - restore UDP tunnel port configuration
 * @interface: board private structure
 *
 * This function restores the value in the tunnel_cfg register(s) after reset
 **/
static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* only the PF supports configuring tunnels */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* restore tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
			ntohs(interface->vxlan_port) |
			(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));

	/* restore Geneve tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
			ntohs(interface->geneve_port));
}

/**
 * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change
 * @dev: network interface device structure
 * @table: Tunnel table (according to tables of @fm10k_udp_tunnels)
 *
 * This function is called when a new UDP tunnel port is added or deleted.
 * Due to hardware restrictions, only one port per type can be offloaded at
 * once. Core will send to the driver a port of its choice.
 **/
static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct udp_tunnel_info ti;

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (!table)
		interface->vxlan_port = ti.port;
	else
		interface->geneve_port = ti.port;

	fm10k_restore_udp_port_info(interface);
	return 0;
}

static const struct udp_tunnel_nic_info fm10k_udp_tunnels = {
	.sync_table	= fm10k_udp_tunnel_sync,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

/**
 * fm10k_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int fm10k_open(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = fm10k_setup_all_tx_resources(interface);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = fm10k_setup_all_rx_resources(interface);
	if (err)
		goto err_setup_rx;

	/* allocate interrupt resources */
	err = fm10k_qv_request_irq(interface);
	if (err)
		goto err_req_irq;

	/* setup GLORT assignment for this port */
	fm10k_request_glort_range(interface);

	/* Notify the stack of the actual queue counts */
	err = netif_set_real_num_tx_queues(netdev,
					   interface->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev,
					   interface->num_rx_queues);
	if (err)
		goto err_set_queues;

	fm10k_up(interface);

	return 0;

err_set_queues:
	fm10k_qv_free_irq(interface);
err_req_irq:
	fm10k_free_all_rx_resources(interface);
err_setup_rx:
	fm10k_free_all_tx_resources(interface);
err_setup_tx:
	return err;
}

/**
 * fm10k_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int fm10k_close(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	fm10k_down(interface);

	fm10k_qv_free_irq(interface);

	fm10k_free_all_tx_resources(interface);
	fm10k_free_all_rx_resources(interface);

	return 0;
}

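/**
 * fm10k_xmit_frame - Transmit entry point
 * @skb: buffer to transmit
 * @dev: network interface device structure
 *
 * If the frame carries an in-band 802.1Q tag with no offloaded tag, move
 * it into the skb's hardware-accel tag field (the hardware only supports
 * offloaded tags), pad frames shorter than 17 bytes, and hand the buffer
 * to the Tx ring selected by the queue mapping.
 **/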
static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int num_tx_queues = READ_ONCE(interface->num_tx_queues);
	unsigned int r_idx = skb->queue_mapping;
	int err;

	if (!num_tx_queues)
		return NETDEV_TX_BUSY;

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    !skb_vlan_tag_present(skb)) {
		/* FM10K only supports hardware tagging, any tags in frame
		 * are considered 2nd level or "outer" tags
		 */
		struct vlan_hdr *vhdr;
		__be16 proto;

		/* make sure skb is not shared */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* make sure there is enough room to move the ethernet header */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return NETDEV_TX_OK;

		/* verify the skb head is not shared */
		err = skb_cow_head(skb, 0);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* locate VLAN header */
		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

		/* pull the 2 key pieces of data out of it */
		__vlan_hwaccel_put_tag(skb,
				       htons(ETH_P_8021Q),
				       ntohs(vhdr->h_vlan_TCI));
		proto = vhdr->h_vlan_encapsulated_proto;
		skb->protocol = (ntohs(proto) >= 1536) ? proto :
							 htons(ETH_P_802_2);

		/* squash it by moving the ethernet addresses up 4 bytes */
		memmove(skb->data + VLAN_HLEN, skb->data, 12);
		__skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* The minimum packet size for a single buffer is 17B so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		int pad_len = 17 - skb->len;

		if (skb_pad(skb, pad_len))
			return NETDEV_TX_OK;
		__skb_put(skb, pad_len);
	}

	if (r_idx >= num_tx_queues)
		r_idx %= num_tx_queues;

	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);

	return err;
}

/**
 * fm10k_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: the index of the Tx queue that timed out
 **/
static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *tx_ring;
	bool real_tx_hang = false;

	if (txqueue >= interface->num_tx_queues) {
		WARN(1, "invalid Tx queue index %u", txqueue);
		return;
	}

	tx_ring = interface->tx_ring[txqueue];
	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
		real_tx_hang = true;

#define TX_TIMEO_LIMIT 16000
	if (real_tx_hang) {
		fm10k_tx_timeout_reset(interface);
	} else {
		netif_info(interface, drv, netdev,
			   "Fake Tx hang detected with timeout of %d seconds\n",
			   netdev->watchdog_timeo / HZ);

		/* fake Tx hang - increase the kernel timeout */
		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
			netdev->watchdog_timeo *= 2;
	}
}

/**
 * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
 * @interface: board private structure
 *
 * This function checks if the PF interface's mailbox is ready before queueing
 * mailbox messages for transmission. This will prevent filling the TX mailbox
 * queue when the receiver is not ready. VF interfaces are exempt from this
 * check since it will block all PF-VF mailbox messages from being sent from
 * the VF to the PF at initialization.
 **/
static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
}

/**
 * fm10k_queue_vlan_request - Queue a VLAN update request
 * @interface: the fm10k interface structure
 * @vid: the VLAN vid
 * @vsi: VSI index number
 * @set: whether to set or clear
 *
 * This function queues up a VLAN update. For VFs, this must be sent to the
 * managing PF over the mailbox. For PFs, we'll use the same handling so that
 * it's similar to the VF. This avoids storming the PF<->VF mailbox with too
 * many VLAN updates during reset.
 */
int fm10k_queue_vlan_request(struct fm10k_intfc *interface,
			     u32 vid, u8 vsi, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	request->type = FM10K_VLAN_REQUEST;
	request->vlan.vid = vid;
	request->vlan.vsi = vsi;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

/**
 * fm10k_queue_mac_request - Queue a MAC update request
 * @interface: the fm10k interface structure
 * @glort: the target glort for this update
 * @addr: the address to update
 * @vid: the vid to update
 * @set: whether to add or remove
 *
 * This function queues up a MAC request for sending to the switch manager.
 * A separate thread monitors the queue and sends updates to the switch
 * manager. Return 0 on success, and negative error code on failure.
 **/
int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort,
			    const unsigned char *addr, u16 vid, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	if (is_multicast_ether_addr(addr))
		request->type = FM10K_MC_MAC_REQUEST;
	else
		request->type = FM10K_UC_MAC_REQUEST;

	ether_addr_copy(request->mac.addr, addr);
	request->mac.glort = glort;
	request->mac.vid = vid;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

/**
 * fm10k_clear_macvlan_queue - Cancel pending updates for a given glort
 * @interface: the fm10k interface structure
 * @glort: the target glort to clear
 * @vlans: true to clear VLAN messages, false to ignore them
 *
 * Cancel any outstanding MAC/VLAN requests for a given glort. This is
 * expected to be called when a logical port goes down.
 **/
void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
			       u16 glort, bool vlans)
{
	struct fm10k_macvlan_request *r, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&interface->macvlan_lock, flags);

	/* Free any outstanding MAC/VLAN requests for this interface */
	list_for_each_entry_safe(r, tmp, &interface->macvlan_requests, list) {
		switch (r->type) {
		case FM10K_MC_MAC_REQUEST:
		case FM10K_UC_MAC_REQUEST:
			/* Don't free requests for other interfaces */
			if (r->mac.glort != glort)
				break;
			fallthrough;
		case FM10K_VLAN_REQUEST:
			if (vlans) {
				list_del(&r->list);
				kfree(r);
			}
			break;
		}
	}

	spin_unlock_irqrestore(&interface->macvlan_lock, flags);
}

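/**
 * fm10k_uc_vlan_unsync - update a unicast address on a single VLAN
 * @netdev: network interface device structure
 * @uc_addr: unicast address to update
 *
 * Called via __dev_uc_unsync() to queue a MAC table request for @uc_addr
 * on the VLAN currently being processed; the VLAN ID and the set/clear
 * flag are both encoded in interface->vid by fm10k_update_vid().
 **/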
static int fm10k_uc_vlan_unsync(struct net_device *netdev,
				const unsigned char *uc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, uc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

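/**
 * fm10k_mc_vlan_unsync - update a multicast address on a single VLAN
 * @netdev: network interface device structure
 * @mc_addr: multicast address to update
 *
 * Multicast counterpart of fm10k_uc_vlan_unsync(), called via
 * __dev_mc_unsync() when a VLAN is added or removed.
 **/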
static int fm10k_mc_vlan_unsync(struct net_device *netdev,
				const unsigned char *mc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, mc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

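/**
 * fm10k_update_vid - add or remove a VLAN ID for this interface
 * @netdev: network interface device structure
 * @vid: VLAN ID to update
 * @set: true to add the VLAN, false to remove it
 *
 * Update the active_vlans bitmask and the per-ring default VLAN state,
 * then queue VLAN and MAC table updates and resync the unicast and
 * multicast address lists for the new VLAN.
 **/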
static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct fm10k_hw *hw = &interface->hw;
	u16 glort;
	s32 err;
	int i;

	/* updates do not apply to VLAN 0 */
	if (!vid)
		return 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	/* Verify that we have permission to add VLANs. If this is a request
	 * to remove a VLAN, we still want to allow the user to remove the
	 * VLAN device. In that case, we need to clear the bit in the
	 * active_vlans bitmask.
	 */
	if (set && hw->mac.vlan_override)
		return -EACCES;

	/* update active_vlans bitmask */
	set_bit(vid, interface->active_vlans);
	if (!set)
		clear_bit(vid, interface->active_vlans);

	/* disable the default VLAN ID on ring if we have an active VLAN */
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = interface->rx_ring[i];
		u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);

		if (test_bit(rx_vid, interface->active_vlans))
			rx_ring->vid |= FM10K_VLAN_CLEAR;
		else
			rx_ring->vid &= ~FM10K_VLAN_CLEAR;
	}

	/* If our VLAN has been overridden, there is no reason to send VLAN
	 * removal requests as they will be silently ignored.
	 */
	if (hw->mac.vlan_override)
		return 0;

	/* Do not remove default VLAN ID related entries from VLAN and MAC
	 * tables
	 */
	if (!set && vid == hw->mac.default_vid)
		return 0;

	/* Do not throw an error if the interface is down. We will sync once
	 * we come up
	 */
	if (test_bit(__FM10K_DOWN, interface->state))
		return 0;

	fm10k_mbx_lock(interface);

	/* only need to update the VLAN if not in promiscuous mode */
	if (!(netdev->flags & IFF_PROMISC)) {
		err = fm10k_queue_vlan_request(interface, vid, 0, set);
		if (err)
			goto err_out;
	}

	/* Update our base MAC address */
	err = fm10k_queue_mac_request(interface, interface->glort,
				      hw->mac.addr, vid, set);
	if (err)
		goto err_out;

	/* Update L2 accelerated macvlan addresses */
	if (l2_accel) {
		for (i = 0; i < l2_accel->size; i++) {
			struct net_device *sdev = l2_accel->macvlan[i];

			if (!sdev)
				continue;

			glort = l2_accel->dglort + 1 + i;

			fm10k_queue_mac_request(interface, glort,
						sdev->dev_addr,
						vid, set);
		}
	}

	/* set VLAN ID prior to syncing/unsyncing the VLAN */
	interface->vid = vid + (set ? VLAN_N_VID : 0);

	/* Update the unicast and multicast address list to add/drop VLAN */
	__dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
	__dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);

err_out:
	fm10k_mbx_unlock(interface);

	return err;
}

static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, true);
}

static int fm10k_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, false);
}

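/**
 * fm10k_find_next_vlan - find the next VLAN ID to synchronize
 * @interface: board private structure
 * @vid: VLAN ID to search from (exclusive)
 *
 * Return the next set bit in the active_vlans bitmask after @vid. The
 * search limit is clamped so that hw->mac.default_vid is always returned
 * once during iteration, even when it is not marked active.
 **/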
static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}

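/**
 * fm10k_clear_unused_vlans - clear VLAN table ranges with no active VLAN
 * @interface: board private structure
 *
 * Walk the gaps between active VLANs and queue length-encoded requests
 * to clear each unused range from the switch VLAN table.
 **/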
static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
	u32 vid, prev_vid;

	/* loop through and find any gaps in the table */
	for (vid = 0, prev_vid = 0;
	     prev_vid < VLAN_N_VID;
	     prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) {
		if (prev_vid == vid)
			continue;

		/* send request to clear multiple bits at a time */
		prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
		fm10k_queue_vlan_request(interface, prev_vid, 0, false);
	}
}

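/**
 * __fm10k_uc_sync - queue a unicast address update on every active VLAN
 * @dev: network interface device structure
 * @addr: address to update
 * @sync: true to add the address, false to remove it
 *
 * Queue a MAC table request for @addr on each VLAN returned by
 * fm10k_find_next_vlan().
 **/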
static int __fm10k_uc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_uc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, true);
}

static int fm10k_uc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, false);
}

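/**
 * fm10k_set_mac - change the interface's MAC address
 * @dev: network interface device structure
 * @p: pointer to a struct sockaddr holding the new address
 *
 * Sync the new address (and unsync the old one) over the mailbox when
 * the interface is up, then record the address in netdev and hw state.
 **/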
static int fm10k_set_mac(struct net_device *dev, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	struct sockaddr *addr = p;
	s32 err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		/* setting MAC address requires mailbox */
		fm10k_mbx_lock(interface);

		err = fm10k_uc_sync(dev, addr->sa_data);
		if (!err)
			fm10k_uc_unsync(dev, hw->mac.addr);

		fm10k_mbx_unlock(interface);
	}

	if (!err) {
		eth_hw_addr_set(dev, addr->sa_data);
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	}

	/* if we had a mailbox error suggest trying again */
	return err ? -EAGAIN : 0;
}

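/**
 * __fm10k_mc_sync - queue a multicast address update on every active VLAN
 * @dev: network interface device structure
 * @addr: multicast address to update
 * @sync: true to add the address, false to remove it
 *
 * Multicast counterpart of __fm10k_uc_sync().
 **/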
static int __fm10k_mc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_multicast_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_mc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, true);
}

static int fm10k_mc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, false);
}

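/**
 * fm10k_set_rx_mode - update the Rx filtering mode
 * @dev: network interface device structure
 *
 * Derive the xcast (promiscuous/allmulti/multi/none) mode from the netdev
 * flags, push it to the switch when it changes, and resync the unicast
 * and multicast address lists.
 **/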
static void fm10k_set_rx_mode(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;

	/* no need to update the hardware if we are not running */
	if (!(dev->flags & IFF_UP))
		return;

	/* determine new mode based on flags */
	xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC :
		     (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI :
		     (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* update xcast mode first, but only if it changed */
	if (interface->xcast_mode != xcast_mode) {
		/* update VLAN table when entering promiscuous mode */
		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
						 0, true);

		/* clear VLAN table when exiting promiscuous mode */
		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_clear_unused_vlans(interface);

		/* update xcast mode if host's mailbox is ready */
		if (fm10k_host_mbx_ready(interface))
			hw->mac.ops.update_xcast_mode(hw, interface->glort,
						      xcast_mode);

		/* record updated xcast mode state */
		interface->xcast_mode = xcast_mode;
	}

	/* synchronize all of the addresses */
	__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
	__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);

	fm10k_mbx_unlock(interface);
}

10818f5e20d4SAlexander Duyck void fm10k_restore_rx_state(struct fm10k_intfc *interface)
10828f5e20d4SAlexander Duyck {
108385062f85SAlexander Duyck struct fm10k_l2_accel *l2_accel = interface->l2_accel;
10848f5e20d4SAlexander Duyck struct net_device *netdev = interface->netdev;
10858f5e20d4SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
108685062f85SAlexander Duyck int xcast_mode, i;
10878f5e20d4SAlexander Duyck u16 vid, glort;
10888f5e20d4SAlexander Duyck
10898f5e20d4SAlexander Duyck /* record glort for this interface */
10908f5e20d4SAlexander Duyck glort = interface->glort;
10918f5e20d4SAlexander Duyck
10928f5e20d4SAlexander Duyck /* convert interface flags to xcast mode */
10938f5e20d4SAlexander Duyck if (netdev->flags & IFF_PROMISC)
10948f5e20d4SAlexander Duyck xcast_mode = FM10K_XCAST_MODE_PROMISC;
10958f5e20d4SAlexander Duyck else if (netdev->flags & IFF_ALLMULTI)
10968f5e20d4SAlexander Duyck xcast_mode = FM10K_XCAST_MODE_ALLMULTI;
10978f5e20d4SAlexander Duyck else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST))
10988f5e20d4SAlexander Duyck xcast_mode = FM10K_XCAST_MODE_MULTI;
10998f5e20d4SAlexander Duyck else
11008f5e20d4SAlexander Duyck xcast_mode = FM10K_XCAST_MODE_NONE;
11018f5e20d4SAlexander Duyck
11028f5e20d4SAlexander Duyck fm10k_mbx_lock(interface);
11038f5e20d4SAlexander Duyck
11047d4fe0d1SNgai-Mint Kwan /* Enable logical port if host's mailbox is ready */
11057d4fe0d1SNgai-Mint Kwan if (fm10k_host_mbx_ready(interface))
11067d4fe0d1SNgai-Mint Kwan hw->mac.ops.update_lport_state(hw, glort,
11077d4fe0d1SNgai-Mint Kwan interface->glort_count, true);
11088f5e20d4SAlexander Duyck
11098f5e20d4SAlexander Duyck /* update VLAN table */
1110fc917368SJacob Keller fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
11118f5e20d4SAlexander Duyck xcast_mode == FM10K_XCAST_MODE_PROMISC);
11128f5e20d4SAlexander Duyck
11138f5e20d4SAlexander Duyck /* update table with current entries */
111474d2950cSJacob Keller for (vid = fm10k_find_next_vlan(interface, 0);
11158f5e20d4SAlexander Duyck vid < VLAN_N_VID;
11168f5e20d4SAlexander Duyck vid = fm10k_find_next_vlan(interface, vid)) {
1117fc917368SJacob Keller fm10k_queue_vlan_request(interface, vid, 0, true);
11187d4fe0d1SNgai-Mint Kwan
1119fc917368SJacob Keller fm10k_queue_mac_request(interface, glort,
1120fc917368SJacob Keller hw->mac.addr, vid, true);
11213c6a67ddSJacob Keller
11223c6a67ddSJacob Keller /* synchronize macvlan addresses */
11233c6a67ddSJacob Keller if (l2_accel) {
11243c6a67ddSJacob Keller for (i = 0; i < l2_accel->size; i++) {
11253c6a67ddSJacob Keller struct net_device *sdev = l2_accel->macvlan[i];
11263c6a67ddSJacob Keller
11273c6a67ddSJacob Keller if (!sdev)
11283c6a67ddSJacob Keller continue;
11293c6a67ddSJacob Keller
11303c6a67ddSJacob Keller glort = l2_accel->dglort + 1 + i;
11313c6a67ddSJacob Keller
11323c6a67ddSJacob Keller fm10k_queue_mac_request(interface, glort,
11333c6a67ddSJacob Keller sdev->dev_addr,
11343c6a67ddSJacob Keller vid, true);
11353c6a67ddSJacob Keller }
11363c6a67ddSJacob Keller }
11378f5e20d4SAlexander Duyck }
11388f5e20d4SAlexander Duyck
11397d4fe0d1SNgai-Mint Kwan /* update xcast mode before synchronizing addresses if host's mailbox
11407d4fe0d1SNgai-Mint Kwan * is ready
11417d4fe0d1SNgai-Mint Kwan */
11427d4fe0d1SNgai-Mint Kwan if (fm10k_host_mbx_ready(interface))
1143a7731cc8SJeff Kirsher hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
1144a7731cc8SJeff Kirsher
1145eca32047SMatthew Vick /* synchronize all of the addresses */
11468f5e20d4SAlexander Duyck __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
11478f5e20d4SAlexander Duyck __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
11488f5e20d4SAlexander Duyck
114985062f85SAlexander Duyck /* synchronize macvlan addresses */
115085062f85SAlexander Duyck if (l2_accel) {
115185062f85SAlexander Duyck for (i = 0; i < l2_accel->size; i++) {
115285062f85SAlexander Duyck struct net_device *sdev = l2_accel->macvlan[i];
115385062f85SAlexander Duyck
115485062f85SAlexander Duyck if (!sdev)
115585062f85SAlexander Duyck continue;
115685062f85SAlexander Duyck
115785062f85SAlexander Duyck glort = l2_accel->dglort + 1 + i;
115885062f85SAlexander Duyck
115985062f85SAlexander Duyck hw->mac.ops.update_xcast_mode(hw, glort,
116081d4e91cSAlexander Duyck FM10K_XCAST_MODE_NONE);
116185062f85SAlexander Duyck fm10k_queue_mac_request(interface, glort,
116285062f85SAlexander Duyck sdev->dev_addr,
116385062f85SAlexander Duyck hw->mac.default_vid, true);
116485062f85SAlexander Duyck }
116585062f85SAlexander Duyck }
116685062f85SAlexander Duyck
11678f5e20d4SAlexander Duyck fm10k_mbx_unlock(interface);
11688f5e20d4SAlexander Duyck
11698f5e20d4SAlexander Duyck /* record updated xcast mode state */
11708f5e20d4SAlexander Duyck interface->xcast_mode = xcast_mode;
117176a540d4SAlexander Duyck
117276a540d4SAlexander Duyck /* Restore tunnel configuration */
1173f92e0e48SJacob Keller fm10k_restore_udp_port_info(interface);
11748f5e20d4SAlexander Duyck }
11758f5e20d4SAlexander Duyck
11768f5e20d4SAlexander Duyck void fm10k_reset_rx_state(struct fm10k_intfc *interface)
11778f5e20d4SAlexander Duyck {
11788f5e20d4SAlexander Duyck struct net_device *netdev = interface->netdev;
11798f5e20d4SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
11808f5e20d4SAlexander Duyck
1181fc917368SJacob Keller /* Wait for MAC/VLAN work to finish */
1182fc917368SJacob Keller while (test_bit(__FM10K_MACVLAN_SCHED, interface->state))
1183fc917368SJacob Keller usleep_range(1000, 2000);
1184fc917368SJacob Keller
1185fc917368SJacob Keller /* Cancel pending MAC/VLAN requests */
1186fc917368SJacob Keller fm10k_clear_macvlan_queue(interface, interface->glort, true);
1187fc917368SJacob Keller
11888f5e20d4SAlexander Duyck fm10k_mbx_lock(interface);
11898f5e20d4SAlexander Duyck
11907d4fe0d1SNgai-Mint Kwan /* clear the logical port state on lower device if host's mailbox is
11917d4fe0d1SNgai-Mint Kwan * ready
11927d4fe0d1SNgai-Mint Kwan */
11937d4fe0d1SNgai-Mint Kwan if (fm10k_host_mbx_ready(interface))
11948f5e20d4SAlexander Duyck hw->mac.ops.update_lport_state(hw, interface->glort,
11958f5e20d4SAlexander Duyck interface->glort_count, false);
11968f5e20d4SAlexander Duyck
11978f5e20d4SAlexander Duyck fm10k_mbx_unlock(interface);
11988f5e20d4SAlexander Duyck
11998f5e20d4SAlexander Duyck /* reset flags to default state */
12008f5e20d4SAlexander Duyck interface->xcast_mode = FM10K_XCAST_MODE_NONE;
12018f5e20d4SAlexander Duyck
12028f5e20d4SAlexander Duyck /* clear the sync flag since the lport has been dropped */
12038f5e20d4SAlexander Duyck __dev_uc_unsync(netdev, NULL);
12048f5e20d4SAlexander Duyck __dev_mc_unsync(netdev, NULL);
12050e7b3644SAlexander Duyck }
12060e7b3644SAlexander Duyck
1207e27ef599SAlexander Duyck /**
1208e27ef599SAlexander Duyck * fm10k_get_stats64 - Get System Network Statistics
1209e27ef599SAlexander Duyck * @netdev: network interface device structure
1210e27ef599SAlexander Duyck * @stats: storage space for 64bit statistics
1211e27ef599SAlexander Duyck *
121202957703SJacob Keller * Obtain 64bit statistics in a way that is safe for both 32bit and 64bit
121302957703SJacob Keller * architectures.
1214e27ef599SAlexander Duyck */
1215bc1f4470Sstephen hemminger static void fm10k_get_stats64(struct net_device *netdev,
1216e27ef599SAlexander Duyck struct rtnl_link_stats64 *stats)
1217e27ef599SAlexander Duyck {
1218e27ef599SAlexander Duyck struct fm10k_intfc *interface = netdev_priv(netdev);
1219e27ef599SAlexander Duyck struct fm10k_ring *ring;
1220e27ef599SAlexander Duyck unsigned int start, i;
1221e27ef599SAlexander Duyck u64 bytes, packets;
1222e27ef599SAlexander Duyck
1223e27ef599SAlexander Duyck rcu_read_lock();
1224e27ef599SAlexander Duyck
1225e27ef599SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++) {
1226ce4dad2cSJacob Keller ring = READ_ONCE(interface->rx_ring[i]);
1227e27ef599SAlexander Duyck
1228e27ef599SAlexander Duyck if (!ring)
1229e27ef599SAlexander Duyck continue;
1230e27ef599SAlexander Duyck
1231e27ef599SAlexander Duyck do {
1232*068c38adSThomas Gleixner start = u64_stats_fetch_begin(&ring->syncp);
1233e27ef599SAlexander Duyck packets = ring->stats.packets;
1234e27ef599SAlexander Duyck bytes = ring->stats.bytes;
1235*068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&ring->syncp, start));
1236e27ef599SAlexander Duyck
1237e27ef599SAlexander Duyck stats->rx_packets += packets;
1238e27ef599SAlexander Duyck stats->rx_bytes += bytes;
1239e27ef599SAlexander Duyck }
1240e27ef599SAlexander Duyck
1241e27ef599SAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++) {
1242ce4dad2cSJacob Keller ring = READ_ONCE(interface->tx_ring[i]);
1243e27ef599SAlexander Duyck
1244e27ef599SAlexander Duyck if (!ring)
1245e27ef599SAlexander Duyck continue;
1246e27ef599SAlexander Duyck
1247e27ef599SAlexander Duyck do {
1248*068c38adSThomas Gleixner start = u64_stats_fetch_begin(&ring->syncp);
1249e27ef599SAlexander Duyck packets = ring->stats.packets;
1250e27ef599SAlexander Duyck bytes = ring->stats.bytes;
1251*068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&ring->syncp, start));
1252e27ef599SAlexander Duyck
1253e27ef599SAlexander Duyck stats->tx_packets += packets;
1254e27ef599SAlexander Duyck stats->tx_bytes += bytes;
1255e27ef599SAlexander Duyck }
1256e27ef599SAlexander Duyck
1257e27ef599SAlexander Duyck rcu_read_unlock();
1258e27ef599SAlexander Duyck
1259e27ef599SAlexander Duyck /* following stats updated by fm10k_service_task() */
1260e27ef599SAlexander Duyck stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1261e27ef599SAlexander Duyck }
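
/* For reference: the reader loop above pairs with writer-side updates in the
 * ring cleanup paths elsewhere in the driver. A minimal sketch of that
 * writer pattern, assuming a ring whose syncp was set up with
 * u64_stats_init() (total_packets/total_bytes are placeholder names):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.packets += total_packets;
 *	ring->stats.bytes += total_bytes;
 *	u64_stats_update_end(&ring->syncp);
 */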
1262e27ef599SAlexander Duyck
1263aa3ac822SAlexander Duyck int fm10k_setup_tc(struct net_device *dev, u8 tc)
1264aa3ac822SAlexander Duyck {
1265aa3ac822SAlexander Duyck struct fm10k_intfc *interface = netdev_priv(dev);
126609f8a82bSAlexander Duyck int err;
1267aa3ac822SAlexander Duyck
1268aa3ac822SAlexander Duyck /* Currently only the PF supports priority classes */
1269aa3ac822SAlexander Duyck if (tc && (interface->hw.mac.type != fm10k_mac_pf))
1270aa3ac822SAlexander Duyck return -EINVAL;
1271aa3ac822SAlexander Duyck
1272aa3ac822SAlexander Duyck /* Hardware supports up to 8 traffic classes */
1273aa3ac822SAlexander Duyck if (tc > 8)
1274aa3ac822SAlexander Duyck return -EINVAL;
1275aa3ac822SAlexander Duyck
1276aa3ac822SAlexander Duyck /* Hardware has to reinitialize queues to match packet
1277aa3ac822SAlexander Duyck * buffer alignment. Unfortunately, the hardware is not
1278aa3ac822SAlexander Duyck * flexible enough to do this dynamically.
1279aa3ac822SAlexander Duyck */
1280aa3ac822SAlexander Duyck if (netif_running(dev))
1281aa3ac822SAlexander Duyck fm10k_close(dev);
1282aa3ac822SAlexander Duyck
1283aa3ac822SAlexander Duyck fm10k_mbx_free_irq(interface);
1284aa3ac822SAlexander Duyck
1285aa3ac822SAlexander Duyck fm10k_clear_queueing_scheme(interface);
1286aa3ac822SAlexander Duyck
1287aa3ac822SAlexander Duyck /* we expect the prio_tc map to be repopulated later */
1288aa3ac822SAlexander Duyck netdev_reset_tc(dev);
1289aa3ac822SAlexander Duyck netdev_set_num_tc(dev, tc);
1290aa3ac822SAlexander Duyck
129109f8a82bSAlexander Duyck err = fm10k_init_queueing_scheme(interface);
129209f8a82bSAlexander Duyck if (err)
129309f8a82bSAlexander Duyck goto err_queueing_scheme;
1294aa3ac822SAlexander Duyck
129509f8a82bSAlexander Duyck err = fm10k_mbx_request_irq(interface);
129609f8a82bSAlexander Duyck if (err)
129709f8a82bSAlexander Duyck goto err_mbx_irq;
1298aa3ac822SAlexander Duyck
129909f8a82bSAlexander Duyck err = netif_running(dev) ? fm10k_open(dev) : 0;
130009f8a82bSAlexander Duyck if (err)
130109f8a82bSAlexander Duyck goto err_open;
1302aa3ac822SAlexander Duyck
1303aa3ac822SAlexander Duyck /* flag to indicate SWPRI has yet to be updated */
13043ee7b3a3SJacob Keller set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
1305aa3ac822SAlexander Duyck
1306aa3ac822SAlexander Duyck return 0;
130709f8a82bSAlexander Duyck err_open:
130809f8a82bSAlexander Duyck fm10k_mbx_free_irq(interface);
130909f8a82bSAlexander Duyck err_mbx_irq:
131009f8a82bSAlexander Duyck fm10k_clear_queueing_scheme(interface);
131109f8a82bSAlexander Duyck err_queueing_scheme:
131209f8a82bSAlexander Duyck netif_device_detach(dev);
131309f8a82bSAlexander Duyck
131409f8a82bSAlexander Duyck return err;
1315aa3ac822SAlexander Duyck }
1316aa3ac822SAlexander Duyck
13172572ac53SJiri Pirko static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
1318de4784caSJiri Pirko void *type_data)
1319e4c6734eSJohn Fastabend {
1320de4784caSJiri Pirko struct tc_mqprio_qopt *mqprio = type_data;
1321de4784caSJiri Pirko
1322575ed7d3SNogah Frankel if (type != TC_SETUP_QDISC_MQPRIO)
132338cf0426SJiri Pirko return -EOPNOTSUPP;
1324e4c6734eSJohn Fastabend
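	/* offload the TC count only; queue counts stay under stack control */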
1325de4784caSJiri Pirko mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
132656f36acdSAmritha Nambiar
1327de4784caSJiri Pirko return fm10k_setup_tc(dev, mqprio->num_tc);
1328e4c6734eSJohn Fastabend }
1329e4c6734eSJohn Fastabend
13305cd5e2e9SAlexander Duyck static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
13315cd5e2e9SAlexander Duyck struct fm10k_l2_accel *l2_accel)
13325cd5e2e9SAlexander Duyck {
13335cd5e2e9SAlexander Duyck int i;
13345cd5e2e9SAlexander Duyck
13355cd5e2e9SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++) {
1336df87b8fcSJacob Keller struct fm10k_ring *ring = interface->rx_ring[i];
1337df87b8fcSJacob Keller
13385cd5e2e9SAlexander Duyck rcu_assign_pointer(ring->l2_accel, l2_accel);
13395cd5e2e9SAlexander Duyck }
13405cd5e2e9SAlexander Duyck
13415cd5e2e9SAlexander Duyck interface->l2_accel = l2_accel;
13425cd5e2e9SAlexander Duyck }
13435cd5e2e9SAlexander Duyck
13445cd5e2e9SAlexander Duyck static void *fm10k_dfwd_add_station(struct net_device *dev,
13455cd5e2e9SAlexander Duyck struct net_device *sdev)
13465cd5e2e9SAlexander Duyck {
13475cd5e2e9SAlexander Duyck struct fm10k_intfc *interface = netdev_priv(dev);
13485cd5e2e9SAlexander Duyck struct fm10k_l2_accel *l2_accel = interface->l2_accel;
13495cd5e2e9SAlexander Duyck struct fm10k_l2_accel *old_l2_accel = NULL;
13505cd5e2e9SAlexander Duyck struct fm10k_dglort_cfg dglort = { 0 };
13515cd5e2e9SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
1352cb1b5226SJacob Keller int size, i;
13533c6a67ddSJacob Keller u16 vid, glort;
13545cd5e2e9SAlexander Duyck
13553335915dSAlexander Duyck /* The hardware supported by fm10k only filters on the destination MAC
13563335915dSAlexander Duyck * address. In order to avoid issues we only support offloading modes
13573335915dSAlexander Duyck * where the hardware can actually provide the functionality.
13583335915dSAlexander Duyck */
13593335915dSAlexander Duyck if (!macvlan_supports_dest_filter(sdev))
13603335915dSAlexander Duyck return ERR_PTR(-EMEDIUMTYPE);
13613335915dSAlexander Duyck
13625cd5e2e9SAlexander Duyck /* allocate l2 accel structure if it is not available */
13635cd5e2e9SAlexander Duyck if (!l2_accel) {
13645cd5e2e9SAlexander Duyck 		/* verify there are enough free GLORTs to support l2_accel */
13655cd5e2e9SAlexander Duyck if (interface->glort_count < 7)
13665cd5e2e9SAlexander Duyck return ERR_PTR(-EBUSY);
13675cd5e2e9SAlexander Duyck
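		/* size the allocation as the header plus a seven-entry
		 * macvlan flexible array, computed via offsetof()
		 */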
13685cd5e2e9SAlexander Duyck size = offsetof(struct fm10k_l2_accel, macvlan[7]);
13695cd5e2e9SAlexander Duyck l2_accel = kzalloc(size, GFP_KERNEL);
13705cd5e2e9SAlexander Duyck if (!l2_accel)
13715cd5e2e9SAlexander Duyck return ERR_PTR(-ENOMEM);
13725cd5e2e9SAlexander Duyck
13735cd5e2e9SAlexander Duyck l2_accel->size = 7;
13745cd5e2e9SAlexander Duyck l2_accel->dglort = interface->glort;
13755cd5e2e9SAlexander Duyck
13765cd5e2e9SAlexander Duyck /* update pointers */
13775cd5e2e9SAlexander Duyck fm10k_assign_l2_accel(interface, l2_accel);
13785cd5e2e9SAlexander Duyck /* do not expand if we are at our limit */
13795cd5e2e9SAlexander Duyck } else if ((l2_accel->count == FM10K_MAX_STATIONS) ||
13805cd5e2e9SAlexander Duyck (l2_accel->count == (interface->glort_count - 1))) {
13815cd5e2e9SAlexander Duyck return ERR_PTR(-EBUSY);
13825cd5e2e9SAlexander Duyck /* expand if we have hit the size limit */
13835cd5e2e9SAlexander Duyck } else if (l2_accel->count == l2_accel->size) {
13845cd5e2e9SAlexander Duyck old_l2_accel = l2_accel;
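		/* grow the table geometrically to (2 * size) + 1 entries */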
13855cd5e2e9SAlexander Duyck size = offsetof(struct fm10k_l2_accel,
13865cd5e2e9SAlexander Duyck macvlan[(l2_accel->size * 2) + 1]);
13875cd5e2e9SAlexander Duyck l2_accel = kzalloc(size, GFP_KERNEL);
13885cd5e2e9SAlexander Duyck if (!l2_accel)
13895cd5e2e9SAlexander Duyck return ERR_PTR(-ENOMEM);
13905cd5e2e9SAlexander Duyck
13915cd5e2e9SAlexander Duyck memcpy(l2_accel, old_l2_accel,
13925cd5e2e9SAlexander Duyck offsetof(struct fm10k_l2_accel,
13935cd5e2e9SAlexander Duyck macvlan[old_l2_accel->size]));
13945cd5e2e9SAlexander Duyck
13955cd5e2e9SAlexander Duyck l2_accel->size = (old_l2_accel->size * 2) + 1;
13965cd5e2e9SAlexander Duyck
13975cd5e2e9SAlexander Duyck /* update pointers */
13985cd5e2e9SAlexander Duyck fm10k_assign_l2_accel(interface, l2_accel);
13995cd5e2e9SAlexander Duyck kfree_rcu(old_l2_accel, rcu);
14005cd5e2e9SAlexander Duyck }
14015cd5e2e9SAlexander Duyck
14025cd5e2e9SAlexander Duyck /* add macvlan to accel table, and record GLORT for position */
14035cd5e2e9SAlexander Duyck for (i = 0; i < l2_accel->size; i++) {
14045cd5e2e9SAlexander Duyck if (!l2_accel->macvlan[i])
14055cd5e2e9SAlexander Duyck break;
14065cd5e2e9SAlexander Duyck }
14075cd5e2e9SAlexander Duyck
14085cd5e2e9SAlexander Duyck /* record station */
14095cd5e2e9SAlexander Duyck l2_accel->macvlan[i] = sdev;
14105cd5e2e9SAlexander Duyck l2_accel->count++;
14115cd5e2e9SAlexander Duyck
14125cd5e2e9SAlexander Duyck /* configure default DGLORT mapping for RSS/DCB */
14135cd5e2e9SAlexander Duyck dglort.idx = fm10k_dglort_pf_rss;
14145cd5e2e9SAlexander Duyck dglort.inner_rss = 1;
14155cd5e2e9SAlexander Duyck dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
14165cd5e2e9SAlexander Duyck dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
14175cd5e2e9SAlexander Duyck dglort.glort = interface->glort;
14185cd5e2e9SAlexander Duyck dglort.shared_l = fls(l2_accel->size);
14195cd5e2e9SAlexander Duyck hw->mac.ops.configure_dglort_map(hw, &dglort);
14205cd5e2e9SAlexander Duyck
14215cd5e2e9SAlexander Duyck /* Add rules for this specific dglort to the switch */
14225cd5e2e9SAlexander Duyck fm10k_mbx_lock(interface);
14235cd5e2e9SAlexander Duyck
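	/* each macvlan station is addressed at the base DGLORT + 1 + its
	 * table index
	 */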
14245cd5e2e9SAlexander Duyck glort = l2_accel->dglort + 1 + i;
14257d4fe0d1SNgai-Mint Kwan
14260a3e92deSJacob Keller if (fm10k_host_mbx_ready(interface))
14277d4fe0d1SNgai-Mint Kwan hw->mac.ops.update_xcast_mode(hw, glort,
142881d4e91cSAlexander Duyck FM10K_XCAST_MODE_NONE);
14290a3e92deSJacob Keller
1430fc917368SJacob Keller fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
143185062f85SAlexander Duyck hw->mac.default_vid, true);
14325cd5e2e9SAlexander Duyck
14333c6a67ddSJacob Keller for (vid = fm10k_find_next_vlan(interface, 0);
14343c6a67ddSJacob Keller vid < VLAN_N_VID;
14353c6a67ddSJacob Keller vid = fm10k_find_next_vlan(interface, vid))
14363c6a67ddSJacob Keller fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
14373c6a67ddSJacob Keller vid, true);
14383c6a67ddSJacob Keller
14395cd5e2e9SAlexander Duyck fm10k_mbx_unlock(interface);
14405cd5e2e9SAlexander Duyck
14415cd5e2e9SAlexander Duyck return sdev;
14425cd5e2e9SAlexander Duyck }
14435cd5e2e9SAlexander Duyck
14445cd5e2e9SAlexander Duyck static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
14455cd5e2e9SAlexander Duyck {
14465cd5e2e9SAlexander Duyck struct fm10k_intfc *interface = netdev_priv(dev);
1447ce4dad2cSJacob Keller struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel);
14485cd5e2e9SAlexander Duyck struct fm10k_dglort_cfg dglort = { 0 };
14495cd5e2e9SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
14505cd5e2e9SAlexander Duyck struct net_device *sdev = priv;
14513c6a67ddSJacob Keller u16 vid, glort;
14525cd5e2e9SAlexander Duyck int i;
14535cd5e2e9SAlexander Duyck
14545cd5e2e9SAlexander Duyck if (!l2_accel)
14555cd5e2e9SAlexander Duyck return;
14565cd5e2e9SAlexander Duyck
14575cd5e2e9SAlexander Duyck /* search table for matching interface */
14585cd5e2e9SAlexander Duyck for (i = 0; i < l2_accel->size; i++) {
14595cd5e2e9SAlexander Duyck if (l2_accel->macvlan[i] == sdev)
14605cd5e2e9SAlexander Duyck break;
14615cd5e2e9SAlexander Duyck }
14625cd5e2e9SAlexander Duyck
14635cd5e2e9SAlexander Duyck /* exit if macvlan not found */
14645cd5e2e9SAlexander Duyck if (i == l2_accel->size)
14655cd5e2e9SAlexander Duyck return;
14665cd5e2e9SAlexander Duyck
14675cd5e2e9SAlexander Duyck /* Remove any rules specific to this dglort */
14685cd5e2e9SAlexander Duyck fm10k_mbx_lock(interface);
14695cd5e2e9SAlexander Duyck
14705cd5e2e9SAlexander Duyck glort = l2_accel->dglort + 1 + i;
14717d4fe0d1SNgai-Mint Kwan
14720a3e92deSJacob Keller if (fm10k_host_mbx_ready(interface))
14737d4fe0d1SNgai-Mint Kwan hw->mac.ops.update_xcast_mode(hw, glort,
14747d4fe0d1SNgai-Mint Kwan FM10K_XCAST_MODE_NONE);
14750a3e92deSJacob Keller
1476fc917368SJacob Keller fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
147785062f85SAlexander Duyck hw->mac.default_vid, false);
14785cd5e2e9SAlexander Duyck
14793c6a67ddSJacob Keller for (vid = fm10k_find_next_vlan(interface, 0);
14803c6a67ddSJacob Keller vid < VLAN_N_VID;
14813c6a67ddSJacob Keller vid = fm10k_find_next_vlan(interface, vid))
14823c6a67ddSJacob Keller fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
14833c6a67ddSJacob Keller vid, false);
14843c6a67ddSJacob Keller
14855cd5e2e9SAlexander Duyck fm10k_mbx_unlock(interface);
14865cd5e2e9SAlexander Duyck
14875cd5e2e9SAlexander Duyck /* record removal */
14885cd5e2e9SAlexander Duyck l2_accel->macvlan[i] = NULL;
14895cd5e2e9SAlexander Duyck l2_accel->count--;
14905cd5e2e9SAlexander Duyck
14915cd5e2e9SAlexander Duyck /* configure default DGLORT mapping for RSS/DCB */
14925cd5e2e9SAlexander Duyck dglort.idx = fm10k_dglort_pf_rss;
14935cd5e2e9SAlexander Duyck dglort.inner_rss = 1;
14945cd5e2e9SAlexander Duyck dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
14955cd5e2e9SAlexander Duyck dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
14965cd5e2e9SAlexander Duyck dglort.glort = interface->glort;
14975cd5e2e9SAlexander Duyck dglort.shared_l = fls(l2_accel->size);
14985cd5e2e9SAlexander Duyck hw->mac.ops.configure_dglort_map(hw, &dglort);
14995cd5e2e9SAlexander Duyck
15005cd5e2e9SAlexander Duyck /* If table is empty remove it */
15015cd5e2e9SAlexander Duyck if (l2_accel->count == 0) {
15025cd5e2e9SAlexander Duyck fm10k_assign_l2_accel(interface, NULL);
15035cd5e2e9SAlexander Duyck kfree_rcu(l2_accel, rcu);
15045cd5e2e9SAlexander Duyck }
15055cd5e2e9SAlexander Duyck }
15065cd5e2e9SAlexander Duyck
15075bf33dc6SMatthew Vick static netdev_features_t fm10k_features_check(struct sk_buff *skb,
15085bf33dc6SMatthew Vick struct net_device *dev,
15095bf33dc6SMatthew Vick netdev_features_t features)
15105bf33dc6SMatthew Vick {
15115bf33dc6SMatthew Vick if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
15125bf33dc6SMatthew Vick return features;
15135bf33dc6SMatthew Vick
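	/* hardware cannot offload this encapsulated frame; strip checksum
	 * and GSO features so the stack handles them in software
	 */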
1514a188222bSTom Herbert return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
15155bf33dc6SMatthew Vick }
15165bf33dc6SMatthew Vick
15170e7b3644SAlexander Duyck static const struct net_device_ops fm10k_netdev_ops = {
1518504c5eacSAlexander Duyck .ndo_open = fm10k_open,
1519504c5eacSAlexander Duyck .ndo_stop = fm10k_close,
15200e7b3644SAlexander Duyck .ndo_validate_addr = eth_validate_addr,
15210e7b3644SAlexander Duyck .ndo_start_xmit = fm10k_xmit_frame,
15220e7b3644SAlexander Duyck .ndo_set_mac_address = fm10k_set_mac,
1523b101c962SAlexander Duyck .ndo_tx_timeout = fm10k_tx_timeout,
15248f5e20d4SAlexander Duyck .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid,
15258f5e20d4SAlexander Duyck .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid,
15260e7b3644SAlexander Duyck .ndo_set_rx_mode = fm10k_set_rx_mode,
1527e27ef599SAlexander Duyck .ndo_get_stats64 = fm10k_get_stats64,
1528e4c6734eSJohn Fastabend .ndo_setup_tc = __fm10k_setup_tc,
1529883a9ccbSAlexander Duyck .ndo_set_vf_mac = fm10k_ndo_set_vf_mac,
1530883a9ccbSAlexander Duyck .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
1531883a9ccbSAlexander Duyck .ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
1532883a9ccbSAlexander Duyck .ndo_get_vf_config = fm10k_ndo_get_vf_config,
15330e100440SJacob Keller .ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
15345cd5e2e9SAlexander Duyck .ndo_dfwd_add_station = fm10k_dfwd_add_station,
15355cd5e2e9SAlexander Duyck .ndo_dfwd_del_station = fm10k_dfwd_del_station,
15365bf33dc6SMatthew Vick .ndo_features_check = fm10k_features_check,
15370e7b3644SAlexander Duyck };
15380e7b3644SAlexander Duyck
15390e7b3644SAlexander Duyck #define DEFAULT_DEBUG_LEVEL_SHIFT 3
15400e7b3644SAlexander Duyck
1541e0244903SJacob Keller struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
15420e7b3644SAlexander Duyck {
1543e0244903SJacob Keller netdev_features_t hw_features;
15440e7b3644SAlexander Duyck struct fm10k_intfc *interface;
15450e7b3644SAlexander Duyck struct net_device *dev;
15460e7b3644SAlexander Duyck
1547e27ef599SAlexander Duyck dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES);
15480e7b3644SAlexander Duyck if (!dev)
15490e7b3644SAlexander Duyck return NULL;
15500e7b3644SAlexander Duyck
15510e7b3644SAlexander Duyck /* set net device and ethtool ops */
15520e7b3644SAlexander Duyck dev->netdev_ops = &fm10k_netdev_ops;
155382dd0f7eSAlexander Duyck fm10k_set_ethtool_ops(dev);
15540e7b3644SAlexander Duyck
15550e7b3644SAlexander Duyck /* configure default debug level */
15560e7b3644SAlexander Duyck interface = netdev_priv(dev);
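	/* BIT(3) - 1 == 0x7, i.e. NETIF_MSG_DRV, NETIF_MSG_PROBE and
	 * NETIF_MSG_LINK enabled by default
	 */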
1557fcdb0a99SBruce Allan interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
15580e7b3644SAlexander Duyck
15590e7b3644SAlexander Duyck /* configure default features */
156076a540d4SAlexander Duyck dev->features |= NETIF_F_IP_CSUM |
156176a540d4SAlexander Duyck NETIF_F_IPV6_CSUM |
156276a540d4SAlexander Duyck NETIF_F_SG |
156376a540d4SAlexander Duyck NETIF_F_TSO |
156476a540d4SAlexander Duyck NETIF_F_TSO6 |
156576a540d4SAlexander Duyck NETIF_F_TSO_ECN |
156676a540d4SAlexander Duyck NETIF_F_RXHASH |
156776a540d4SAlexander Duyck NETIF_F_RXCSUM;
15680e7b3644SAlexander Duyck
1569e0244903SJacob Keller /* Only the PF can support VXLAN and NVGRE tunnel offloads */
1570e0244903SJacob Keller if (info->mac == fm10k_mac_pf) {
1571e0244903SJacob Keller dev->hw_enc_features = NETIF_F_IP_CSUM |
157276a540d4SAlexander Duyck NETIF_F_TSO |
157376a540d4SAlexander Duyck NETIF_F_TSO6 |
157476a540d4SAlexander Duyck NETIF_F_TSO_ECN |
157576a540d4SAlexander Duyck NETIF_F_GSO_UDP_TUNNEL |
1576e0244903SJacob Keller NETIF_F_IPV6_CSUM |
1577e0244903SJacob Keller NETIF_F_SG;
1578e0244903SJacob Keller
1579e0244903SJacob Keller dev->features |= NETIF_F_GSO_UDP_TUNNEL;
1580f7529b4bSJakub Kicinski
1581f7529b4bSJakub Kicinski dev->udp_tunnel_nic_info = &fm10k_udp_tunnels;
1582e0244903SJacob Keller }
1583e0244903SJacob Keller
1584e0244903SJacob Keller /* all features defined to this point should be changeable */
1585e0244903SJacob Keller hw_features = dev->features;
1586e0244903SJacob Keller
1587e0244903SJacob Keller /* allow user to enable L2 forwarding acceleration */
1588e0244903SJacob Keller hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
1589e0244903SJacob Keller
1590e0244903SJacob Keller /* configure VLAN features */
1591e0244903SJacob Keller dev->vlan_features |= dev->features;
15920e7b3644SAlexander Duyck
15938f5e20d4SAlexander Duyck /* we want to leave these both on as we cannot disable VLAN tag
15948f5e20d4SAlexander Duyck * insertion or stripping on the hardware since it is contained
15958f5e20d4SAlexander Duyck * in the FTAG and not in the frame itself.
15968f5e20d4SAlexander Duyck */
15978f5e20d4SAlexander Duyck dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
15988f5e20d4SAlexander Duyck NETIF_F_HW_VLAN_CTAG_RX |
15998f5e20d4SAlexander Duyck NETIF_F_HW_VLAN_CTAG_FILTER;
16008f5e20d4SAlexander Duyck
16018f5e20d4SAlexander Duyck dev->priv_flags |= IFF_UNICAST_FLT;
16028f5e20d4SAlexander Duyck
1603e0244903SJacob Keller dev->hw_features |= hw_features;
1604e0244903SJacob Keller
160591c527a5SJarod Wilson /* MTU range: 68 - 15342 */
160691c527a5SJarod Wilson dev->min_mtu = ETH_MIN_MTU;
160791c527a5SJarod Wilson dev->max_mtu = FM10K_MAX_JUMBO_FRAME_SIZE;
160891c527a5SJarod Wilson
16090e7b3644SAlexander Duyck return dev;
16100e7b3644SAlexander Duyck }
1611