xref: /openbmc/linux/drivers/net/ethernet/mellanox/mlx4/en_rx.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

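/* One RX fragment is backed by exactly one DMA-mapped page. When an XDP
 * program is attached, mlx4_en_calc_rx_buf() below switches priv->dma_dir
 * to DMA_BIDIRECTIONAL and sets priv->rx_headroom to XDP_PACKET_HEADROOM,
 * so pages allocated here are ready for both RX and XDP_TX.
 */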
static int mlx4_alloc_page(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_alloc *frag,
			   gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(gfp);
	if (unlikely(!page))
		return -ENOMEM;
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		__free_page(page);
		return -ENOMEM;
	}
	frag->page = page;
	frag->dma = dma;
	frag->page_offset = priv->rx_headroom;
	return 0;
}

static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_ring *ring,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       gfp_t gfp)
{
	int i;

	for (i = 0; i < priv->num_frags; i++, frags++) {
		if (!frags->page) {
			if (mlx4_alloc_page(priv, frags, gfp))
				return -ENOMEM;
			ring->rx_alloc_pages++;
		}
		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
						    frags->page_offset);
	}
	return 0;
}

static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frag)
{
	if (frag->page) {
		dma_unmap_page(priv->ddev, frag->dma,
			       PAGE_SIZE, priv->dma_dir);
		__free_page(frag->page);
	}
	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage later in frag->page.
	 */
	memset(frag, 0, sizeof(*frag));
}

static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

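/* Post (or re-post) the buffer behind one RX descriptor. The page cache
 * is tried first: it holds pages recycled from the XDP datapath that are
 * still DMA-mapped, so refilling from it costs neither a page allocation
 * nor a new mapping.
 */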
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf +
		(index << ring->log_stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);
	if (likely(ring->page_cache.index > 0)) {
		/* XDP uses a single page per frame */
		if (!frags->page) {
			ring->page_cache.index--;
			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
			frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
		}
		frags->page_offset = XDP_PACKET_HEADROOM;
		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
						    XDP_PACKET_HEADROOM);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}

static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

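/* Publish the producer index through the doorbell record so the HW knows
 * how many RX descriptors are available; only the low 16 bits of the
 * free-running counter are significant, hence the 0xffff mask.
 */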
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

/* slow path */
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags + nr);
	}
}

/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	for (index = 0; index < ring->size; index++) {
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	}
	ring->cons = 0;
	ring->prod = 0;
}

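/* Pick a default number of RX rings per Ethernet port: the EQ count for
 * the port clamped to [MIN_RX_RINGS, DEF_RX_RINGS], further capped by the
 * number of online CPUs (or forced to MIN_RX_RINGS under a low-memory
 * profile), and finally rounded down to a power of two.
 */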
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs, num_online_cpus());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node, int queue_index)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		en_err(priv, "Failed to allocate RX ring structure\n");
		return -ENOMEM;
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
		goto err_ring;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->rx_info) {
		err = -ENOMEM;
		goto err_xdp_info;
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
		 ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
err_xdp_info:
	xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_rx_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
			local_bh_disable();
			napi_reschedule(&priv->rx_cq[ring]->napi);
			local_bh_enable();
		}
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index].page = frame->page;
	cache->buf[cache->index].dma = frame->dma;
	cache->index++;
	return true;
}

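/* Entries stashed by mlx4_en_rx_recycle() are consumed again in
 * mlx4_en_prepare_rx_desc(); whatever is still cached at teardown is
 * unmapped and released in mlx4_en_deactivate_rx_ring() below.
 */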
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(
					ring->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	xdp_rxq_info_unreg(&ring->xdp_rxq);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(ring->page_cache.buf[i].page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}

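/* Attach the received fragments to the skb, deciding per fragment whether
 * the page can be kept for another packet (the two halves of a page are
 * alternated via page_offset ^= PAGE_SIZE / 2, or the offset is advanced
 * by the cache-aligned fragment size) or must be unmapped and handed to
 * the stack together with the skb.
 */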
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
	unsigned int truesize = 0;
	bool release = true;
	int nr, frag_size;
	struct page *page;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0;; frags++) {
		frag_size = min_t(int, length, frag_info->frag_size);

		page = frags->page;
		if (unlikely(!page))
			goto fail;

		dma = frags->dma;
		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
					      frag_size, priv->dma_dir);

		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
				     frag_size);

		truesize += frag_info->frag_stride;
		if (frag_info->frag_stride == PAGE_SIZE / 2) {
			frags->page_offset ^= PAGE_SIZE / 2;
			release = page_count(page) != 1 ||
				  page_is_pfmemalloc(page) ||
				  page_to_nid(page) != numa_mem_id();
		} else if (!priv->rx_headroom) {
			/* rx_headroom for non XDP setup is always 0.
			 * When XDP is set, the above condition will
			 * guarantee page is always released.
			 */
			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

			frags->page_offset += sz_align;
			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
		}
		if (release) {
			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);
		}

		nr++;
		length -= frag_size;
		if (!length)
			break;
		frag_info++;
	}
	skb->truesize += truesize;
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
	}
	return 0;
}

static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
	const unsigned char *data = va + ETH_HLEN;
	int i;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
		if (data[i] != (unsigned char)i)
			return;
	}
	/* Loopback found */
	priv->loopback_ok = 1;
}

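/* Note that ring->prod and ring->cons are free-running u32 counters, so
 * (ring->prod - ring->cons) yields the number of posted descriptors even
 * across wrap-around. For example, with actual_size = 1024, prod = 70000
 * and cons = 69200, 800 descriptors are outstanding and missing = 224.
 */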
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	/* Try to batch allocations, but not too much. */
	if (missing < 8)
		return;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_MEMALLOC))
			break;
		ring->prod++;
	} while (likely(--missing));

	mlx4_en_update_rx_prod_db(ring);
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;
	__u8 ipproto = iph->protocol;

	if (unlikely(ipproto == IPPROTO_SCTP))
		return -1;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, ipproto, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
	return 0;
}

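/* The fix-up works because the HW value is a 16-bit one's complement sum
 * that already folds in the pseudo header: csum_sub() of the pseudo header
 * (built from saddr, daddr, payload length and protocol) leaves in
 * skb->csum exactly the sum that CHECKSUM_COMPLETE expects.
 */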
#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header:
 * 4 first bytes : priority, version, flow_lbl
 * and 2 additional bytes : nexthdr, hop_limit.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__u8 nexthdr = ipv6h->nexthdr;
	__wsum temp;

	if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
		     nexthdr == IPPROTO_HOPOPTS ||
		     nexthdr == IPPROTO_SCTP))
		return -1;

	/* priority, version, flow_lbl */
	temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
	/* nexthdr and hop_limit */
	skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
	return 0;
}
#endif

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

/* We reach this function only after checking that any of
 * the (IPv4 | IPv6) bits are set in cqe->status.
 */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;
	void *hdr;

	/* CQE csum doesn't cover padding octets in short ethernet
	 * frames. And the pad field is appended prior to calculating
	 * and appending the FCS field.
	 *
	 * Detecting these padded frames requires verifying and parsing
	 * IP headers, so we simply force all those small frames to skip
	 * checksum complete.
	 */
	if (short_frame(skb->len))
		return -EINVAL;

	hdr = (u8 *)va + sizeof(struct ethhdr);
	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}

#if IS_ENABLED(CONFIG_IPV6)
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
#else
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif

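/* Driver-private XDP context. struct xdp_buff must stay the first member
 * so that the metadata kfuncs below can cast the generic xdp_md pointer
 * back to this wrapper and reach the CQE, ring and netdev of the packet
 * currently being processed.
 */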
struct mlx4_en_xdp_buff {
	struct xdp_buff xdp;
	struct mlx4_cqe *cqe;
	struct mlx4_en_dev *mdev;
	struct mlx4_en_rx_ring *ring;
	struct net_device *dev;
};

int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	struct mlx4_en_xdp_buff *_ctx = (void *)ctx;

	if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
		return -ENODATA;

	*timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
					  mlx4_en_get_cqe_ts(_ctx->cqe));
	return 0;
}

int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			enum xdp_rss_hash_type *rss_type)
{
	struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
	struct mlx4_cqe *cqe = _ctx->cqe;
	enum xdp_rss_hash_type xht = 0;
	__be16 status;

	if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
		return -ENODATA;

	*hash = be32_to_cpu(cqe->immed_rss_invalid);
	status = cqe->status;
	if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
		xht = XDP_RSS_L4_TCP;
	if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
		xht = XDP_RSS_L4_UDP;
	if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
		xht |= XDP_RSS_L3_IPV4;
	if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
		xht |= XDP_RSS_L3_IPV6;
		if (cqe->ipv6_ext_mask)
			xht |= XDP_RSS_L3_DYNHDR;
	}
	*rss_type = xht;

	return 0;
}

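/* Main RX completion handler, called from NAPI context. It consumes up to
 * @budget CQEs: each packet is run through the attached XDP program first
 * (if any), and packets that survive are assembled into skbs and passed
 * to GRO via napi_gro_frags(). Returns the number of CQEs processed.
 */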
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_xdp_buff mxbuf = {};
	int factor = priv->cqe_factor;
	struct mlx4_en_rx_ring *ring;
	struct bpf_prog *xdp_prog;
	int cq_ring = cq->ring;
	bool doorbell_pending;
	bool xdp_redir_flush;
	struct mlx4_cqe *cqe;
	int polled = 0;
	int index;

	if (unlikely(!priv->port_up || budget <= 0))
		return 0;

	ring = priv->rx_ring[cq_ring];

	xdp_prog = rcu_dereference_bh(ring->xdp_prog);
	xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
	doorbell_pending = false;
	xdp_redir_flush = false;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
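	/* A CQE belongs to software when its ownership bit matches the
	 * "phase" of the consumer: (cons_index & size) isolates the bit
	 * just above the index mask and thus flips on every wrap, while
	 * the HW toggles the owner bit it writes on every pass over the
	 * CQ, so the XNOR below is true exactly for fresh completions.
	 */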
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
		struct mlx4_en_rx_alloc *frags;
		enum pkt_hash_types hash_type;
		struct sk_buff *skb;
		unsigned int length;
		int ip_summed;
		void *va;
		int nr;

		frags = ring->rx_info + (index << priv->log_rx_info);
		va = page_address(frags[0].page) + frags[0].page_offset;
		net_prefetchw(va);
		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and we are not performing the selftest or flb is disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			const struct ethhdr *ethh = va;
			dma_addr_t dma;
			/* Get a pointer to the first fragment, since we don't
			 * have an skb yet, and cast it to an ethhdr struct
			 */
			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu_bh(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, va);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			dma_addr_t dma;
			void *orig_data;
			u32 act;

			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
					 frags[0].page_offset, length, true);
			orig_data = mxbuf.xdp.data;
			mxbuf.cqe = cqe;
			mxbuf.mdev = priv->mdev;
			mxbuf.ring = ring;
			mxbuf.dev = dev;

			act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);

			length = mxbuf.xdp.data_end - mxbuf.xdp.data;
			if (mxbuf.xdp.data != orig_data) {
				frags[0].page_offset = mxbuf.xdp.data -
					mxbuf.xdp.data_hard_start;
				va = mxbuf.xdp.data;
			}

			switch (act) {
			case XDP_PASS:
				break;
			case XDP_REDIRECT:
				if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
					ring->xdp_redirect++;
					xdp_redir_flush = true;
					frags[0].page = NULL;
					goto next;
				}
				ring->xdp_redirect_fail++;
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
							length, cq_ring,
							&doorbell_pending))) {
					frags[0].page = NULL;
					goto next;
				}
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
				fallthrough;
			case XDP_ABORTED:
				trace_xdp_exception(dev, xdp_prog, act);
				fallthrough;
			case XDP_DROP:
				ring->xdp_drop++;
xdp_drop_no_cnt:
				goto next;
			}
		}

		ring->bytes += length;
		ring->packets++;

		skb = napi_get_frags(&cq->napi);
		if (unlikely(!skb))
			goto next;

		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
			u64 timestamp = mlx4_en_get_cqe_ts(cqe);

			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
					       timestamp);
		}
		skb_record_rx_queue(skb, cq_ring);

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			/* TODO: For IP non TCP/UDP packets when csum complete is
			 * not an option (not supported or any other reason) we can
			 * actually check cqe IPOK status bit and report
			 * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
			 */
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						       MLX4_CQE_STATUS_UDP)) &&
			    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    cqe->checksum == cpu_to_be16(0xffff)) {
				bool l2_tunnel;

				l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
				ip_summed = CHECKSUM_UNNECESSARY;
				hash_type = PKT_HASH_TYPE_L4;
				if (l2_tunnel)
					skb->csum_level = 1;
				ring->csum_ok++;
			} else {
				if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
					goto csum_none;
				if (check_csum(cqe, skb, va, dev->features))
					goto csum_none;
				ip_summed = CHECKSUM_COMPLETE;
				hash_type = PKT_HASH_TYPE_L3;
				ring->csum_complete++;
			}
		} else {
csum_none:
			ip_summed = CHECKSUM_NONE;
			hash_type = PKT_HASH_TYPE_L3;
			ring->csum_none++;
		}
		skb->ip_summed = ip_summed;
		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     hash_type);

		if ((cqe->vlan_my_qpn &
		     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       be16_to_cpu(cqe->sl_vid));
		else if ((cqe->vlan_my_qpn &
			  cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
		if (likely(nr)) {
			skb_shinfo(skb)->nr_frags = nr;
			skb->len = length;
			skb->data_len = length;
			napi_gro_frags(&cq->napi);
		} else {
			__vlan_hwaccel_clear_tag(skb);
			skb_clear_hash(skb);
		}
next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (unlikely(++polled == budget))
			break;
	}

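	/* Redirected frames are queued in per-CPU bulk queues by
	 * xdp_do_redirect(); flush them out once per NAPI cycle.
	 */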
	if (xdp_redir_flush)
		xdp_do_flush();

	if (likely(polled)) {
		if (doorbell_pending) {
			priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
		}

		mlx4_cq_set_ci(&cq->mcq);
		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
		ring->cons = cq->mcq.cons_index;
	}

	mlx4_en_refill_rx_buffers(priv, ring);

	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

9965a2cc190SJeff Kirsher /* Rx CQ polling - called by NAPI */
mlx4_en_poll_rx_cq(struct napi_struct * napi,int budget)9975a2cc190SJeff Kirsher int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
9985a2cc190SJeff Kirsher {
9995a2cc190SJeff Kirsher 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
10005a2cc190SJeff Kirsher 	struct net_device *dev = cq->dev;
10015a2cc190SJeff Kirsher 	struct mlx4_en_priv *priv = netdev_priv(dev);
10026c78511bSTariq Toukan 	struct mlx4_en_cq *xdp_tx_cq = NULL;
10036c78511bSTariq Toukan 	bool clean_complete = true;
10045a2cc190SJeff Kirsher 	int done;
10055a2cc190SJeff Kirsher 
1006b2b8a927SJonathan Lemon 	if (!budget)
1007b2b8a927SJonathan Lemon 		return 0;
1008b2b8a927SJonathan Lemon 
10096c78511bSTariq Toukan 	if (priv->tx_ring_num[TX_XDP]) {
10106c78511bSTariq Toukan 		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
10116c78511bSTariq Toukan 		if (xdp_tx_cq->xdp_busy) {
10126c78511bSTariq Toukan 			clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
1013cf4058dbSEric Dumazet 							       budget) < budget;
10146c78511bSTariq Toukan 			xdp_tx_cq->xdp_busy = !clean_complete;
10156c78511bSTariq Toukan 		}
10166c78511bSTariq Toukan 	}
10176c78511bSTariq Toukan 
10185a2cc190SJeff Kirsher 	done = mlx4_en_process_rx_cq(dev, cq, budget);
10195a2cc190SJeff Kirsher 
10205a2cc190SJeff Kirsher 	/* If we used up all the quota - we're probably not done yet... */
10216c78511bSTariq Toukan 	if (done == budget || !clean_complete) {
1022dc2ec62fSThomas Gleixner 		int cpu_curr;
102335f6f453SAmir Vadai 
10246c78511bSTariq Toukan 		/* in case we got here because of !clean_complete */
10256c78511bSTariq Toukan 		done = budget;
10266c78511bSTariq Toukan 
102735f6f453SAmir Vadai 		cpu_curr = smp_processor_id();
102835f6f453SAmir Vadai 
102980a62deeSThomas Gleixner 		if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
10302e1af7d7SEric Dumazet 			return budget;
10312e1af7d7SEric Dumazet 
103335f6f453SAmir Vadai 		/* Current cpu does not match the smp irq affinity -
1033dad42c30SEric Dumazet 		 * the affinity probably changed. We need to stop this
1034dad42c30SEric Dumazet 		 * NAPI poll and restart it on the right CPU.
1035dad42c30SEric Dumazet 		 * Try to avoid returning too small a value (like 0),
1036dad42c30SEric Dumazet 		 * so as not to fool net_rx_action() and its netdev_budget.
103735f6f453SAmir Vadai 		 */
1038dad42c30SEric Dumazet 		if (done)
1039dad42c30SEric Dumazet 			done--;
10402eacc23cSYuval Atias 	}
10415a2cc190SJeff Kirsher 	/* Done for now */
10429bcee89aSTariq Toukan 	if (likely(napi_complete_done(napi, done)))
10435a2cc190SJeff Kirsher 		mlx4_en_arm_cq(priv, cq);
10445a2cc190SJeff Kirsher 	return done;
10455a2cc190SJeff Kirsher }
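
/*
 * Hedged sketch of the NAPI contract the poller above relies on:
 * returning a value equal to @budget keeps the instance scheduled,
 * while only a return < budget after napi_complete_done() lets the IRQ
 * be re-armed.  That is why, on an affinity mismatch, the code above
 * completes NAPI with budget - 1: NAPI is released so the IRQ can
 * re-schedule it on the right CPU, without reporting zero work to
 * net_rx_action().  A minimal poller of the same shape (the mydrv_*
 * helpers are hypothetical):
 */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	int done = mydrv_clean_rx(napi, budget);	/* hypothetical */

	if (done == budget)
		return budget;		/* more work: stay scheduled */
	if (napi_complete_done(napi, done))
		mydrv_arm_irq(napi);	/* hypothetical: re-enable IRQ */
	return done;
}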
10465a2cc190SJeff Kirsher 
10475a2cc190SJeff Kirsher void mlx4_en_calc_rx_buf(struct net_device *dev)
10485a2cc190SJeff Kirsher {
10495a2cc190SJeff Kirsher 	struct mlx4_en_priv *priv = netdev_priv(dev);
105047a38e15SBrenden Blanco 	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
10515a2cc190SJeff Kirsher 	int i = 0;
10525a2cc190SJeff Kirsher 
1053d576acf0SBrenden Blanco 	/* bpf requires buffers to be set up as 1 packet per page.
1054d576acf0SBrenden Blanco 	 * This only works when num_frags == 1.
1055d576acf0SBrenden Blanco 	 */
105667f8b1dcSTariq Toukan 	if (priv->tx_ring_num[TX_XDP]) {
1057b45f0674SMartin KaFai Lau 		priv->frag_info[0].frag_size = eff_mtu;
1058b45f0674SMartin KaFai Lau 		/* This gains efficient XDP frame recycling at the
1059b45f0674SMartin KaFai Lau 		 * expense of more costly truesize accounting.
1060d576acf0SBrenden Blanco 		 */
1061b45f0674SMartin KaFai Lau 		priv->frag_info[0].frag_stride = PAGE_SIZE;
1062eb9c5c0dSChristophe JAILLET 		priv->dma_dir = DMA_BIDIRECTIONAL;
1063d85f6c14SEric Dumazet 		priv->rx_headroom = XDP_PACKET_HEADROOM;
1064b45f0674SMartin KaFai Lau 		i = 1;
1065b45f0674SMartin KaFai Lau 	} else {
1066b5a54d9aSEric Dumazet 		int frag_size_max = 2048, buf_size = 0;
1067b5a54d9aSEric Dumazet 
1068b5a54d9aSEric Dumazet 		/* should not happen, right? */
1069b5a54d9aSEric Dumazet 		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
1070b5a54d9aSEric Dumazet 			frag_size_max = PAGE_SIZE;
1071d576acf0SBrenden Blanco 
10725a2cc190SJeff Kirsher 		while (buf_size < eff_mtu) {
1073b5a54d9aSEric Dumazet 			int frag_stride, frag_size = eff_mtu - buf_size;
1074b5a54d9aSEric Dumazet 			int pad, nb;
107560c7f5aeSEric Dumazet 
107660c7f5aeSEric Dumazet 			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
1077b5a54d9aSEric Dumazet 				frag_size = min(frag_size, frag_size_max);
107860c7f5aeSEric Dumazet 
107960c7f5aeSEric Dumazet 			priv->frag_info[i].frag_size = frag_size;
1080b5a54d9aSEric Dumazet 			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
1081b5a54d9aSEric Dumazet 			/* We can only pack two 1536-byte frames on a 4K page.
1082b5a54d9aSEric Dumazet 			 * Therefore, each frame consumes more bytes (truesize).
1083b5a54d9aSEric Dumazet 			 */
1084b5a54d9aSEric Dumazet 			nb = PAGE_SIZE / frag_stride;
1085b5a54d9aSEric Dumazet 			pad = (PAGE_SIZE - nb * frag_stride) / nb;
1086b5a54d9aSEric Dumazet 			pad &= ~(SMP_CACHE_BYTES - 1);
1087b5a54d9aSEric Dumazet 			priv->frag_info[i].frag_stride = frag_stride + pad;
108860c7f5aeSEric Dumazet 
108960c7f5aeSEric Dumazet 			buf_size += frag_size;
10905a2cc190SJeff Kirsher 			i++;
10915a2cc190SJeff Kirsher 		}
1092eb9c5c0dSChristophe JAILLET 		priv->dma_dir = DMA_FROM_DEVICE;
1093d85f6c14SEric Dumazet 		priv->rx_headroom = 0;
1094b45f0674SMartin KaFai Lau 	}
10955a2cc190SJeff Kirsher 
10965a2cc190SJeff Kirsher 	priv->num_frags = i;
10975a2cc190SJeff Kirsher 	priv->rx_skb_size = eff_mtu;
10984cce66cdSThadeu Lima de Souza Cascardo 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
10995a2cc190SJeff Kirsher 
11001a91de28SJoe Perches 	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
11011a91de28SJoe Perches 	       eff_mtu, priv->num_frags);
11025a2cc190SJeff Kirsher 	for (i = 0; i < priv->num_frags; i++) {
1103505a9249SKamal Heib 		en_dbg(DRV,
1104505a9249SKamal Heib 		       priv,
1105aaca121dSEric Dumazet 		       "  frag:%d - size:%d stride:%d\n",
110651151a16SEric Dumazet 		       i,
11075a2cc190SJeff Kirsher 		       priv->frag_info[i].frag_size,
110851151a16SEric Dumazet 		       priv->frag_info[i].frag_stride);
11095a2cc190SJeff Kirsher 	}
11105a2cc190SJeff Kirsher }
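
/*
 * Illustrative sketch mirroring the non-XDP stride arithmetic above,
 * worked for PAGE_SIZE = 4096 and SMP_CACHE_BYTES = 64 with an assumed
 * effective MTU of 1518 bytes (standard 1500-byte MTU plus Ethernet and
 * VLAN headers - check MLX4_EN_EFF_MTU() for the exact definition):
 */
static int demo_frag_stride(int frag_size)		/* e.g. 1518 */
{
	int frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);	/* 1536 */
	int nb = PAGE_SIZE / frag_stride;		/* 2 frames per page */
	int pad = (PAGE_SIZE - nb * frag_stride) / nb;	/* 512 */

	pad &= ~(SMP_CACHE_BYTES - 1);			/* still 512 */
	return frag_stride + pad;	/* 2048: truesize of each frame */
}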
11115a2cc190SJeff Kirsher 
11125a2cc190SJeff Kirsher /* RSS-related functions */
11135a2cc190SJeff Kirsher 
11145a2cc190SJeff Kirsher static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
11155a2cc190SJeff Kirsher 				 struct mlx4_en_rx_ring *ring,
11165a2cc190SJeff Kirsher 				 enum mlx4_qp_state *state,
11175a2cc190SJeff Kirsher 				 struct mlx4_qp *qp)
11185a2cc190SJeff Kirsher {
11195a2cc190SJeff Kirsher 	struct mlx4_en_dev *mdev = priv->mdev;
11205a2cc190SJeff Kirsher 	struct mlx4_qp_context *context;
11215a2cc190SJeff Kirsher 	int err = 0;
11225a2cc190SJeff Kirsher 
11233c2dfb73SJulia Lawall 	context = kzalloc(sizeof(*context), GFP_KERNEL);
112414f8dc49SJoe Perches 	if (!context)
11255a2cc190SJeff Kirsher 		return -ENOMEM;
11265a2cc190SJeff Kirsher 
11278900b894SLeon Romanovsky 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
11285a2cc190SJeff Kirsher 	if (err) {
11295a2cc190SJeff Kirsher 		en_err(priv, "Failed to allocate qp #%x\n", qpn);
11305a2cc190SJeff Kirsher 		goto out;
11315a2cc190SJeff Kirsher 	}
11325a2cc190SJeff Kirsher 	qp->event = mlx4_en_sqp_event;
11335a2cc190SJeff Kirsher 
11345a2cc190SJeff Kirsher 	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
11350e98b523SAmir Vadai 				qpn, ring->cqn, -1, context);
11365a2cc190SJeff Kirsher 	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
11375a2cc190SJeff Kirsher 
1138f3a9d1f2SYevgeny Petrilin 	/* Cancel FCS removal if FW allows */
11394a5f4dd8SYevgeny Petrilin 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
1140f3a9d1f2SYevgeny Petrilin 		context->param3 |= cpu_to_be32(1 << 29);
1141f0df3503SMuhammad Mahajna 		if (priv->dev->features & NETIF_F_RXFCS)
1142f0df3503SMuhammad Mahajna 			ring->fcs_del = 0;
1143f0df3503SMuhammad Mahajna 		else
11444a5f4dd8SYevgeny Petrilin 			ring->fcs_del = ETH_FCS_LEN;
11454a5f4dd8SYevgeny Petrilin 	} else
11464a5f4dd8SYevgeny Petrilin 		ring->fcs_del = 0;
1147f3a9d1f2SYevgeny Petrilin 
11485a2cc190SJeff Kirsher 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
11495a2cc190SJeff Kirsher 	if (err) {
11505a2cc190SJeff Kirsher 		mlx4_qp_remove(mdev->dev, qp);
11515a2cc190SJeff Kirsher 		mlx4_qp_free(mdev->dev, qp);
11525a2cc190SJeff Kirsher 	}
11535a2cc190SJeff Kirsher 	mlx4_en_update_rx_prod_db(ring);
11545a2cc190SJeff Kirsher out:
11555a2cc190SJeff Kirsher 	kfree(context);
11565a2cc190SJeff Kirsher 	return err;
11575a2cc190SJeff Kirsher }
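
/*
 * Hedged note: ring->fcs_del, set above, is consumed on the RX fast
 * path.  When the FW keeps the FCS (param3 bit 29) but the user did not
 * request NETIF_F_RXFCS, the 4-byte CRC must be trimmed in software,
 * conceptually (illustrative, not the exact driver line):
 *
 *	length = be32_to_cpu(cqe->byte_cnt) - ring->fcs_del;
 */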
11585a2cc190SJeff Kirsher 
1159cabdc8eeSHadar Hen Zion int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
1160cabdc8eeSHadar Hen Zion {
1161cabdc8eeSHadar Hen Zion 	int err;
1162cabdc8eeSHadar Hen Zion 	u32 qpn;
1163cabdc8eeSHadar Hen Zion 
1164d57febe1SMatan Barak 	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
1165f3301870SMoshe Shemesh 				    MLX4_RESERVE_A0_QP,
1166f3301870SMoshe Shemesh 				    MLX4_RES_USAGE_DRIVER);
1167cabdc8eeSHadar Hen Zion 	if (err) {
1168cabdc8eeSHadar Hen Zion 		en_err(priv, "Failed reserving drop qpn\n");
1169cabdc8eeSHadar Hen Zion 		return err;
1170cabdc8eeSHadar Hen Zion 	}
11718900b894SLeon Romanovsky 	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
1172cabdc8eeSHadar Hen Zion 	if (err) {
1173cabdc8eeSHadar Hen Zion 		en_err(priv, "Failed allocating drop qp\n");
1174cabdc8eeSHadar Hen Zion 		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
1175cabdc8eeSHadar Hen Zion 		return err;
1176cabdc8eeSHadar Hen Zion 	}
1177cabdc8eeSHadar Hen Zion 
1178cabdc8eeSHadar Hen Zion 	return 0;
1179cabdc8eeSHadar Hen Zion }
1180cabdc8eeSHadar Hen Zion 
1181cabdc8eeSHadar Hen Zion void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
1182cabdc8eeSHadar Hen Zion {
1183cabdc8eeSHadar Hen Zion 	u32 qpn;
1184cabdc8eeSHadar Hen Zion 
1185cabdc8eeSHadar Hen Zion 	qpn = priv->drop_qp.qpn;
1186cabdc8eeSHadar Hen Zion 	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
1187cabdc8eeSHadar Hen Zion 	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
1188cabdc8eeSHadar Hen Zion 	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
1189cabdc8eeSHadar Hen Zion }
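
/*
 * Note: the create/destroy pair above follows the usual mlx4 resource
 * discipline - each acquisition has a matching release, undone in
 * reverse order on both the error path and at teardown:
 *
 *	mlx4_qp_reserve_range()  <->  mlx4_qp_release_range()
 *	mlx4_qp_alloc()          <->  mlx4_qp_remove() + mlx4_qp_free()
 *
 * which is why the alloc failure path releases the reserved range
 * before returning.
 */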
1190cabdc8eeSHadar Hen Zion 
11915a2cc190SJeff Kirsher /* Allocate RX QPs and configure them according to the RSS map */
11925a2cc190SJeff Kirsher int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
11935a2cc190SJeff Kirsher {
11945a2cc190SJeff Kirsher 	struct mlx4_en_dev *mdev = priv->mdev;
11955a2cc190SJeff Kirsher 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
11965a2cc190SJeff Kirsher 	struct mlx4_qp_context context;
1197876f6e67SOr Gerlitz 	struct mlx4_rss_context *rss_context;
119893d3e367SYevgeny Petrilin 	int rss_rings;
11995a2cc190SJeff Kirsher 	void *ptr;
1200876f6e67SOr Gerlitz 	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
12011202d460SOr Gerlitz 			MLX4_RSS_TCP_IPV6);
12025a2cc190SJeff Kirsher 	int i, qpn;
12035a2cc190SJeff Kirsher 	int err = 0;
12045a2cc190SJeff Kirsher 	int good_qps = 0;
12054931c6efSSaeed Mahameed 	u8 flags;
12065a2cc190SJeff Kirsher 
12075a2cc190SJeff Kirsher 	en_dbg(DRV, priv, "Configuring rss steering\n");
12084931c6efSSaeed Mahameed 
12094931c6efSSaeed Mahameed 	flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
12105a2cc190SJeff Kirsher 	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
12115a2cc190SJeff Kirsher 				    priv->rx_ring_num,
1212f3301870SMoshe Shemesh 				    &rss_map->base_qpn, flags,
1213f3301870SMoshe Shemesh 				    MLX4_RES_USAGE_DRIVER);
12145a2cc190SJeff Kirsher 	if (err) {
12155a2cc190SJeff Kirsher 		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
12165a2cc190SJeff Kirsher 		return err;
12175a2cc190SJeff Kirsher 	}
12185a2cc190SJeff Kirsher 
12195a2cc190SJeff Kirsher 	for (i = 0; i < priv->rx_ring_num; i++) {
12205a2cc190SJeff Kirsher 		qpn = rss_map->base_qpn + i;
122141d942d5SEugenia Emantayev 		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
12225a2cc190SJeff Kirsher 					    &rss_map->state[i],
12235a2cc190SJeff Kirsher 					    &rss_map->qps[i]);
12245a2cc190SJeff Kirsher 		if (err)
12255a2cc190SJeff Kirsher 			goto rss_err;
12265a2cc190SJeff Kirsher 
12275a2cc190SJeff Kirsher 		++good_qps;
12285a2cc190SJeff Kirsher 	}
12295a2cc190SJeff Kirsher 
12304931c6efSSaeed Mahameed 	if (priv->rx_ring_num == 1) {
12314931c6efSSaeed Mahameed 		rss_map->indir_qp = &rss_map->qps[0];
12324931c6efSSaeed Mahameed 		priv->base_qpn = rss_map->indir_qp->qpn;
12334931c6efSSaeed Mahameed 		en_info(priv, "Optimized Non-RSS steering\n");
12344931c6efSSaeed Mahameed 		return 0;
12354931c6efSSaeed Mahameed 	}
12364931c6efSSaeed Mahameed 
12374931c6efSSaeed Mahameed 	rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
12384931c6efSSaeed Mahameed 	if (!rss_map->indir_qp) {
12394931c6efSSaeed Mahameed 		err = -ENOMEM;
12404931c6efSSaeed Mahameed 		goto rss_err;
12414931c6efSSaeed Mahameed 	}
12424931c6efSSaeed Mahameed 
12435a2cc190SJeff Kirsher 	/* Configure RSS indirection qp */
12448900b894SLeon Romanovsky 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
12455a2cc190SJeff Kirsher 	if (err) {
12465a2cc190SJeff Kirsher 		en_err(priv, "Failed to allocate RSS indirection QP\n");
124748ec7014SWenwen Wang 		goto qp_alloc_err;
12485a2cc190SJeff Kirsher 	}
12494931c6efSSaeed Mahameed 
12504931c6efSSaeed Mahameed 	rss_map->indir_qp->event = mlx4_en_sqp_event;
12515a2cc190SJeff Kirsher 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
125241d942d5SEugenia Emantayev 				priv->rx_ring[0]->cqn, -1, &context);
12535a2cc190SJeff Kirsher 
125493d3e367SYevgeny Petrilin 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
125593d3e367SYevgeny Petrilin 		rss_rings = priv->rx_ring_num;
125693d3e367SYevgeny Petrilin 	else
125793d3e367SYevgeny Petrilin 		rss_rings = priv->prof->rss_rings;
125893d3e367SYevgeny Petrilin 
1259876f6e67SOr Gerlitz 	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
1260876f6e67SOr Gerlitz 					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
12615a2cc190SJeff Kirsher 	rss_context = ptr;
126293d3e367SYevgeny Petrilin 	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
12635a2cc190SJeff Kirsher 					    (rss_map->base_qpn));
126489efea25SYevgeny Petrilin 	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
12651202d460SOr Gerlitz 	if (priv->mdev->profile.udp_rss) {
12661202d460SOr Gerlitz 		rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
12671202d460SOr Gerlitz 		rss_context->base_qpn_udp = rss_context->default_qpn;
12681202d460SOr Gerlitz 	}
1269837052d0SOr Gerlitz 
1270837052d0SOr Gerlitz 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1271837052d0SOr Gerlitz 		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
1272837052d0SOr Gerlitz 		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
1273837052d0SOr Gerlitz 	}
1274837052d0SOr Gerlitz 
12755a2cc190SJeff Kirsher 	rss_context->flags = rss_mask;
1276876f6e67SOr Gerlitz 	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1277947cbb0aSEyal Perry 	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
1278947cbb0aSEyal Perry 		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
1279947cbb0aSEyal Perry 	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
1280947cbb0aSEyal Perry 		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1281947cbb0aSEyal Perry 		memcpy(rss_context->rss_key, priv->rss_key,
1282947cbb0aSEyal Perry 		       MLX4_EN_RSS_KEY_SIZE);
1283947cbb0aSEyal Perry 	} else {
1284947cbb0aSEyal Perry 		en_err(priv, "Unknown RSS hash function requested\n");
1285947cbb0aSEyal Perry 		err = -EINVAL;
1286947cbb0aSEyal Perry 		goto indir_err;
1287947cbb0aSEyal Perry 	}
12884931c6efSSaeed Mahameed 
12895a2cc190SJeff Kirsher 	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
12904931c6efSSaeed Mahameed 			       rss_map->indir_qp, &rss_map->indir_state);
12915a2cc190SJeff Kirsher 	if (err)
12925a2cc190SJeff Kirsher 		goto indir_err;
12935a2cc190SJeff Kirsher 
12945a2cc190SJeff Kirsher 	return 0;
12955a2cc190SJeff Kirsher 
12965a2cc190SJeff Kirsher indir_err:
12975a2cc190SJeff Kirsher 	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
12984931c6efSSaeed Mahameed 		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
12994931c6efSSaeed Mahameed 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
13004931c6efSSaeed Mahameed 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
130148ec7014SWenwen Wang qp_alloc_err:
13024931c6efSSaeed Mahameed 	kfree(rss_map->indir_qp);
13034931c6efSSaeed Mahameed 	rss_map->indir_qp = NULL;
13045a2cc190SJeff Kirsher rss_err:
13055a2cc190SJeff Kirsher 	for (i = 0; i < good_qps; i++) {
13065a2cc190SJeff Kirsher 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
13075a2cc190SJeff Kirsher 			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
13085a2cc190SJeff Kirsher 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
13095a2cc190SJeff Kirsher 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
13105a2cc190SJeff Kirsher 	}
13115a2cc190SJeff Kirsher 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
13125a2cc190SJeff Kirsher 	return err;
13135a2cc190SJeff Kirsher }
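
/*
 * Illustrative sketch: the RSS context above packs the ring count and
 * the base QPN into one big-endian 32-bit word as
 * ilog2(rss_rings) << 24 | base_qpn, so rss_rings is expected to be a
 * power of two (ilog2() truncates otherwise).  Hypothetical demo:
 */
static __be32 demo_rss_base_qpn(u32 rss_rings, u32 base_qpn)
{
	/* e.g. 8 rings, base QPN 0x48 -> 3 << 24 | 0x48 = 0x03000048 */
	return cpu_to_be32(ilog2(rss_rings) << 24 | base_qpn);
}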
13145a2cc190SJeff Kirsher 
13155a2cc190SJeff Kirsher void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
13165a2cc190SJeff Kirsher {
13175a2cc190SJeff Kirsher 	struct mlx4_en_dev *mdev = priv->mdev;
13185a2cc190SJeff Kirsher 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
13195a2cc190SJeff Kirsher 	int i;
13205a2cc190SJeff Kirsher 
13214931c6efSSaeed Mahameed 	if (priv->rx_ring_num > 1) {
13225a2cc190SJeff Kirsher 		mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
13234931c6efSSaeed Mahameed 			       MLX4_QP_STATE_RST, NULL, 0, 0,
13244931c6efSSaeed Mahameed 			       rss_map->indir_qp);
13254931c6efSSaeed Mahameed 		mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
13264931c6efSSaeed Mahameed 		mlx4_qp_free(mdev->dev, rss_map->indir_qp);
13274931c6efSSaeed Mahameed 		kfree(rss_map->indir_qp);
13284931c6efSSaeed Mahameed 		rss_map->indir_qp = NULL;
13294931c6efSSaeed Mahameed 	}
13305a2cc190SJeff Kirsher 
13315a2cc190SJeff Kirsher 	for (i = 0; i < priv->rx_ring_num; i++) {
13325a2cc190SJeff Kirsher 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
13335a2cc190SJeff Kirsher 			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
13345a2cc190SJeff Kirsher 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
13355a2cc190SJeff Kirsher 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
13365a2cc190SJeff Kirsher 	}
13375a2cc190SJeff Kirsher 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
13385a2cc190SJeff Kirsher }
1339