xref: /openbmc/linux/drivers/net/ethernet/qlogic/qede/qede_fp.c (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
17268f33eSAlexander Lobakin // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2cdda926dSMintz, Yuval /* QLogic qede NIC Driver
3cdda926dSMintz, Yuval  * Copyright (c) 2015-2017  QLogic Corporation
4c4fad2a5SAlexander Lobakin  * Copyright (c) 2019-2020 Marvell International Ltd.
5cdda926dSMintz, Yuval  */
67268f33eSAlexander Lobakin 
7cdda926dSMintz, Yuval #include <linux/netdevice.h>
8cdda926dSMintz, Yuval #include <linux/etherdevice.h>
9cdda926dSMintz, Yuval #include <linux/skbuff.h>
10a67edbf4SDaniel Borkmann #include <linux/bpf_trace.h>
11cdda926dSMintz, Yuval #include <net/udp_tunnel.h>
12cdda926dSMintz, Yuval #include <linux/ip.h>
134721031cSEric Dumazet #include <net/gro.h>
14cdda926dSMintz, Yuval #include <net/ipv6.h>
15cdda926dSMintz, Yuval #include <net/tcp.h>
16cdda926dSMintz, Yuval #include <linux/if_ether.h>
17cdda926dSMintz, Yuval #include <linux/if_vlan.h>
18cdda926dSMintz, Yuval #include <net/ip6_checksum.h>
194c55215cSSudarsana Reddy Kalluru #include "qede_ptp.h"
20cdda926dSMintz, Yuval 
21cdda926dSMintz, Yuval #include <linux/qed/qed_if.h>
22cdda926dSMintz, Yuval #include "qede.h"
23cdda926dSMintz, Yuval /*********************************
24cdda926dSMintz, Yuval  * Content also used by slowpath *
25cdda926dSMintz, Yuval  *********************************/
26cdda926dSMintz, Yuval 
27e3eef7eeSMintz, Yuval int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
28cdda926dSMintz, Yuval {
29cdda926dSMintz, Yuval 	struct sw_rx_data *sw_rx_data;
30cdda926dSMintz, Yuval 	struct eth_rx_bd *rx_bd;
31cdda926dSMintz, Yuval 	dma_addr_t mapping;
32cdda926dSMintz, Yuval 	struct page *data;
33cdda926dSMintz, Yuval 
34e3eef7eeSMintz, Yuval 	/* In case lazy-allocation is allowed, postpone allocation until the
35e3eef7eeSMintz, Yuval 	 * end of the NAPI run. We'd still need to make sure the Rx ring has
36e3eef7eeSMintz, Yuval 	 * sufficient buffers to guarantee an additional Rx interrupt.
37e3eef7eeSMintz, Yuval 	 */
38e3eef7eeSMintz, Yuval 	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
39e3eef7eeSMintz, Yuval 		rxq->filled_buffers--;
40e3eef7eeSMintz, Yuval 		return 0;
41e3eef7eeSMintz, Yuval 	}
42e3eef7eeSMintz, Yuval 
43cdda926dSMintz, Yuval 	data = alloc_pages(GFP_ATOMIC, 0);
44cdda926dSMintz, Yuval 	if (unlikely(!data))
45cdda926dSMintz, Yuval 		return -ENOMEM;
46cdda926dSMintz, Yuval 
47cdda926dSMintz, Yuval 	/* Map the entire page, as it will be split into
48cdda926dSMintz, Yuval 	 * multiple Rx buffer segments.
49cdda926dSMintz, Yuval 	 */
50cdda926dSMintz, Yuval 	mapping = dma_map_page(rxq->dev, data, 0,
51cdda926dSMintz, Yuval 			       PAGE_SIZE, rxq->data_direction);
52cdda926dSMintz, Yuval 	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
53cdda926dSMintz, Yuval 		__free_page(data);
54cdda926dSMintz, Yuval 		return -ENOMEM;
55cdda926dSMintz, Yuval 	}
56cdda926dSMintz, Yuval 
57cdda926dSMintz, Yuval 	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
58cdda926dSMintz, Yuval 	sw_rx_data->page_offset = 0;
59cdda926dSMintz, Yuval 	sw_rx_data->data = data;
60cdda926dSMintz, Yuval 	sw_rx_data->mapping = mapping;
61cdda926dSMintz, Yuval 
62cdda926dSMintz, Yuval 	/* Advance PROD and get BD pointer */
63cdda926dSMintz, Yuval 	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
64cdda926dSMintz, Yuval 	WARN_ON(!rx_bd);
65cdda926dSMintz, Yuval 	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
6615ed8a47SMintz, Yuval 	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
6715ed8a47SMintz, Yuval 				     rxq->rx_headroom);
68cdda926dSMintz, Yuval 
69cdda926dSMintz, Yuval 	rxq->sw_rx_prod++;
70e3eef7eeSMintz, Yuval 	rxq->filled_buffers++;
71cdda926dSMintz, Yuval 
72cdda926dSMintz, Yuval 	return 0;
73cdda926dSMintz, Yuval }
74cdda926dSMintz, Yuval 
75cdda926dSMintz, Yuval /* Unmap the data and free skb */
76cdda926dSMintz, Yuval int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
77cdda926dSMintz, Yuval {
785a052d62SSudarsana Reddy Kalluru 	u16 idx = txq->sw_tx_cons;
79cdda926dSMintz, Yuval 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
80cdda926dSMintz, Yuval 	struct eth_tx_1st_bd *first_bd;
81cdda926dSMintz, Yuval 	struct eth_tx_bd *tx_data_bd;
82cdda926dSMintz, Yuval 	int bds_consumed = 0;
83cdda926dSMintz, Yuval 	int nbds;
84cdda926dSMintz, Yuval 	bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
85cdda926dSMintz, Yuval 	int i, split_bd_len = 0;
86cdda926dSMintz, Yuval 
87cdda926dSMintz, Yuval 	if (unlikely(!skb)) {
88cdda926dSMintz, Yuval 		DP_ERR(edev,
89cdda926dSMintz, Yuval 		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
90cdda926dSMintz, Yuval 		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
91cdda926dSMintz, Yuval 		return -1;
92cdda926dSMintz, Yuval 	}
93cdda926dSMintz, Yuval 
94cdda926dSMintz, Yuval 	*len = skb->len;
95cdda926dSMintz, Yuval 
96cdda926dSMintz, Yuval 	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
97cdda926dSMintz, Yuval 
98cdda926dSMintz, Yuval 	bds_consumed++;
99cdda926dSMintz, Yuval 
100cdda926dSMintz, Yuval 	nbds = first_bd->data.nbds;
101cdda926dSMintz, Yuval 
102cdda926dSMintz, Yuval 	if (data_split) {
103cdda926dSMintz, Yuval 		struct eth_tx_bd *split = (struct eth_tx_bd *)
104cdda926dSMintz, Yuval 			qed_chain_consume(&txq->tx_pbl);
105cdda926dSMintz, Yuval 		split_bd_len = BD_UNMAP_LEN(split);
106cdda926dSMintz, Yuval 		bds_consumed++;
107cdda926dSMintz, Yuval 	}
108cdda926dSMintz, Yuval 	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
109cdda926dSMintz, Yuval 			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
110cdda926dSMintz, Yuval 
111cdda926dSMintz, Yuval 	/* Unmap the data of the skb frags */
112cdda926dSMintz, Yuval 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
113cdda926dSMintz, Yuval 		tx_data_bd = (struct eth_tx_bd *)
114cdda926dSMintz, Yuval 			qed_chain_consume(&txq->tx_pbl);
115cdda926dSMintz, Yuval 		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
116cdda926dSMintz, Yuval 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
117cdda926dSMintz, Yuval 	}
118cdda926dSMintz, Yuval 
119cdda926dSMintz, Yuval 	while (bds_consumed++ < nbds)
120cdda926dSMintz, Yuval 		qed_chain_consume(&txq->tx_pbl);
121cdda926dSMintz, Yuval 
122cdda926dSMintz, Yuval 	/* Free skb */
123cdda926dSMintz, Yuval 	dev_kfree_skb_any(skb);
124cdda926dSMintz, Yuval 	txq->sw_tx_ring.skbs[idx].skb = NULL;
125cdda926dSMintz, Yuval 	txq->sw_tx_ring.skbs[idx].flags = 0;
126cdda926dSMintz, Yuval 
127cdda926dSMintz, Yuval 	return 0;
128cdda926dSMintz, Yuval }
129cdda926dSMintz, Yuval 
130cdda926dSMintz, Yuval /* Unmap the data and free skb when mapping failed during start_xmit */
131cdda926dSMintz, Yuval static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
132cdda926dSMintz, Yuval 				    struct eth_tx_1st_bd *first_bd,
133cdda926dSMintz, Yuval 				    int nbd, bool data_split)
134cdda926dSMintz, Yuval {
1355a052d62SSudarsana Reddy Kalluru 	u16 idx = txq->sw_tx_prod;
136cdda926dSMintz, Yuval 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
137cdda926dSMintz, Yuval 	struct eth_tx_bd *tx_data_bd;
138cdda926dSMintz, Yuval 	int i, split_bd_len = 0;
139cdda926dSMintz, Yuval 
140cdda926dSMintz, Yuval 	/* Return prod to its position before this skb was handled */
141cdda926dSMintz, Yuval 	qed_chain_set_prod(&txq->tx_pbl,
142cdda926dSMintz, Yuval 			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
143cdda926dSMintz, Yuval 
144cdda926dSMintz, Yuval 	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
145cdda926dSMintz, Yuval 
146cdda926dSMintz, Yuval 	if (data_split) {
147cdda926dSMintz, Yuval 		struct eth_tx_bd *split = (struct eth_tx_bd *)
148cdda926dSMintz, Yuval 					  qed_chain_produce(&txq->tx_pbl);
149cdda926dSMintz, Yuval 		split_bd_len = BD_UNMAP_LEN(split);
150cdda926dSMintz, Yuval 		nbd--;
151cdda926dSMintz, Yuval 	}
152cdda926dSMintz, Yuval 
153cdda926dSMintz, Yuval 	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
154cdda926dSMintz, Yuval 			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
155cdda926dSMintz, Yuval 
156cdda926dSMintz, Yuval 	/* Unmap the data of the skb frags */
157cdda926dSMintz, Yuval 	for (i = 0; i < nbd; i++) {
158cdda926dSMintz, Yuval 		tx_data_bd = (struct eth_tx_bd *)
159cdda926dSMintz, Yuval 			qed_chain_produce(&txq->tx_pbl);
160cdda926dSMintz, Yuval 		if (tx_data_bd->nbytes)
161cdda926dSMintz, Yuval 			dma_unmap_page(txq->dev,
162cdda926dSMintz, Yuval 				       BD_UNMAP_ADDR(tx_data_bd),
163cdda926dSMintz, Yuval 				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
164cdda926dSMintz, Yuval 	}
165cdda926dSMintz, Yuval 
166cdda926dSMintz, Yuval 	/* Return again prod to its position before this skb was handled */
167cdda926dSMintz, Yuval 	qed_chain_set_prod(&txq->tx_pbl,
168cdda926dSMintz, Yuval 			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
169cdda926dSMintz, Yuval 
170cdda926dSMintz, Yuval 	/* Free skb */
171cdda926dSMintz, Yuval 	dev_kfree_skb_any(skb);
172cdda926dSMintz, Yuval 	txq->sw_tx_ring.skbs[idx].skb = NULL;
173cdda926dSMintz, Yuval 	txq->sw_tx_ring.skbs[idx].flags = 0;
174cdda926dSMintz, Yuval }
175cdda926dSMintz, Yuval 
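/* Classify an skb for transmit: return a bitmask of XMIT_* flags describing
 * the offloads it needs (L4 checksum, LSO, tunnel encapsulation, and inner L4
 * checksum for checksummed tunnel GSO), or XMIT_PLAIN when no checksum
 * offload was requested. *ipv6_ext is set when an IPv6 header chains to a
 * further IPv6 header.
 */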
176cdda926dSMintz, Yuval static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
177cdda926dSMintz, Yuval {
178cdda926dSMintz, Yuval 	u32 rc = XMIT_L4_CSUM;
179cdda926dSMintz, Yuval 	__be16 l3_proto;
180cdda926dSMintz, Yuval 
181cdda926dSMintz, Yuval 	if (skb->ip_summed != CHECKSUM_PARTIAL)
182cdda926dSMintz, Yuval 		return XMIT_PLAIN;
183cdda926dSMintz, Yuval 
184cdda926dSMintz, Yuval 	l3_proto = vlan_get_protocol(skb);
185cdda926dSMintz, Yuval 	if (l3_proto == htons(ETH_P_IPV6) &&
186cdda926dSMintz, Yuval 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
187cdda926dSMintz, Yuval 		*ipv6_ext = 1;
188cdda926dSMintz, Yuval 
189cdda926dSMintz, Yuval 	if (skb->encapsulation) {
190cdda926dSMintz, Yuval 		rc |= XMIT_ENC;
191cdda926dSMintz, Yuval 		if (skb_is_gso(skb)) {
192cdda926dSMintz, Yuval 			unsigned short gso_type = skb_shinfo(skb)->gso_type;
193cdda926dSMintz, Yuval 
194cdda926dSMintz, Yuval 			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
195cdda926dSMintz, Yuval 			    (gso_type & SKB_GSO_GRE_CSUM))
196cdda926dSMintz, Yuval 				rc |= XMIT_ENC_GSO_L4_CSUM;
197cdda926dSMintz, Yuval 
198cdda926dSMintz, Yuval 			rc |= XMIT_LSO;
199cdda926dSMintz, Yuval 			return rc;
200cdda926dSMintz, Yuval 		}
201cdda926dSMintz, Yuval 	}
202cdda926dSMintz, Yuval 
203cdda926dSMintz, Yuval 	if (skb_is_gso(skb))
204cdda926dSMintz, Yuval 		rc |= XMIT_LSO;
205cdda926dSMintz, Yuval 
206cdda926dSMintz, Yuval 	return rc;
207cdda926dSMintz, Yuval }
208cdda926dSMintz, Yuval 
209cdda926dSMintz, Yuval static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
210cdda926dSMintz, Yuval 					 struct eth_tx_2nd_bd *second_bd,
211cdda926dSMintz, Yuval 					 struct eth_tx_3rd_bd *third_bd)
212cdda926dSMintz, Yuval {
213cdda926dSMintz, Yuval 	u8 l4_proto;
214cdda926dSMintz, Yuval 	u16 bd2_bits1 = 0, bd2_bits2 = 0;
215cdda926dSMintz, Yuval 
216cdda926dSMintz, Yuval 	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
217cdda926dSMintz, Yuval 
218cdda926dSMintz, Yuval 	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
219cdda926dSMintz, Yuval 		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
220cdda926dSMintz, Yuval 		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
221cdda926dSMintz, Yuval 
222cdda926dSMintz, Yuval 	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
223cdda926dSMintz, Yuval 		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
224cdda926dSMintz, Yuval 
225cdda926dSMintz, Yuval 	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
226cdda926dSMintz, Yuval 		l4_proto = ipv6_hdr(skb)->nexthdr;
227cdda926dSMintz, Yuval 	else
228cdda926dSMintz, Yuval 		l4_proto = ip_hdr(skb)->protocol;
229cdda926dSMintz, Yuval 
230cdda926dSMintz, Yuval 	if (l4_proto == IPPROTO_UDP)
231cdda926dSMintz, Yuval 		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
232cdda926dSMintz, Yuval 
233cdda926dSMintz, Yuval 	if (third_bd)
234cdda926dSMintz, Yuval 		third_bd->data.bitfields |=
235cdda926dSMintz, Yuval 			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
236cdda926dSMintz, Yuval 				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
237cdda926dSMintz, Yuval 				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
238cdda926dSMintz, Yuval 
239cdda926dSMintz, Yuval 	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
240cdda926dSMintz, Yuval 	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
241cdda926dSMintz, Yuval }
242cdda926dSMintz, Yuval 
243cdda926dSMintz, Yuval static int map_frag_to_bd(struct qede_tx_queue *txq,
244cdda926dSMintz, Yuval 			  skb_frag_t *frag, struct eth_tx_bd *bd)
245cdda926dSMintz, Yuval {
246cdda926dSMintz, Yuval 	dma_addr_t mapping;
247cdda926dSMintz, Yuval 
248cdda926dSMintz, Yuval 	/* Map skb non-linear frag data for DMA */
249cdda926dSMintz, Yuval 	mapping = skb_frag_dma_map(txq->dev, frag, 0,
250cdda926dSMintz, Yuval 				   skb_frag_size(frag), DMA_TO_DEVICE);
251cdda926dSMintz, Yuval 	if (unlikely(dma_mapping_error(txq->dev, mapping)))
252cdda926dSMintz, Yuval 		return -ENOMEM;
253cdda926dSMintz, Yuval 
254cdda926dSMintz, Yuval 	/* Setup the data pointer of the frag data */
255cdda926dSMintz, Yuval 	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
256cdda926dSMintz, Yuval 
257cdda926dSMintz, Yuval 	return 0;
258cdda926dSMintz, Yuval }
259cdda926dSMintz, Yuval 
260cdda926dSMintz, Yuval static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
261cdda926dSMintz, Yuval {
262cdda926dSMintz, Yuval 	if (is_encap_pkt)
263504148feSEric Dumazet 		return skb_inner_tcp_all_headers(skb);
264504148feSEric Dumazet 
265504148feSEric Dumazet 	return skb_tcp_all_headers(skb);
266cdda926dSMintz, Yuval }
267cdda926dSMintz, Yuval 
268cdda926dSMintz, Yuval /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
269cdda926dSMintz, Yuval #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
270cdda926dSMintz, Yuval static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
271cdda926dSMintz, Yuval {
272cdda926dSMintz, Yuval 	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
273cdda926dSMintz, Yuval 
274cdda926dSMintz, Yuval 	if (xmit_type & XMIT_LSO) {
275cdda926dSMintz, Yuval 		int hlen;
276cdda926dSMintz, Yuval 
277cdda926dSMintz, Yuval 		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
278cdda926dSMintz, Yuval 
279cdda926dSMintz, Yuval 		/* linear payload would require its own BD */
280cdda926dSMintz, Yuval 		if (skb_headlen(skb) > hlen)
281cdda926dSMintz, Yuval 			allowed_frags--;
282cdda926dSMintz, Yuval 	}
283cdda926dSMintz, Yuval 
284cdda926dSMintz, Yuval 	return (skb_shinfo(skb)->nr_frags > allowed_frags);
285cdda926dSMintz, Yuval }
286cdda926dSMintz, Yuval #endif
287cdda926dSMintz, Yuval 
288cdda926dSMintz, Yuval static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
289cdda926dSMintz, Yuval {
290cdda926dSMintz, Yuval 	/* wmb makes sure that the BDs data is updated before updating the
291cdda926dSMintz, Yuval 	 * producer, otherwise FW may read old data from the BDs.
292cdda926dSMintz, Yuval 	 */
293cdda926dSMintz, Yuval 	wmb();
294cdda926dSMintz, Yuval 	barrier();
295cdda926dSMintz, Yuval 	writel(txq->tx_db.raw, txq->doorbell_addr);
296cdda926dSMintz, Yuval 
297b9fc828dSManish Chopra 	/* Fence required to flush the write combined buffer, since another
298b9fc828dSManish Chopra 	 * CPU may write to the same doorbell address and data may be lost
299b9fc828dSManish Chopra 	 * due to relaxed order nature of write combined bar.
300cdda926dSMintz, Yuval 	 */
301b9fc828dSManish Chopra 	wmb();
302cdda926dSMintz, Yuval }
303cdda926dSMintz, Yuval 
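/* Queue a single XDP buffer on the XDP Tx ring: one first BD per frame, with
 * the DMA mapping recorded in sw_tx_ring.xdp for later unmapping in
 * qede_xdp_tx_int(). The producer advances modulo the ring size; the doorbell
 * itself is rung separately by the caller via qede_update_tx_producer().
 */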
3044c2bacbeSAlexander Lobakin static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
305d1b25b79SAlexander Lobakin 			 u16 len, struct page *page, struct xdp_frame *xdpf)
306cdda926dSMintz, Yuval {
3074c2bacbeSAlexander Lobakin 	struct eth_tx_1st_bd *bd;
3084c2bacbeSAlexander Lobakin 	struct sw_tx_xdp *xdp;
30948848a06SManish Chopra 	u16 val;
310cdda926dSMintz, Yuval 
3114c2bacbeSAlexander Lobakin 	if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
3124c2bacbeSAlexander Lobakin 		     txq->num_tx_buffers)) {
313cdda926dSMintz, Yuval 		txq->stopped_cnt++;
314cdda926dSMintz, Yuval 		return -ENOMEM;
315cdda926dSMintz, Yuval 	}
316cdda926dSMintz, Yuval 
3174c2bacbeSAlexander Lobakin 	bd = qed_chain_produce(&txq->tx_pbl);
3184c2bacbeSAlexander Lobakin 	bd->data.nbds = 1;
3194c2bacbeSAlexander Lobakin 	bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
320cdda926dSMintz, Yuval 
3214c2bacbeSAlexander Lobakin 	val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
322cdda926dSMintz, Yuval 	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
32348848a06SManish Chopra 
3244c2bacbeSAlexander Lobakin 	bd->data.bitfields = cpu_to_le16(val);
325cdda926dSMintz, Yuval 
326cdda926dSMintz, Yuval 	/* We can safely ignore the offset, as it's 0 for XDP */
3274c2bacbeSAlexander Lobakin 	BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
328cdda926dSMintz, Yuval 
3294c2bacbeSAlexander Lobakin 	xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
3304c2bacbeSAlexander Lobakin 	xdp->mapping = dma;
3314c2bacbeSAlexander Lobakin 	xdp->page = page;
332d1b25b79SAlexander Lobakin 	xdp->xdpf = xdpf;
333cdda926dSMintz, Yuval 
3345a052d62SSudarsana Reddy Kalluru 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
335cdda926dSMintz, Yuval 
336cdda926dSMintz, Yuval 	return 0;
337cdda926dSMintz, Yuval }
338cdda926dSMintz, Yuval 
339d1b25b79SAlexander Lobakin int qede_xdp_transmit(struct net_device *dev, int n_frames,
340d1b25b79SAlexander Lobakin 		      struct xdp_frame **frames, u32 flags)
341d1b25b79SAlexander Lobakin {
342d1b25b79SAlexander Lobakin 	struct qede_dev *edev = netdev_priv(dev);
343d1b25b79SAlexander Lobakin 	struct device *dmadev = &edev->pdev->dev;
344d1b25b79SAlexander Lobakin 	struct qede_tx_queue *xdp_tx;
345d1b25b79SAlexander Lobakin 	struct xdp_frame *xdpf;
346d1b25b79SAlexander Lobakin 	dma_addr_t mapping;
347fdc13979SLorenzo Bianconi 	int i, nxmit = 0;
348d1b25b79SAlexander Lobakin 	u16 xdp_prod;
349d1b25b79SAlexander Lobakin 
350d1b25b79SAlexander Lobakin 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
351d1b25b79SAlexander Lobakin 		return -EINVAL;
352d1b25b79SAlexander Lobakin 
353d1b25b79SAlexander Lobakin 	if (unlikely(!netif_running(dev)))
354d1b25b79SAlexander Lobakin 		return -ENETDOWN;
355d1b25b79SAlexander Lobakin 
356d1b25b79SAlexander Lobakin 	i = smp_processor_id() % edev->total_xdp_queues;
357d1b25b79SAlexander Lobakin 	xdp_tx = edev->fp_array[i].xdp_tx;
358d1b25b79SAlexander Lobakin 
359d1b25b79SAlexander Lobakin 	spin_lock(&xdp_tx->xdp_tx_lock);
360d1b25b79SAlexander Lobakin 
361d1b25b79SAlexander Lobakin 	for (i = 0; i < n_frames; i++) {
362d1b25b79SAlexander Lobakin 		xdpf = frames[i];
363d1b25b79SAlexander Lobakin 
364d1b25b79SAlexander Lobakin 		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
365d1b25b79SAlexander Lobakin 					 DMA_TO_DEVICE);
366fdc13979SLorenzo Bianconi 		if (unlikely(dma_mapping_error(dmadev, mapping)))
367fdc13979SLorenzo Bianconi 			break;
368d1b25b79SAlexander Lobakin 
369d1b25b79SAlexander Lobakin 		if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
370fdc13979SLorenzo Bianconi 					   NULL, xdpf)))
371fdc13979SLorenzo Bianconi 			break;
372fdc13979SLorenzo Bianconi 		nxmit++;
373d1b25b79SAlexander Lobakin 	}
374d1b25b79SAlexander Lobakin 
375d1b25b79SAlexander Lobakin 	if (flags & XDP_XMIT_FLUSH) {
376d1b25b79SAlexander Lobakin 		xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
377d1b25b79SAlexander Lobakin 
378d1b25b79SAlexander Lobakin 		xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
379d1b25b79SAlexander Lobakin 		qede_update_tx_producer(xdp_tx);
380d1b25b79SAlexander Lobakin 	}
381d1b25b79SAlexander Lobakin 
382d1b25b79SAlexander Lobakin 	spin_unlock(&xdp_tx->xdp_tx_lock);
383d1b25b79SAlexander Lobakin 
384fdc13979SLorenzo Bianconi 	return nxmit;
385d1b25b79SAlexander Lobakin }
386d1b25b79SAlexander Lobakin 
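/* Non-zero when the Tx queue has completion work pending for qede_tx_int()
 * to reclaim.
 */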
387cdda926dSMintz, Yuval int qede_txq_has_work(struct qede_tx_queue *txq)
388cdda926dSMintz, Yuval {
389cdda926dSMintz, Yuval 	u16 hw_bd_cons;
390cdda926dSMintz, Yuval 
391cdda926dSMintz, Yuval 	/* Tell compiler that consumer and producer can change */
392cdda926dSMintz, Yuval 	barrier();
393cdda926dSMintz, Yuval 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
394cdda926dSMintz, Yuval 	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
395cdda926dSMintz, Yuval 		return 0;
396cdda926dSMintz, Yuval 
397cdda926dSMintz, Yuval 	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
398cdda926dSMintz, Yuval }
399cdda926dSMintz, Yuval 
400cdda926dSMintz, Yuval static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
401cdda926dSMintz, Yuval {
4024c2bacbeSAlexander Lobakin 	struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
4034c2bacbeSAlexander Lobakin 	struct device *dev = &edev->pdev->dev;
404d1b25b79SAlexander Lobakin 	struct xdp_frame *xdpf;
4054c2bacbeSAlexander Lobakin 	u16 hw_bd_cons;
406cdda926dSMintz, Yuval 
407cdda926dSMintz, Yuval 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
408cdda926dSMintz, Yuval 	barrier();
409cdda926dSMintz, Yuval 
410cdda926dSMintz, Yuval 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
4114c2bacbeSAlexander Lobakin 		xdp_info = xdp_arr + txq->sw_tx_cons;
412d1b25b79SAlexander Lobakin 		xdpf = xdp_info->xdpf;
4134c2bacbeSAlexander Lobakin 
414d1b25b79SAlexander Lobakin 		if (xdpf) {
415d1b25b79SAlexander Lobakin 			dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
416d1b25b79SAlexander Lobakin 					 DMA_TO_DEVICE);
417d1b25b79SAlexander Lobakin 			xdp_return_frame(xdpf);
418d1b25b79SAlexander Lobakin 
419d1b25b79SAlexander Lobakin 			xdp_info->xdpf = NULL;
420d1b25b79SAlexander Lobakin 		} else {
4214c2bacbeSAlexander Lobakin 			dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
4224c2bacbeSAlexander Lobakin 				       DMA_BIDIRECTIONAL);
4234c2bacbeSAlexander Lobakin 			__free_page(xdp_info->page);
424d1b25b79SAlexander Lobakin 		}
4254c2bacbeSAlexander Lobakin 
42689e1afc4SMintz, Yuval 		qed_chain_consume(&txq->tx_pbl);
4275a052d62SSudarsana Reddy Kalluru 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
428cdda926dSMintz, Yuval 		txq->xmit_pkts++;
429cdda926dSMintz, Yuval 	}
430cdda926dSMintz, Yuval }
431cdda926dSMintz, Yuval 
432cdda926dSMintz, Yuval static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
433cdda926dSMintz, Yuval {
4345e7baf0fSManish Chopra 	unsigned int pkts_compl = 0, bytes_compl = 0;
435cdda926dSMintz, Yuval 	struct netdev_queue *netdev_txq;
436cdda926dSMintz, Yuval 	u16 hw_bd_cons;
437cdda926dSMintz, Yuval 	int rc;
438cdda926dSMintz, Yuval 
4395e7baf0fSManish Chopra 	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
440cdda926dSMintz, Yuval 
441cdda926dSMintz, Yuval 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
442cdda926dSMintz, Yuval 	barrier();
443cdda926dSMintz, Yuval 
444cdda926dSMintz, Yuval 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
445cdda926dSMintz, Yuval 		int len = 0;
446cdda926dSMintz, Yuval 
447cdda926dSMintz, Yuval 		rc = qede_free_tx_pkt(edev, txq, &len);
448cdda926dSMintz, Yuval 		if (rc) {
449cdda926dSMintz, Yuval 			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
450cdda926dSMintz, Yuval 				  hw_bd_cons,
451cdda926dSMintz, Yuval 				  qed_chain_get_cons_idx(&txq->tx_pbl));
452cdda926dSMintz, Yuval 			break;
453cdda926dSMintz, Yuval 		}
454cdda926dSMintz, Yuval 
455cdda926dSMintz, Yuval 		bytes_compl += len;
456cdda926dSMintz, Yuval 		pkts_compl++;
4575a052d62SSudarsana Reddy Kalluru 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
458cdda926dSMintz, Yuval 		txq->xmit_pkts++;
459cdda926dSMintz, Yuval 	}
460cdda926dSMintz, Yuval 
461cdda926dSMintz, Yuval 	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
462cdda926dSMintz, Yuval 
463cdda926dSMintz, Yuval 	/* Need to make the tx_bd_cons update visible to start_xmit()
464cdda926dSMintz, Yuval 	 * before checking for netif_tx_queue_stopped().  Without the
465cdda926dSMintz, Yuval 	 * memory barrier, there is a small possibility that
466cdda926dSMintz, Yuval 	 * start_xmit() will miss it and cause the queue to be stopped
467cdda926dSMintz, Yuval 	 * forever.
468cdda926dSMintz, Yuval 	 * On the other hand we need an rmb() here to ensure the proper
469cdda926dSMintz, Yuval 	 * ordering of bit testing in the following
470cdda926dSMintz, Yuval 	 * netif_tx_queue_stopped(txq) call.
471cdda926dSMintz, Yuval 	 */
472cdda926dSMintz, Yuval 	smp_mb();
473cdda926dSMintz, Yuval 
474cdda926dSMintz, Yuval 	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
475cdda926dSMintz, Yuval 		/* Taking tx_lock is needed to prevent reenabling the queue
476cdda926dSMintz, Yuval 		 * while it's empty. This could happen if rx_action() gets
477cdda926dSMintz, Yuval 		 * suspended in qede_tx_int() after the condition before
478cdda926dSMintz, Yuval 		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
479cdda926dSMintz, Yuval 		 *
480cdda926dSMintz, Yuval 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
481cdda926dSMintz, Yuval 		 * sends some packets consuming the whole queue again->
482cdda926dSMintz, Yuval 		 * stops the queue
483cdda926dSMintz, Yuval 		 */
484cdda926dSMintz, Yuval 
485cdda926dSMintz, Yuval 		__netif_tx_lock(netdev_txq, smp_processor_id());
486cdda926dSMintz, Yuval 
487cdda926dSMintz, Yuval 		if ((netif_tx_queue_stopped(netdev_txq)) &&
488cdda926dSMintz, Yuval 		    (edev->state == QEDE_STATE_OPEN) &&
489cdda926dSMintz, Yuval 		    (qed_chain_get_elem_left(&txq->tx_pbl)
490cdda926dSMintz, Yuval 		      >= (MAX_SKB_FRAGS + 1))) {
491cdda926dSMintz, Yuval 			netif_tx_wake_queue(netdev_txq);
492cdda926dSMintz, Yuval 			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
493cdda926dSMintz, Yuval 				   "Wake queue was called\n");
494cdda926dSMintz, Yuval 		}
495cdda926dSMintz, Yuval 
496cdda926dSMintz, Yuval 		__netif_tx_unlock(netdev_txq);
497cdda926dSMintz, Yuval 	}
498cdda926dSMintz, Yuval 
499cdda926dSMintz, Yuval 	return 0;
500cdda926dSMintz, Yuval }
501cdda926dSMintz, Yuval 
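/* True when the Rx completion-ring producer written by HW differs from the
 * driver's consumer, i.e. new Rx completions are pending.
 */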
502cdda926dSMintz, Yuval bool qede_has_rx_work(struct qede_rx_queue *rxq)
503cdda926dSMintz, Yuval {
504cdda926dSMintz, Yuval 	u16 hw_comp_cons, sw_comp_cons;
505cdda926dSMintz, Yuval 
506cdda926dSMintz, Yuval 	/* Tell compiler that status block fields can change */
507cdda926dSMintz, Yuval 	barrier();
508cdda926dSMintz, Yuval 
509cdda926dSMintz, Yuval 	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
510cdda926dSMintz, Yuval 	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
511cdda926dSMintz, Yuval 
512cdda926dSMintz, Yuval 	return hw_comp_cons != sw_comp_cons;
513cdda926dSMintz, Yuval }
514cdda926dSMintz, Yuval 
515cdda926dSMintz, Yuval static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
516cdda926dSMintz, Yuval {
517cdda926dSMintz, Yuval 	qed_chain_consume(&rxq->rx_bd_ring);
518cdda926dSMintz, Yuval 	rxq->sw_rx_cons++;
519cdda926dSMintz, Yuval }
520cdda926dSMintz, Yuval 
521cdda926dSMintz, Yuval /* This function reuses the buffer (from an offset) from
522cdda926dSMintz, Yuval  * consumer index to producer index in the bd ring
523cdda926dSMintz, Yuval  */
524cdda926dSMintz, Yuval static inline void qede_reuse_page(struct qede_rx_queue *rxq,
525cdda926dSMintz, Yuval 				   struct sw_rx_data *curr_cons)
526cdda926dSMintz, Yuval {
527cdda926dSMintz, Yuval 	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
528cdda926dSMintz, Yuval 	struct sw_rx_data *curr_prod;
529cdda926dSMintz, Yuval 	dma_addr_t new_mapping;
530cdda926dSMintz, Yuval 
531cdda926dSMintz, Yuval 	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
532cdda926dSMintz, Yuval 	*curr_prod = *curr_cons;
533cdda926dSMintz, Yuval 
534cdda926dSMintz, Yuval 	new_mapping = curr_prod->mapping + curr_prod->page_offset;
535cdda926dSMintz, Yuval 
536cdda926dSMintz, Yuval 	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
53715ed8a47SMintz, Yuval 	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
53815ed8a47SMintz, Yuval 					  rxq->rx_headroom);
539cdda926dSMintz, Yuval 
540cdda926dSMintz, Yuval 	rxq->sw_rx_prod++;
541cdda926dSMintz, Yuval 	curr_cons->data = NULL;
542cdda926dSMintz, Yuval }
543cdda926dSMintz, Yuval 
544cdda926dSMintz, Yuval /* In case of allocation failures reuse buffers
545cdda926dSMintz, Yuval  * from consumer index to produce buffers for firmware
546cdda926dSMintz, Yuval  */
547cdda926dSMintz, Yuval void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
548cdda926dSMintz, Yuval {
549cdda926dSMintz, Yuval 	struct sw_rx_data *curr_cons;
550cdda926dSMintz, Yuval 
551cdda926dSMintz, Yuval 	for (; count > 0; count--) {
552cdda926dSMintz, Yuval 		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
553cdda926dSMintz, Yuval 		qede_reuse_page(rxq, curr_cons);
554cdda926dSMintz, Yuval 		qede_rx_bd_ring_consume(rxq);
555cdda926dSMintz, Yuval 	}
556cdda926dSMintz, Yuval }
557cdda926dSMintz, Yuval 
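/* Advance the consumed buffer to the next segment of its page. Once the whole
 * page has been handed out, allocate a replacement and unmap the old page;
 * otherwise take an extra page reference and recycle the same page back to
 * the producer ring.
 */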
558cdda926dSMintz, Yuval static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
559cdda926dSMintz, Yuval 					 struct sw_rx_data *curr_cons)
560cdda926dSMintz, Yuval {
561cdda926dSMintz, Yuval 	/* Move to the next segment in the page */
562cdda926dSMintz, Yuval 	curr_cons->page_offset += rxq->rx_buf_seg_size;
563cdda926dSMintz, Yuval 
564cdda926dSMintz, Yuval 	if (curr_cons->page_offset == PAGE_SIZE) {
565e3eef7eeSMintz, Yuval 		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
566cdda926dSMintz, Yuval 			/* Since we failed to allocate a new buffer,
567cdda926dSMintz, Yuval 			 * the current buffer can be used again.
568cdda926dSMintz, Yuval 			 */
569cdda926dSMintz, Yuval 			curr_cons->page_offset -= rxq->rx_buf_seg_size;
570cdda926dSMintz, Yuval 
571cdda926dSMintz, Yuval 			return -ENOMEM;
572cdda926dSMintz, Yuval 		}
573cdda926dSMintz, Yuval 
574cdda926dSMintz, Yuval 		dma_unmap_page(rxq->dev, curr_cons->mapping,
575cdda926dSMintz, Yuval 			       PAGE_SIZE, rxq->data_direction);
576cdda926dSMintz, Yuval 	} else {
577cdda926dSMintz, Yuval 		/* Increment refcount of the page as we don't want
578cdda926dSMintz, Yuval 		 * network stack to take the ownership of the page
579cdda926dSMintz, Yuval 		 * which can be recycled multiple times by the driver.
580cdda926dSMintz, Yuval 		 */
581cdda926dSMintz, Yuval 		page_ref_inc(curr_cons->data);
582cdda926dSMintz, Yuval 		qede_reuse_page(rxq, curr_cons);
583cdda926dSMintz, Yuval 	}
584cdda926dSMintz, Yuval 
585cdda926dSMintz, Yuval 	return 0;
586cdda926dSMintz, Yuval }
587cdda926dSMintz, Yuval 
588cdda926dSMintz, Yuval void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
589cdda926dSMintz, Yuval {
590cdda926dSMintz, Yuval 	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
591cdda926dSMintz, Yuval 	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
592cdda926dSMintz, Yuval 	struct eth_rx_prod_data rx_prods = {0};
593cdda926dSMintz, Yuval 
594cdda926dSMintz, Yuval 	/* Update producers */
595cdda926dSMintz, Yuval 	rx_prods.bd_prod = cpu_to_le16(bd_prod);
596cdda926dSMintz, Yuval 	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
597cdda926dSMintz, Yuval 
598cdda926dSMintz, Yuval 	/* Make sure that the BD and SGE data is updated before updating the
599cdda926dSMintz, Yuval 	 * producers since FW might read the BD/SGE right after the producer
600cdda926dSMintz, Yuval 	 * is updated.
601cdda926dSMintz, Yuval 	 */
602cdda926dSMintz, Yuval 	wmb();
603cdda926dSMintz, Yuval 
604cdda926dSMintz, Yuval 	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
605cdda926dSMintz, Yuval 			(u32 *)&rx_prods);
606cdda926dSMintz, Yuval }
607cdda926dSMintz, Yuval 
608cdda926dSMintz, Yuval static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
609cdda926dSMintz, Yuval {
610cdda926dSMintz, Yuval 	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
611cdda926dSMintz, Yuval 	enum rss_hash_type htype;
612cdda926dSMintz, Yuval 	u32 hash = 0;
613cdda926dSMintz, Yuval 
614cdda926dSMintz, Yuval 	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
615cdda926dSMintz, Yuval 	if (htype) {
616cdda926dSMintz, Yuval 		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
617cdda926dSMintz, Yuval 			     (htype == RSS_HASH_TYPE_IPV6)) ?
618cdda926dSMintz, Yuval 			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
619cdda926dSMintz, Yuval 		hash = le32_to_cpu(rss_hash);
620cdda926dSMintz, Yuval 	}
621cdda926dSMintz, Yuval 	skb_set_hash(skb, hash, hash_type);
622cdda926dSMintz, Yuval }
623cdda926dSMintz, Yuval 
624cdda926dSMintz, Yuval static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
625cdda926dSMintz, Yuval {
626cdda926dSMintz, Yuval 	skb_checksum_none_assert(skb);
627cdda926dSMintz, Yuval 
628cdda926dSMintz, Yuval 	if (csum_flag & QEDE_CSUM_UNNECESSARY)
629cdda926dSMintz, Yuval 		skb->ip_summed = CHECKSUM_UNNECESSARY;
630cdda926dSMintz, Yuval 
6317ca547bdSManish Chopra 	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
632cdda926dSMintz, Yuval 		skb->csum_level = 1;
6337ca547bdSManish Chopra 		skb->encapsulation = 1;
6347ca547bdSManish Chopra 	}
635cdda926dSMintz, Yuval }
636cdda926dSMintz, Yuval 
637cdda926dSMintz, Yuval static inline void qede_skb_receive(struct qede_dev *edev,
638cdda926dSMintz, Yuval 				    struct qede_fastpath *fp,
639cdda926dSMintz, Yuval 				    struct qede_rx_queue *rxq,
640cdda926dSMintz, Yuval 				    struct sk_buff *skb, u16 vlan_tag)
641cdda926dSMintz, Yuval {
642cdda926dSMintz, Yuval 	if (vlan_tag)
643cdda926dSMintz, Yuval 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
644cdda926dSMintz, Yuval 
645cdda926dSMintz, Yuval 	napi_gro_receive(&fp->napi, skb);
646cdda926dSMintz, Yuval }
647cdda926dSMintz, Yuval 
648cdda926dSMintz, Yuval static void qede_set_gro_params(struct qede_dev *edev,
649cdda926dSMintz, Yuval 				struct sk_buff *skb,
650cdda926dSMintz, Yuval 				struct eth_fast_path_rx_tpa_start_cqe *cqe)
651cdda926dSMintz, Yuval {
652cdda926dSMintz, Yuval 	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
653cdda926dSMintz, Yuval 
654cdda926dSMintz, Yuval 	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
655cdda926dSMintz, Yuval 	    PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
656cdda926dSMintz, Yuval 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
657cdda926dSMintz, Yuval 	else
658cdda926dSMintz, Yuval 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
659cdda926dSMintz, Yuval 
660cdda926dSMintz, Yuval 	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
661cdda926dSMintz, Yuval 				    cqe->header_len;
662cdda926dSMintz, Yuval }
663cdda926dSMintz, Yuval 
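/* Attach the current Rx buffer as a page fragment of the aggregation's skb
 * and replenish or recycle the ring slot. On failure the aggregation is
 * marked QEDE_AGG_STATE_ERROR and the buffer is recycled to the ring.
 */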
664cdda926dSMintz, Yuval static int qede_fill_frag_skb(struct qede_dev *edev,
665cdda926dSMintz, Yuval 			      struct qede_rx_queue *rxq,
666cdda926dSMintz, Yuval 			      u8 tpa_agg_index, u16 len_on_bd)
667cdda926dSMintz, Yuval {
668cdda926dSMintz, Yuval 	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
669cdda926dSMintz, Yuval 							 NUM_RX_BDS_MAX];
670cdda926dSMintz, Yuval 	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
671cdda926dSMintz, Yuval 	struct sk_buff *skb = tpa_info->skb;
672cdda926dSMintz, Yuval 
673cdda926dSMintz, Yuval 	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
674cdda926dSMintz, Yuval 		goto out;
675cdda926dSMintz, Yuval 
676cdda926dSMintz, Yuval 	/* Add one frag and update the appropriate fields in the skb */
677cdda926dSMintz, Yuval 	skb_fill_page_desc(skb, tpa_info->frag_id++,
6788a863397SManish Chopra 			   current_bd->data,
6798a863397SManish Chopra 			   current_bd->page_offset + rxq->rx_headroom,
680cdda926dSMintz, Yuval 			   len_on_bd);
681cdda926dSMintz, Yuval 
682cdda926dSMintz, Yuval 	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
683cdda926dSMintz, Yuval 		/* Incr page ref count to reuse on allocation failure
684cdda926dSMintz, Yuval 		 * so that it doesn't get freed while freeing SKB.
685cdda926dSMintz, Yuval 		 */
686cdda926dSMintz, Yuval 		page_ref_inc(current_bd->data);
687cdda926dSMintz, Yuval 		goto out;
688cdda926dSMintz, Yuval 	}
689cdda926dSMintz, Yuval 
6908a863397SManish Chopra 	qede_rx_bd_ring_consume(rxq);
691cdda926dSMintz, Yuval 
692cdda926dSMintz, Yuval 	skb->data_len += len_on_bd;
693cdda926dSMintz, Yuval 	skb->truesize += rxq->rx_buf_seg_size;
694cdda926dSMintz, Yuval 	skb->len += len_on_bd;
695cdda926dSMintz, Yuval 
696cdda926dSMintz, Yuval 	return 0;
697cdda926dSMintz, Yuval 
698cdda926dSMintz, Yuval out:
699cdda926dSMintz, Yuval 	tpa_info->state = QEDE_AGG_STATE_ERROR;
700cdda926dSMintz, Yuval 	qede_recycle_rx_bd_ring(rxq, 1);
701cdda926dSMintz, Yuval 
702cdda926dSMintz, Yuval 	return -ENOMEM;
703cdda926dSMintz, Yuval }
704cdda926dSMintz, Yuval 
705cdda926dSMintz, Yuval static bool qede_tunn_exist(u16 flag)
706cdda926dSMintz, Yuval {
707cdda926dSMintz, Yuval 	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
708cdda926dSMintz, Yuval 			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
709cdda926dSMintz, Yuval }
710cdda926dSMintz, Yuval 
711cdda926dSMintz, Yuval static u8 qede_check_tunn_csum(u16 flag)
712cdda926dSMintz, Yuval {
713cdda926dSMintz, Yuval 	u16 csum_flag = 0;
714cdda926dSMintz, Yuval 	u8 tcsum = 0;
715cdda926dSMintz, Yuval 
716cdda926dSMintz, Yuval 	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
717cdda926dSMintz, Yuval 		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
718cdda926dSMintz, Yuval 		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
719cdda926dSMintz, Yuval 			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
720cdda926dSMintz, Yuval 
721cdda926dSMintz, Yuval 	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
722cdda926dSMintz, Yuval 		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
723cdda926dSMintz, Yuval 		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
724cdda926dSMintz, Yuval 			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
725cdda926dSMintz, Yuval 		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
726cdda926dSMintz, Yuval 	}
727cdda926dSMintz, Yuval 
728cdda926dSMintz, Yuval 	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
729cdda926dSMintz, Yuval 		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
730cdda926dSMintz, Yuval 		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
731cdda926dSMintz, Yuval 		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
732cdda926dSMintz, Yuval 
733cdda926dSMintz, Yuval 	if (csum_flag & flag)
734cdda926dSMintz, Yuval 		return QEDE_CSUM_ERROR;
735cdda926dSMintz, Yuval 
736cdda926dSMintz, Yuval 	return QEDE_CSUM_UNNECESSARY | tcsum;
737cdda926dSMintz, Yuval }
738cdda926dSMintz, Yuval 
7398a863397SManish Chopra static inline struct sk_buff *
7408a863397SManish Chopra qede_build_skb(struct qede_rx_queue *rxq,
7418a863397SManish Chopra 	       struct sw_rx_data *bd, u16 len, u16 pad)
7428a863397SManish Chopra {
7438a863397SManish Chopra 	struct sk_buff *skb;
7448a863397SManish Chopra 	void *buf;
7458a863397SManish Chopra 
7468a863397SManish Chopra 	buf = page_address(bd->data) + bd->page_offset;
7478a863397SManish Chopra 	skb = build_skb(buf, rxq->rx_buf_seg_size);
7488a863397SManish Chopra 
7494e910dbeSJamie Bainbridge 	if (unlikely(!skb))
7504e910dbeSJamie Bainbridge 		return NULL;
7514e910dbeSJamie Bainbridge 
7528a863397SManish Chopra 	skb_reserve(skb, pad);
7538a863397SManish Chopra 	skb_put(skb, len);
7548a863397SManish Chopra 
7558a863397SManish Chopra 	return skb;
7568a863397SManish Chopra }
7578a863397SManish Chopra 
7588a863397SManish Chopra static struct sk_buff *
7598a863397SManish Chopra qede_tpa_rx_build_skb(struct qede_dev *edev,
7608a863397SManish Chopra 		      struct qede_rx_queue *rxq,
7618a863397SManish Chopra 		      struct sw_rx_data *bd, u16 len, u16 pad,
7628a863397SManish Chopra 		      bool alloc_skb)
7638a863397SManish Chopra {
7648a863397SManish Chopra 	struct sk_buff *skb;
7658a863397SManish Chopra 
7668a863397SManish Chopra 	skb = qede_build_skb(rxq, bd, len, pad);
7678a863397SManish Chopra 	bd->page_offset += rxq->rx_buf_seg_size;
7688a863397SManish Chopra 
7698a863397SManish Chopra 	if (bd->page_offset == PAGE_SIZE) {
7708a863397SManish Chopra 		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
7718a863397SManish Chopra 			DP_NOTICE(edev,
7728a863397SManish Chopra 				  "Failed to allocate RX buffer for tpa start\n");
7738a863397SManish Chopra 			bd->page_offset -= rxq->rx_buf_seg_size;
7748a863397SManish Chopra 			page_ref_inc(bd->data);
7758a863397SManish Chopra 			dev_kfree_skb_any(skb);
7768a863397SManish Chopra 			return NULL;
7778a863397SManish Chopra 		}
7788a863397SManish Chopra 	} else {
7798a863397SManish Chopra 		page_ref_inc(bd->data);
7808a863397SManish Chopra 		qede_reuse_page(rxq, bd);
7818a863397SManish Chopra 	}
7828a863397SManish Chopra 
7838a863397SManish Chopra 	/* We've consumed the first BD and prepared an SKB */
7848a863397SManish Chopra 	qede_rx_bd_ring_consume(rxq);
7858a863397SManish Chopra 
7868a863397SManish Chopra 	return skb;
7878a863397SManish Chopra }
7888a863397SManish Chopra 
7898a863397SManish Chopra static struct sk_buff *
7908a863397SManish Chopra qede_rx_build_skb(struct qede_dev *edev,
7918a863397SManish Chopra 		  struct qede_rx_queue *rxq,
7928a863397SManish Chopra 		  struct sw_rx_data *bd, u16 len, u16 pad)
7938a863397SManish Chopra {
7948a863397SManish Chopra 	struct sk_buff *skb = NULL;
7958a863397SManish Chopra 
7968a863397SManish Chopra 	/* For smaller frames, still allocate an skb and memcpy the
7978a863397SManish Chopra 	 * data, so the page segment can be reused instead of being
7988a863397SManish Chopra 	 * unmapped.
7998a863397SManish Chopra 	 */
8008a863397SManish Chopra 	if ((len + pad <= edev->rx_copybreak)) {
8018a863397SManish Chopra 		unsigned int offset = bd->page_offset + pad;
8028a863397SManish Chopra 
8038a863397SManish Chopra 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
8048a863397SManish Chopra 		if (unlikely(!skb))
8058a863397SManish Chopra 			return NULL;
8068a863397SManish Chopra 
8078a863397SManish Chopra 		skb_reserve(skb, pad);
80824ccb0abSChristophe JAILLET 		skb_put_data(skb, page_address(bd->data) + offset, len);
8098a863397SManish Chopra 		qede_reuse_page(rxq, bd);
8108a863397SManish Chopra 		goto out;
8118a863397SManish Chopra 	}
8128a863397SManish Chopra 
8138a863397SManish Chopra 	skb = qede_build_skb(rxq, bd, len, pad);
8148a863397SManish Chopra 
8158a863397SManish Chopra 	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
8168a863397SManish Chopra 		/* Incr page ref count to reuse on allocation failure so
8178a863397SManish Chopra 	 * that it doesn't get freed while freeing SKB [as it's
8188a863397SManish Chopra 		 * already mapped there].
8198a863397SManish Chopra 		 */
8208a863397SManish Chopra 		page_ref_inc(bd->data);
8218a863397SManish Chopra 		dev_kfree_skb_any(skb);
8228a863397SManish Chopra 		return NULL;
8238a863397SManish Chopra 	}
8248a863397SManish Chopra out:
8258a863397SManish Chopra 	/* We've consumed the first BD and prepared an SKB */
8268a863397SManish Chopra 	qede_rx_bd_ring_consume(rxq);
8278a863397SManish Chopra 
8288a863397SManish Chopra 	return skb;
8298a863397SManish Chopra }
8308a863397SManish Chopra 
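/* TPA aggregation start: build an skb around the first buffer, latch the VLAN
 * tag, RSS hash and GSO parameters in the per-aggregation tpa_info, and
 * consume any extra length already reported in bw_ext_bd_len_list. On skb
 * allocation failure the aggregation is marked QEDE_AGG_STATE_ERROR and the
 * buffer is reclaimed at TPA end.
 */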
831cdda926dSMintz, Yuval static void qede_tpa_start(struct qede_dev *edev,
832cdda926dSMintz, Yuval 			   struct qede_rx_queue *rxq,
833cdda926dSMintz, Yuval 			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
834cdda926dSMintz, Yuval {
835cdda926dSMintz, Yuval 	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
836cdda926dSMintz, Yuval 	struct sw_rx_data *sw_rx_data_cons;
8378a863397SManish Chopra 	u16 pad;
838cdda926dSMintz, Yuval 
839cdda926dSMintz, Yuval 	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
8408a863397SManish Chopra 	pad = cqe->placement_offset + rxq->rx_headroom;
841cdda926dSMintz, Yuval 
8428a863397SManish Chopra 	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
8438a863397SManish Chopra 					      le16_to_cpu(cqe->len_on_first_bd),
8448a863397SManish Chopra 					      pad, false);
8458a863397SManish Chopra 	tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
8468a863397SManish Chopra 	tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
847cdda926dSMintz, Yuval 
848cdda926dSMintz, Yuval 	if (unlikely(!tpa_info->skb)) {
849cdda926dSMintz, Yuval 		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
8508a863397SManish Chopra 
8518a863397SManish Chopra 		/* Consume from the ring but do not produce, since the
8528a863397SManish Chopra 		 * buffer might still be used by the FW; it will be
8538a863397SManish Chopra 		 * re-used at TPA end.
8548a863397SManish Chopra 		 */
8558a863397SManish Chopra 		tpa_info->tpa_start_fail = true;
8568a863397SManish Chopra 		qede_rx_bd_ring_consume(rxq);
857cdda926dSMintz, Yuval 		tpa_info->state = QEDE_AGG_STATE_ERROR;
858cdda926dSMintz, Yuval 		goto cons_buf;
859cdda926dSMintz, Yuval 	}
860cdda926dSMintz, Yuval 
861cdda926dSMintz, Yuval 	tpa_info->frag_id = 0;
862cdda926dSMintz, Yuval 	tpa_info->state = QEDE_AGG_STATE_START;
863cdda926dSMintz, Yuval 
864cdda926dSMintz, Yuval 	if ((le16_to_cpu(cqe->pars_flags.flags) >>
865cdda926dSMintz, Yuval 	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
866cdda926dSMintz, Yuval 	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
867cdda926dSMintz, Yuval 		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
868cdda926dSMintz, Yuval 	else
869cdda926dSMintz, Yuval 		tpa_info->vlan_tag = 0;
870cdda926dSMintz, Yuval 
871cdda926dSMintz, Yuval 	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
872cdda926dSMintz, Yuval 
873cdda926dSMintz, Yuval 	/* This is needed in order to enable forwarding support */
874cdda926dSMintz, Yuval 	qede_set_gro_params(edev, tpa_info->skb, cqe);
875cdda926dSMintz, Yuval 
876cdda926dSMintz, Yuval cons_buf: /* We still need to handle bd_len_list to consume buffers */
8770500a70dSMichal Kalderon 	if (likely(cqe->bw_ext_bd_len_list[0]))
878cdda926dSMintz, Yuval 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
8790500a70dSMichal Kalderon 				   le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
880cdda926dSMintz, Yuval 
8810500a70dSMichal Kalderon 	if (unlikely(cqe->bw_ext_bd_len_list[1])) {
882cdda926dSMintz, Yuval 		DP_ERR(edev,
8830500a70dSMichal Kalderon 		       "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
884cdda926dSMintz, Yuval 		tpa_info->state = QEDE_AGG_STATE_ERROR;
885cdda926dSMintz, Yuval 	}
886cdda926dSMintz, Yuval }
887cdda926dSMintz, Yuval 
888cdda926dSMintz, Yuval #ifdef CONFIG_INET
889cdda926dSMintz, Yuval static void qede_gro_ip_csum(struct sk_buff *skb)
890cdda926dSMintz, Yuval {
891cdda926dSMintz, Yuval 	const struct iphdr *iph = ip_hdr(skb);
892cdda926dSMintz, Yuval 	struct tcphdr *th;
893cdda926dSMintz, Yuval 
894cdda926dSMintz, Yuval 	skb_set_transport_header(skb, sizeof(struct iphdr));
895cdda926dSMintz, Yuval 	th = tcp_hdr(skb);
896cdda926dSMintz, Yuval 
897cdda926dSMintz, Yuval 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
898cdda926dSMintz, Yuval 				  iph->saddr, iph->daddr, 0);
899cdda926dSMintz, Yuval 
900cdda926dSMintz, Yuval 	tcp_gro_complete(skb);
901cdda926dSMintz, Yuval }
902cdda926dSMintz, Yuval 
903cdda926dSMintz, Yuval static void qede_gro_ipv6_csum(struct sk_buff *skb)
904cdda926dSMintz, Yuval {
905cdda926dSMintz, Yuval 	struct ipv6hdr *iph = ipv6_hdr(skb);
906cdda926dSMintz, Yuval 	struct tcphdr *th;
907cdda926dSMintz, Yuval 
908cdda926dSMintz, Yuval 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
909cdda926dSMintz, Yuval 	th = tcp_hdr(skb);
910cdda926dSMintz, Yuval 
911cdda926dSMintz, Yuval 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
912cdda926dSMintz, Yuval 				  &iph->saddr, &iph->daddr, 0);
913cdda926dSMintz, Yuval 	tcp_gro_complete(skb);
914cdda926dSMintz, Yuval }
915cdda926dSMintz, Yuval #endif
916cdda926dSMintz, Yuval 
917cdda926dSMintz, Yuval static void qede_gro_receive(struct qede_dev *edev,
918cdda926dSMintz, Yuval 			     struct qede_fastpath *fp,
919cdda926dSMintz, Yuval 			     struct sk_buff *skb,
920cdda926dSMintz, Yuval 			     u16 vlan_tag)
921cdda926dSMintz, Yuval {
922cdda926dSMintz, Yuval 	/* The FW can send a single MTU-sized packet from the GRO flow
923cdda926dSMintz, Yuval 	 * (due to aggregation timeout, last segment, etc.) which is not
924cdda926dSMintz, Yuval 	 * really a GRO packet. If the skb has zero frags, simply push it
925cdda926dSMintz, Yuval 	 * to the stack as a non-GSO skb.
926cdda926dSMintz, Yuval 	 */
927cdda926dSMintz, Yuval 	if (unlikely(!skb->data_len)) {
928cdda926dSMintz, Yuval 		skb_shinfo(skb)->gso_type = 0;
929cdda926dSMintz, Yuval 		skb_shinfo(skb)->gso_size = 0;
930cdda926dSMintz, Yuval 		goto send_skb;
931cdda926dSMintz, Yuval 	}
932cdda926dSMintz, Yuval 
933cdda926dSMintz, Yuval #ifdef CONFIG_INET
934cdda926dSMintz, Yuval 	if (skb_shinfo(skb)->gso_size) {
935cdda926dSMintz, Yuval 		skb_reset_network_header(skb);
936cdda926dSMintz, Yuval 
937cdda926dSMintz, Yuval 		switch (skb->protocol) {
938cdda926dSMintz, Yuval 		case htons(ETH_P_IP):
939cdda926dSMintz, Yuval 			qede_gro_ip_csum(skb);
940cdda926dSMintz, Yuval 			break;
941cdda926dSMintz, Yuval 		case htons(ETH_P_IPV6):
942cdda926dSMintz, Yuval 			qede_gro_ipv6_csum(skb);
943cdda926dSMintz, Yuval 			break;
944cdda926dSMintz, Yuval 		default:
945cdda926dSMintz, Yuval 			DP_ERR(edev,
946cdda926dSMintz, Yuval 			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
947cdda926dSMintz, Yuval 			       ntohs(skb->protocol));
948cdda926dSMintz, Yuval 		}
949cdda926dSMintz, Yuval 	}
950cdda926dSMintz, Yuval #endif
951cdda926dSMintz, Yuval 
952cdda926dSMintz, Yuval send_skb:
953cdda926dSMintz, Yuval 	skb_record_rx_queue(skb, fp->rxq->rxq_id);
954cdda926dSMintz, Yuval 	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
955cdda926dSMintz, Yuval }
956cdda926dSMintz, Yuval 
957cdda926dSMintz, Yuval static inline void qede_tpa_cont(struct qede_dev *edev,
958cdda926dSMintz, Yuval 				 struct qede_rx_queue *rxq,
959cdda926dSMintz, Yuval 				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
960cdda926dSMintz, Yuval {
961cdda926dSMintz, Yuval 	int i;
962cdda926dSMintz, Yuval 
963cdda926dSMintz, Yuval 	for (i = 0; cqe->len_list[i]; i++)
964cdda926dSMintz, Yuval 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
965cdda926dSMintz, Yuval 				   le16_to_cpu(cqe->len_list[i]));
966cdda926dSMintz, Yuval 
967cdda926dSMintz, Yuval 	if (unlikely(i > 1))
968cdda926dSMintz, Yuval 		DP_ERR(edev,
969cdda926dSMintz, Yuval 		       "Strange - TPA cont with more than a single len_list entry\n");
970cdda926dSMintz, Yuval }
971cdda926dSMintz, Yuval 
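/* TPA aggregation end: unmap the first buffer if its page is exhausted, add
 * the remaining length list as frags, finalize the skb (protocol, checksum
 * status, coalesced segment count) and hand it to GRO. Returns 1 when a
 * packet was passed up the stack, 0 on error.
 */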
97210a0176eSMintz, Yuval static int qede_tpa_end(struct qede_dev *edev,
973cdda926dSMintz, Yuval 			struct qede_fastpath *fp,
974cdda926dSMintz, Yuval 			struct eth_fast_path_rx_tpa_end_cqe *cqe)
975cdda926dSMintz, Yuval {
976cdda926dSMintz, Yuval 	struct qede_rx_queue *rxq = fp->rxq;
977cdda926dSMintz, Yuval 	struct qede_agg_info *tpa_info;
978cdda926dSMintz, Yuval 	struct sk_buff *skb;
979cdda926dSMintz, Yuval 	int i;
980cdda926dSMintz, Yuval 
981cdda926dSMintz, Yuval 	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
982cdda926dSMintz, Yuval 	skb = tpa_info->skb;
983cdda926dSMintz, Yuval 
9848a863397SManish Chopra 	if (tpa_info->buffer.page_offset == PAGE_SIZE)
9858a863397SManish Chopra 		dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
9868a863397SManish Chopra 			       PAGE_SIZE, rxq->data_direction);
9878a863397SManish Chopra 
988cdda926dSMintz, Yuval 	for (i = 0; cqe->len_list[i]; i++)
989cdda926dSMintz, Yuval 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
990cdda926dSMintz, Yuval 				   le16_to_cpu(cqe->len_list[i]));
991cdda926dSMintz, Yuval 	if (unlikely(i > 1))
992cdda926dSMintz, Yuval 		DP_ERR(edev,
993cdda926dSMintz, Yuval 		       "Strange - TPA end with more than a single len_list entry\n");
994cdda926dSMintz, Yuval 
995cdda926dSMintz, Yuval 	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
996cdda926dSMintz, Yuval 		goto err;
997cdda926dSMintz, Yuval 
998cdda926dSMintz, Yuval 	/* Sanity */
999cdda926dSMintz, Yuval 	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
1000cdda926dSMintz, Yuval 		DP_ERR(edev,
1001cdda926dSMintz, Yuval 		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
1002cdda926dSMintz, Yuval 		       cqe->num_of_bds, tpa_info->frag_id);
1003cdda926dSMintz, Yuval 	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
1004cdda926dSMintz, Yuval 		DP_ERR(edev,
1005cdda926dSMintz, Yuval 		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
1006cdda926dSMintz, Yuval 		       le16_to_cpu(cqe->total_packet_len), skb->len);
1007cdda926dSMintz, Yuval 
1008cdda926dSMintz, Yuval 	/* Finalize the SKB */
1009cdda926dSMintz, Yuval 	skb->protocol = eth_type_trans(skb, edev->ndev);
1010cdda926dSMintz, Yuval 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1011cdda926dSMintz, Yuval 
1012cdda926dSMintz, Yuval 	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
1013cdda926dSMintz, Yuval 	 * to skb_shinfo(skb)->gso_segs
1014cdda926dSMintz, Yuval 	 */
1015cdda926dSMintz, Yuval 	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
1016cdda926dSMintz, Yuval 
1017cdda926dSMintz, Yuval 	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1018cdda926dSMintz, Yuval 
1019cdda926dSMintz, Yuval 	tpa_info->state = QEDE_AGG_STATE_NONE;
1020cdda926dSMintz, Yuval 
102110a0176eSMintz, Yuval 	return 1;
1022cdda926dSMintz, Yuval err:
1023cdda926dSMintz, Yuval 	tpa_info->state = QEDE_AGG_STATE_NONE;
10248a863397SManish Chopra 
10258a863397SManish Chopra 	if (tpa_info->tpa_start_fail) {
10268a863397SManish Chopra 		qede_reuse_page(rxq, &tpa_info->buffer);
10278a863397SManish Chopra 		tpa_info->tpa_start_fail = false;
10288a863397SManish Chopra 	}
10298a863397SManish Chopra 
1030cdda926dSMintz, Yuval 	dev_kfree_skb_any(tpa_info->skb);
1031cdda926dSMintz, Yuval 	tpa_info->skb = NULL;
103210a0176eSMintz, Yuval 	return 0;
1033cdda926dSMintz, Yuval }
1034cdda926dSMintz, Yuval 
qede_check_notunn_csum(u16 flag)1035cdda926dSMintz, Yuval static u8 qede_check_notunn_csum(u16 flag)
1036cdda926dSMintz, Yuval {
1037cdda926dSMintz, Yuval 	u16 csum_flag = 0;
1038cdda926dSMintz, Yuval 	u8 csum = 0;
1039cdda926dSMintz, Yuval 
1040cdda926dSMintz, Yuval 	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1041cdda926dSMintz, Yuval 		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
1042cdda926dSMintz, Yuval 		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1043cdda926dSMintz, Yuval 			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1044cdda926dSMintz, Yuval 		csum = QEDE_CSUM_UNNECESSARY;
1045cdda926dSMintz, Yuval 	}
1046cdda926dSMintz, Yuval 
1047cdda926dSMintz, Yuval 	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1048cdda926dSMintz, Yuval 		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1049cdda926dSMintz, Yuval 
1050cdda926dSMintz, Yuval 	if (csum_flag & flag)
1051cdda926dSMintz, Yuval 		return QEDE_CSUM_ERROR;
1052cdda926dSMintz, Yuval 
1053cdda926dSMintz, Yuval 	return csum;
1054cdda926dSMintz, Yuval }
1055cdda926dSMintz, Yuval 
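/* Validate the Rx checksum parsing flags, using the tunnel-aware variant
 * when the flags indicate a tunnelled packet.
 */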
qede_check_csum(u16 flag)1056cdda926dSMintz, Yuval static u8 qede_check_csum(u16 flag)
1057cdda926dSMintz, Yuval {
1058cdda926dSMintz, Yuval 	if (!qede_tunn_exist(flag))
1059cdda926dSMintz, Yuval 		return qede_check_notunn_csum(flag);
1060cdda926dSMintz, Yuval 	else
1061cdda926dSMintz, Yuval 		return qede_check_tunn_csum(flag);
1062cdda926dSMintz, Yuval }
1063cdda926dSMintz, Yuval 
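/* An IPv4 fragment can be flagged either in the tunnel parsing flags (inner
 * headers) or in the regular parsing flags (outer/plain headers).
 */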
qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe * cqe,u16 flag)1064cdda926dSMintz, Yuval static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
1065cdda926dSMintz, Yuval 				      u16 flag)
1066cdda926dSMintz, Yuval {
1067cdda926dSMintz, Yuval 	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
1068cdda926dSMintz, Yuval 
1069cdda926dSMintz, Yuval 	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
1070cdda926dSMintz, Yuval 			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
1071cdda926dSMintz, Yuval 	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1072cdda926dSMintz, Yuval 		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
1073cdda926dSMintz, Yuval 		return true;
1074cdda926dSMintz, Yuval 
1075cdda926dSMintz, Yuval 	return false;
1076cdda926dSMintz, Yuval }
1077cdda926dSMintz, Yuval 
1078cdda926dSMintz, Yuval /* Return true iff packet is to be passed to stack */
qede_rx_xdp(struct qede_dev * edev,struct qede_fastpath * fp,struct qede_rx_queue * rxq,struct bpf_prog * prog,struct sw_rx_data * bd,struct eth_fast_path_rx_reg_cqe * cqe,u16 * data_offset,u16 * len)1079cdda926dSMintz, Yuval static bool qede_rx_xdp(struct qede_dev *edev,
1080cdda926dSMintz, Yuval 			struct qede_fastpath *fp,
1081cdda926dSMintz, Yuval 			struct qede_rx_queue *rxq,
1082cdda926dSMintz, Yuval 			struct bpf_prog *prog,
1083cdda926dSMintz, Yuval 			struct sw_rx_data *bd,
108415ed8a47SMintz, Yuval 			struct eth_fast_path_rx_reg_cqe *cqe,
1085059eeb07SMintz, Yuval 			u16 *data_offset, u16 *len)
1086cdda926dSMintz, Yuval {
1087cdda926dSMintz, Yuval 	struct xdp_buff xdp;
1088cdda926dSMintz, Yuval 	enum xdp_action act;
1089cdda926dSMintz, Yuval 
109043b5169dSLorenzo Bianconi 	xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
1091be9df4afSLorenzo Bianconi 	xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
1092be9df4afSLorenzo Bianconi 			 *len, false);
1093cdda926dSMintz, Yuval 
1094cdda926dSMintz, Yuval 	act = bpf_prog_run_xdp(prog, &xdp);
1095cdda926dSMintz, Yuval 
1096059eeb07SMintz, Yuval 	/* Recalculate, as XDP might have changed the headers */
1097059eeb07SMintz, Yuval 	*data_offset = xdp.data - xdp.data_hard_start;
1098059eeb07SMintz, Yuval 	*len = xdp.data_end - xdp.data;
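	/* (e.g. the program may have called bpf_xdp_adjust_head() or
	 * bpf_xdp_adjust_tail(), moving xdp.data / xdp.data_end)
	 */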
1099059eeb07SMintz, Yuval 
1100cdda926dSMintz, Yuval 	if (act == XDP_PASS)
1101cdda926dSMintz, Yuval 		return true;
1102cdda926dSMintz, Yuval 
1103cdda926dSMintz, Yuval 	/* Count number of packets not to be passed to stack */
1104cdda926dSMintz, Yuval 	rxq->xdp_no_pass++;
1105cdda926dSMintz, Yuval 
1106cdda926dSMintz, Yuval 	switch (act) {
1107cdda926dSMintz, Yuval 	case XDP_TX:
1108cdda926dSMintz, Yuval 		/* We need the replacement buffer before transmit. */
11094c2bacbeSAlexander Lobakin 		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1110cdda926dSMintz, Yuval 			qede_recycle_rx_bd_ring(rxq, 1);
11114c2bacbeSAlexander Lobakin 
1112a67edbf4SDaniel Borkmann 			trace_xdp_exception(edev->ndev, prog, act);
11134c2bacbeSAlexander Lobakin 			break;
1114cdda926dSMintz, Yuval 		}
1115cdda926dSMintz, Yuval 
1116cdda926dSMintz, Yuval 		/* If transmission fails now, we still have to drop the current
1117cdda926dSMintz, Yuval 		 * buffer, as its replacement was already allocated.
1118cdda926dSMintz, Yuval 		 */
11194c2bacbeSAlexander Lobakin 		if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
1120d1b25b79SAlexander Lobakin 					   *data_offset, *len, bd->data,
1121d1b25b79SAlexander Lobakin 					   NULL))) {
11224c2bacbeSAlexander Lobakin 			dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
11234c2bacbeSAlexander Lobakin 				       rxq->data_direction);
1124cdda926dSMintz, Yuval 			__free_page(bd->data);
11254c2bacbeSAlexander Lobakin 
1126a67edbf4SDaniel Borkmann 			trace_xdp_exception(edev->ndev, prog, act);
11274c2bacbeSAlexander Lobakin 		} else {
11284c2bacbeSAlexander Lobakin 			dma_sync_single_for_device(rxq->dev,
11294c2bacbeSAlexander Lobakin 						   bd->mapping + *data_offset,
11304c2bacbeSAlexander Lobakin 						   *len, rxq->data_direction);
11314c2bacbeSAlexander Lobakin 			fp->xdp_xmit |= QEDE_XDP_TX;
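			/* The XDP Tx doorbell isn't rung here; qede_poll()
			 * rings it once per NAPI run when QEDE_XDP_TX is set.
			 */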
1132cdda926dSMintz, Yuval 		}
1133cdda926dSMintz, Yuval 
1134cdda926dSMintz, Yuval 		/* Regardless, we've consumed an Rx BD */
1135cdda926dSMintz, Yuval 		qede_rx_bd_ring_consume(rxq);
11364c2bacbeSAlexander Lobakin 		break;
1137d1b25b79SAlexander Lobakin 	case XDP_REDIRECT:
1138d1b25b79SAlexander Lobakin 		/* We need the replacement buffer before the redirect. */
1139d1b25b79SAlexander Lobakin 		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1140d1b25b79SAlexander Lobakin 			qede_recycle_rx_bd_ring(rxq, 1);
1141d1b25b79SAlexander Lobakin 
1142d1b25b79SAlexander Lobakin 			trace_xdp_exception(edev->ndev, prog, act);
1143d1b25b79SAlexander Lobakin 			break;
1144d1b25b79SAlexander Lobakin 		}
1145d1b25b79SAlexander Lobakin 
1146d1b25b79SAlexander Lobakin 		dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
1147d1b25b79SAlexander Lobakin 			       rxq->data_direction);
1148d1b25b79SAlexander Lobakin 
1149d1b25b79SAlexander Lobakin 		if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
1150d1b25b79SAlexander Lobakin 			DP_NOTICE(edev, "Failed to redirect the packet\n");
1151d1b25b79SAlexander Lobakin 		else
1152d1b25b79SAlexander Lobakin 			fp->xdp_xmit |= QEDE_XDP_REDIRECT;
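		/* The matching xdp_do_flush() is deferred to qede_poll(), after
		 * the whole Rx budget has been processed.
		 */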
1153d1b25b79SAlexander Lobakin 
1154d1b25b79SAlexander Lobakin 		qede_rx_bd_ring_consume(rxq);
1155d1b25b79SAlexander Lobakin 		break;
1156cdda926dSMintz, Yuval 	default:
1157c8064e5bSPaolo Abeni 		bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
11584c2bacbeSAlexander Lobakin 		fallthrough;
1159cdda926dSMintz, Yuval 	case XDP_ABORTED:
1160a67edbf4SDaniel Borkmann 		trace_xdp_exception(edev->ndev, prog, act);
11614c2bacbeSAlexander Lobakin 		fallthrough;
1162cdda926dSMintz, Yuval 	case XDP_DROP:
1163cdda926dSMintz, Yuval 		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1164cdda926dSMintz, Yuval 	}
1165cdda926dSMintz, Yuval 
1166cdda926dSMintz, Yuval 	return false;
1167cdda926dSMintz, Yuval }
1168cdda926dSMintz, Yuval 
qede_rx_build_jumbo(struct qede_dev * edev,struct qede_rx_queue * rxq,struct sk_buff * skb,struct eth_fast_path_rx_reg_cqe * cqe,u16 first_bd_len)1169cdda926dSMintz, Yuval static int qede_rx_build_jumbo(struct qede_dev *edev,
1170cdda926dSMintz, Yuval 			       struct qede_rx_queue *rxq,
1171cdda926dSMintz, Yuval 			       struct sk_buff *skb,
1172cdda926dSMintz, Yuval 			       struct eth_fast_path_rx_reg_cqe *cqe,
1173cdda926dSMintz, Yuval 			       u16 first_bd_len)
1174cdda926dSMintz, Yuval {
1175cdda926dSMintz, Yuval 	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1176cdda926dSMintz, Yuval 	struct sw_rx_data *bd;
1177cdda926dSMintz, Yuval 	u16 bd_cons_idx;
1178cdda926dSMintz, Yuval 	u8 num_frags;
1179cdda926dSMintz, Yuval 
1180cdda926dSMintz, Yuval 	pkt_len -= first_bd_len;
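	/* The first BD's data is already part of the SKB; only the remainder
	 * of the packet still needs to be attached as page fragments.
	 */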
1181cdda926dSMintz, Yuval 
1182cdda926dSMintz, Yuval 	/* We've already used one BD for the SKB. Now take care of the rest */
1183cdda926dSMintz, Yuval 	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1184cdda926dSMintz, Yuval 		u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1185cdda926dSMintz, Yuval 		    pkt_len;
1186cdda926dSMintz, Yuval 
1187cdda926dSMintz, Yuval 		if (unlikely(!cur_size)) {
1188cdda926dSMintz, Yuval 			DP_ERR(edev,
1189cdda926dSMintz, Yuval 			       "Still got %d BDs for mapping jumbo, but length became 0\n",
1190cdda926dSMintz, Yuval 			       num_frags);
1191cdda926dSMintz, Yuval 			goto out;
1192cdda926dSMintz, Yuval 		}
1193cdda926dSMintz, Yuval 
1194cdda926dSMintz, Yuval 		/* We need a replacement buffer for each BD */
1195e3eef7eeSMintz, Yuval 		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
1196cdda926dSMintz, Yuval 			goto out;
1197cdda926dSMintz, Yuval 
1198cdda926dSMintz, Yuval 		/* Now that we've allocated the replacement buffer,
1199cdda926dSMintz, Yuval 		 * we can safely consume the next BD and map it to the SKB.
1200cdda926dSMintz, Yuval 		 */
1201cdda926dSMintz, Yuval 		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1202cdda926dSMintz, Yuval 		bd = &rxq->sw_rx_ring[bd_cons_idx];
1203cdda926dSMintz, Yuval 		qede_rx_bd_ring_consume(rxq);
1204cdda926dSMintz, Yuval 
1205cdda926dSMintz, Yuval 		dma_unmap_page(rxq->dev, bd->mapping,
1206cdda926dSMintz, Yuval 			       PAGE_SIZE, DMA_FROM_DEVICE);
1207cdda926dSMintz, Yuval 
12087190e9d8SChristophe JAILLET 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
12097190e9d8SChristophe JAILLET 				rxq->rx_headroom, cur_size, PAGE_SIZE);
1210cdda926dSMintz, Yuval 
1211cdda926dSMintz, Yuval 		pkt_len -= cur_size;
1212cdda926dSMintz, Yuval 	}
1213cdda926dSMintz, Yuval 
1214cdda926dSMintz, Yuval 	if (unlikely(pkt_len))
1215cdda926dSMintz, Yuval 		DP_ERR(edev,
1216cdda926dSMintz, Yuval 		       "Mapped all BDs of jumbo, but still have %d bytes\n",
1217cdda926dSMintz, Yuval 		       pkt_len);
1218cdda926dSMintz, Yuval 
1219cdda926dSMintz, Yuval out:
1220cdda926dSMintz, Yuval 	return num_frags;
1221cdda926dSMintz, Yuval }
1222cdda926dSMintz, Yuval 
qede_rx_process_tpa_cqe(struct qede_dev * edev,struct qede_fastpath * fp,struct qede_rx_queue * rxq,union eth_rx_cqe * cqe,enum eth_rx_cqe_type type)1223cdda926dSMintz, Yuval static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1224cdda926dSMintz, Yuval 				   struct qede_fastpath *fp,
1225cdda926dSMintz, Yuval 				   struct qede_rx_queue *rxq,
1226cdda926dSMintz, Yuval 				   union eth_rx_cqe *cqe,
1227cdda926dSMintz, Yuval 				   enum eth_rx_cqe_type type)
1228cdda926dSMintz, Yuval {
1229cdda926dSMintz, Yuval 	switch (type) {
1230cdda926dSMintz, Yuval 	case ETH_RX_CQE_TYPE_TPA_START:
1231cdda926dSMintz, Yuval 		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1232cdda926dSMintz, Yuval 		return 0;
1233cdda926dSMintz, Yuval 	case ETH_RX_CQE_TYPE_TPA_CONT:
1234cdda926dSMintz, Yuval 		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1235cdda926dSMintz, Yuval 		return 0;
1236cdda926dSMintz, Yuval 	case ETH_RX_CQE_TYPE_TPA_END:
123710a0176eSMintz, Yuval 		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1238cdda926dSMintz, Yuval 	default:
1239cdda926dSMintz, Yuval 		return 0;
1240cdda926dSMintz, Yuval 	}
1241cdda926dSMintz, Yuval }
1242cdda926dSMintz, Yuval 
qede_rx_process_cqe(struct qede_dev * edev,struct qede_fastpath * fp,struct qede_rx_queue * rxq)1243cdda926dSMintz, Yuval static int qede_rx_process_cqe(struct qede_dev *edev,
1244cdda926dSMintz, Yuval 			       struct qede_fastpath *fp,
1245cdda926dSMintz, Yuval 			       struct qede_rx_queue *rxq)
1246cdda926dSMintz, Yuval {
1247cdda926dSMintz, Yuval 	struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
1248cdda926dSMintz, Yuval 	struct eth_fast_path_rx_reg_cqe *fp_cqe;
1249cdda926dSMintz, Yuval 	u16 len, pad, bd_cons_idx, parse_flag;
1250cdda926dSMintz, Yuval 	enum eth_rx_cqe_type cqe_type;
1251cdda926dSMintz, Yuval 	union eth_rx_cqe *cqe;
1252cdda926dSMintz, Yuval 	struct sw_rx_data *bd;
1253cdda926dSMintz, Yuval 	struct sk_buff *skb;
1254cdda926dSMintz, Yuval 	__le16 flags;
1255cdda926dSMintz, Yuval 	u8 csum_flag;
1256cdda926dSMintz, Yuval 
1257cdda926dSMintz, Yuval 	/* Get the CQE from the completion ring */
1258cdda926dSMintz, Yuval 	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1259cdda926dSMintz, Yuval 	cqe_type = cqe->fast_path_regular.type;
1260cdda926dSMintz, Yuval 
1261cdda926dSMintz, Yuval 	/* Process an unlikely slowpath event */
1262cdda926dSMintz, Yuval 	if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1263cdda926dSMintz, Yuval 		struct eth_slow_path_rx_cqe *sp_cqe;
1264cdda926dSMintz, Yuval 
1265cdda926dSMintz, Yuval 		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1266cdda926dSMintz, Yuval 		edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1267cdda926dSMintz, Yuval 		return 0;
1268cdda926dSMintz, Yuval 	}
1269cdda926dSMintz, Yuval 
1270cdda926dSMintz, Yuval 	/* Handle TPA cqes */
1271cdda926dSMintz, Yuval 	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1272cdda926dSMintz, Yuval 		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1273cdda926dSMintz, Yuval 
1274cdda926dSMintz, Yuval 	/* Get the data from the SW ring; Consume it only after it's evident
1275cdda926dSMintz, Yuval 	 * we wouldn't recycle it.
1276cdda926dSMintz, Yuval 	 */
1277cdda926dSMintz, Yuval 	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1278cdda926dSMintz, Yuval 	bd = &rxq->sw_rx_ring[bd_cons_idx];
1279cdda926dSMintz, Yuval 
1280cdda926dSMintz, Yuval 	fp_cqe = &cqe->fast_path_regular;
1281cdda926dSMintz, Yuval 	len = le16_to_cpu(fp_cqe->len_on_first_bd);
128215ed8a47SMintz, Yuval 	pad = fp_cqe->placement_offset + rxq->rx_headroom;
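	/* HW places the packet placement_offset bytes into the buffer; add the
	 * headroom the driver reserves in front of each Rx buffer (e.g. for
	 * XDP) to get the data offset within the page.
	 */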
1283cdda926dSMintz, Yuval 
1284cdda926dSMintz, Yuval 	/* Run eBPF program if one is attached */
1285cdda926dSMintz, Yuval 	if (xdp_prog)
1286059eeb07SMintz, Yuval 		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
1287059eeb07SMintz, Yuval 				 &pad, &len))
128810a0176eSMintz, Yuval 			return 0;
1289cdda926dSMintz, Yuval 
1290cdda926dSMintz, Yuval 	/* Checksum errors don't drop the packet here; they are only counted */
1291cdda926dSMintz, Yuval 	flags = cqe->fast_path_regular.pars_flags.flags;
1292cdda926dSMintz, Yuval 	parse_flag = le16_to_cpu(flags);
1293cdda926dSMintz, Yuval 
1294cdda926dSMintz, Yuval 	csum_flag = qede_check_csum(parse_flag);
1295cdda926dSMintz, Yuval 	if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
129658f101bfSManish Chopra 		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
1297cdda926dSMintz, Yuval 			rxq->rx_ip_frags++;
129858f101bfSManish Chopra 		else
1299cdda926dSMintz, Yuval 			rxq->rx_hw_errors++;
1300cdda926dSMintz, Yuval 	}
1301cdda926dSMintz, Yuval 
1302cdda926dSMintz, Yuval 	/* Basic validation passed; need to prepare an SKB. On success this
1303cdda926dSMintz, Yuval 	 * also guarantees that the first BD is finally consumed.
1304cdda926dSMintz, Yuval 	 */
13058a863397SManish Chopra 	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
1306cdda926dSMintz, Yuval 	if (!skb) {
1307cdda926dSMintz, Yuval 		rxq->rx_alloc_errors++;
1308cdda926dSMintz, Yuval 		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1309cdda926dSMintz, Yuval 		return 0;
1310cdda926dSMintz, Yuval 	}
1311cdda926dSMintz, Yuval 
1312cdda926dSMintz, Yuval 	/* In case of a jumbo packet, several PAGE_SIZE'd buffers are pointed
1313cdda926dSMintz, Yuval 	 * to by a single CQE.
1314cdda926dSMintz, Yuval 	 */
1315cdda926dSMintz, Yuval 	if (fp_cqe->bd_num > 1) {
1316cdda926dSMintz, Yuval 		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1317cdda926dSMintz, Yuval 							 fp_cqe, len);
1318cdda926dSMintz, Yuval 
1319cdda926dSMintz, Yuval 		if (unlikely(unmapped_frags > 0)) {
1320cdda926dSMintz, Yuval 			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1321cdda926dSMintz, Yuval 			dev_kfree_skb_any(skb);
1322cdda926dSMintz, Yuval 			return 0;
1323cdda926dSMintz, Yuval 		}
1324cdda926dSMintz, Yuval 	}
1325cdda926dSMintz, Yuval 
1326cdda926dSMintz, Yuval 	/* The SKB contains all the data. Now prepare meta-magic */
1327cdda926dSMintz, Yuval 	skb->protocol = eth_type_trans(skb, edev->ndev);
1328cdda926dSMintz, Yuval 	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
1329cdda926dSMintz, Yuval 	qede_set_skb_csum(skb, csum_flag);
1330cdda926dSMintz, Yuval 	skb_record_rx_queue(skb, rxq->rxq_id);
13314c55215cSSudarsana Reddy Kalluru 	qede_ptp_record_rx_ts(edev, cqe, skb);
1332cdda926dSMintz, Yuval 
1333cdda926dSMintz, Yuval 	/* SKB is prepared - pass it to stack */
1334cdda926dSMintz, Yuval 	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
1335cdda926dSMintz, Yuval 
1336cdda926dSMintz, Yuval 	return 1;
1337cdda926dSMintz, Yuval }
1338cdda926dSMintz, Yuval 
qede_rx_int(struct qede_fastpath * fp,int budget)1339cdda926dSMintz, Yuval static int qede_rx_int(struct qede_fastpath *fp, int budget)
1340cdda926dSMintz, Yuval {
1341cdda926dSMintz, Yuval 	struct qede_rx_queue *rxq = fp->rxq;
1342cdda926dSMintz, Yuval 	struct qede_dev *edev = fp->edev;
134310a0176eSMintz, Yuval 	int work_done = 0, rcv_pkts = 0;
1344cdda926dSMintz, Yuval 	u16 hw_comp_cons, sw_comp_cons;
1345cdda926dSMintz, Yuval 
1346cdda926dSMintz, Yuval 	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1347cdda926dSMintz, Yuval 	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1348cdda926dSMintz, Yuval 
1349cdda926dSMintz, Yuval 	/* Memory barrier to prevent the CPU from speculatively reading the
1350cdda926dSMintz, Yuval 	 * CQE/BD in the while-loop before hw_comp_cons is read. Otherwise the
1351cdda926dSMintz, Yuval 	 * CQE could be read before FW writes it; FW then writes the CQE and
1352cdda926dSMintz, Yuval 	 * SB, the CPU reads the updated hw_comp_cons and processes a stale CQE.
1353cdda926dSMintz, Yuval 	 */
1354cdda926dSMintz, Yuval 	rmb();
1355cdda926dSMintz, Yuval 
1356cdda926dSMintz, Yuval 	/* Loop to complete all indicated BDs */
1357cdda926dSMintz, Yuval 	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
135810a0176eSMintz, Yuval 		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
1359cdda926dSMintz, Yuval 		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1360cdda926dSMintz, Yuval 		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1361cdda926dSMintz, Yuval 		work_done++;
1362cdda926dSMintz, Yuval 	}
1363cdda926dSMintz, Yuval 
136410a0176eSMintz, Yuval 	rxq->rcv_pkts += rcv_pkts;
136510a0176eSMintz, Yuval 
1366e3eef7eeSMintz, Yuval 	/* Allocate replacement buffers */
1367e3eef7eeSMintz, Yuval 	while (rxq->num_rx_buffers - rxq->filled_buffers)
1368e3eef7eeSMintz, Yuval 		if (qede_alloc_rx_buffer(rxq, false))
1369e3eef7eeSMintz, Yuval 			break;
1370e3eef7eeSMintz, Yuval 
1371cdda926dSMintz, Yuval 	/* Update producers */
1372cdda926dSMintz, Yuval 	qede_update_rx_prod(edev, rxq);
1373cdda926dSMintz, Yuval 
1374cdda926dSMintz, Yuval 	return work_done;
1375cdda926dSMintz, Yuval }
1376cdda926dSMintz, Yuval 
qede_poll_is_more_work(struct qede_fastpath * fp)1377cdda926dSMintz, Yuval static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1378cdda926dSMintz, Yuval {
1379cdda926dSMintz, Yuval 	qed_sb_update_sb_idx(fp->sb_info);
1380cdda926dSMintz, Yuval 
1381cdda926dSMintz, Yuval 	/* *_has_*_work() reads the status block, so we must ensure the
1382cdda926dSMintz, Yuval 	 * status block indices have actually been read
1383cdda926dSMintz, Yuval 	 * (qed_sb_update_sb_idx) before this check (*_has_*_work);
1384cdda926dSMintz, Yuval 	 * otherwise we might write a "newer" status block value to HW than
1385cdda926dSMintz, Yuval 	 * the work we have actually handled. Without the rmb, if a DMA
1386cdda926dSMintz, Yuval 	 * arrives right after qede_has_rx_work, the read in
1387cdda926dSMintz, Yuval 	 * qed_sb_update_sb_idx may be postponed until just before *_ack_sb.
1388cdda926dSMintz, Yuval 	 * In that case no further interrupt would arrive until the status
1389cdda926dSMintz, Yuval 	 * block is updated again, while there is still unhandled work.
1390cdda926dSMintz, Yuval 	 */
1391cdda926dSMintz, Yuval 	rmb();
1392cdda926dSMintz, Yuval 
1393cdda926dSMintz, Yuval 	if (likely(fp->type & QEDE_FASTPATH_RX))
1394cdda926dSMintz, Yuval 		if (qede_has_rx_work(fp->rxq))
1395cdda926dSMintz, Yuval 			return true;
1396cdda926dSMintz, Yuval 
1397cdda926dSMintz, Yuval 	if (fp->type & QEDE_FASTPATH_XDP)
1398cdda926dSMintz, Yuval 		if (qede_txq_has_work(fp->xdp_tx))
1399cdda926dSMintz, Yuval 			return true;
1400cdda926dSMintz, Yuval 
14015e7baf0fSManish Chopra 	if (likely(fp->type & QEDE_FASTPATH_TX)) {
14025e7baf0fSManish Chopra 		int cos;
14035e7baf0fSManish Chopra 
14045e7baf0fSManish Chopra 		for_each_cos_in_txq(fp->edev, cos) {
14055e7baf0fSManish Chopra 			if (qede_txq_has_work(&fp->txq[cos]))
1406cdda926dSMintz, Yuval 				return true;
14075e7baf0fSManish Chopra 		}
14085e7baf0fSManish Chopra 	}
1409cdda926dSMintz, Yuval 
1410cdda926dSMintz, Yuval 	return false;
1411cdda926dSMintz, Yuval }
1412cdda926dSMintz, Yuval 
1413cdda926dSMintz, Yuval /*********************
1414cdda926dSMintz, Yuval  * NDO & API related *
1415cdda926dSMintz, Yuval  *********************/
qede_poll(struct napi_struct * napi,int budget)1416cdda926dSMintz, Yuval int qede_poll(struct napi_struct *napi, int budget)
1417cdda926dSMintz, Yuval {
1418cdda926dSMintz, Yuval 	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1419cdda926dSMintz, Yuval 						napi);
1420cdda926dSMintz, Yuval 	struct qede_dev *edev = fp->edev;
1421cdda926dSMintz, Yuval 	int rx_work_done = 0;
14224c2bacbeSAlexander Lobakin 	u16 xdp_prod;
14234c2bacbeSAlexander Lobakin 
14244c2bacbeSAlexander Lobakin 	fp->xdp_xmit = 0;
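	/* Per-poll accumulator: the Rx path sets QEDE_XDP_TX / QEDE_XDP_REDIRECT
	 * here so that the doorbell and the redirect flush below run at most
	 * once per NAPI cycle.
	 */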
1425cdda926dSMintz, Yuval 
14265e7baf0fSManish Chopra 	if (likely(fp->type & QEDE_FASTPATH_TX)) {
14275e7baf0fSManish Chopra 		int cos;
14285e7baf0fSManish Chopra 
14295e7baf0fSManish Chopra 		for_each_cos_in_txq(fp->edev, cos) {
14305e7baf0fSManish Chopra 			if (qede_txq_has_work(&fp->txq[cos]))
14315e7baf0fSManish Chopra 				qede_tx_int(edev, &fp->txq[cos]);
14325e7baf0fSManish Chopra 		}
14335e7baf0fSManish Chopra 	}
1434cdda926dSMintz, Yuval 
1435cdda926dSMintz, Yuval 	if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
1436cdda926dSMintz, Yuval 		qede_xdp_tx_int(edev, fp->xdp_tx);
1437cdda926dSMintz, Yuval 
1438cdda926dSMintz, Yuval 	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1439cdda926dSMintz, Yuval 			qede_has_rx_work(fp->rxq)) ?
1440cdda926dSMintz, Yuval 			qede_rx_int(fp, budget) : 0;
1441*2ccce20dSMagnus Karlsson 
1442*2ccce20dSMagnus Karlsson 	if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
1443*2ccce20dSMagnus Karlsson 		xdp_do_flush();
1444*2ccce20dSMagnus Karlsson 
1445961aa716SBhaskar Upadhaya 	/* Handle case where we are called by netpoll with a budget of 0 */
1446961aa716SBhaskar Upadhaya 	if (rx_work_done < budget || !budget) {
1447cdda926dSMintz, Yuval 		if (!qede_poll_is_more_work(fp)) {
14486ad20165SEric Dumazet 			napi_complete_done(napi, rx_work_done);
1449cdda926dSMintz, Yuval 
1450cdda926dSMintz, Yuval 			/* Update and reenable interrupts */
1451cdda926dSMintz, Yuval 			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1452cdda926dSMintz, Yuval 		} else {
1453cdda926dSMintz, Yuval 			rx_work_done = budget;
1454cdda926dSMintz, Yuval 		}
1455cdda926dSMintz, Yuval 	}
1456cdda926dSMintz, Yuval 
14574c2bacbeSAlexander Lobakin 	if (fp->xdp_xmit & QEDE_XDP_TX) {
14584c2bacbeSAlexander Lobakin 		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1459cdda926dSMintz, Yuval 
1460cdda926dSMintz, Yuval 		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
1461cdda926dSMintz, Yuval 		qede_update_tx_producer(fp->xdp_tx);
1462cdda926dSMintz, Yuval 	}
1463cdda926dSMintz, Yuval 
1464cdda926dSMintz, Yuval 	return rx_work_done;
1465cdda926dSMintz, Yuval }
1466cdda926dSMintz, Yuval 
qede_msix_fp_int(int irq,void * fp_cookie)1467cdda926dSMintz, Yuval irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1468cdda926dSMintz, Yuval {
1469cdda926dSMintz, Yuval 	struct qede_fastpath *fp = fp_cookie;
1470cdda926dSMintz, Yuval 
1471cdda926dSMintz, Yuval 	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1472cdda926dSMintz, Yuval 
1473cdda926dSMintz, Yuval 	napi_schedule_irqoff(&fp->napi);
1474cdda926dSMintz, Yuval 	return IRQ_HANDLED;
1475cdda926dSMintz, Yuval }
1476cdda926dSMintz, Yuval 
1477cdda926dSMintz, Yuval /* Main transmit function */
qede_start_xmit(struct sk_buff * skb,struct net_device * ndev)1478cdda926dSMintz, Yuval netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1479cdda926dSMintz, Yuval {
1480cdda926dSMintz, Yuval 	struct qede_dev *edev = netdev_priv(ndev);
1481cdda926dSMintz, Yuval 	struct netdev_queue *netdev_txq;
1482cdda926dSMintz, Yuval 	struct qede_tx_queue *txq;
1483cdda926dSMintz, Yuval 	struct eth_tx_1st_bd *first_bd;
1484cdda926dSMintz, Yuval 	struct eth_tx_2nd_bd *second_bd = NULL;
1485cdda926dSMintz, Yuval 	struct eth_tx_3rd_bd *third_bd = NULL;
1486cdda926dSMintz, Yuval 	struct eth_tx_bd *tx_data_bd = NULL;
148748848a06SManish Chopra 	u16 txq_index, val = 0;
1488cdda926dSMintz, Yuval 	u8 nbd = 0;
1489cdda926dSMintz, Yuval 	dma_addr_t mapping;
1490cdda926dSMintz, Yuval 	int rc, frag_idx = 0, ipv6_ext = 0;
1491cdda926dSMintz, Yuval 	u8 xmit_type;
1492cdda926dSMintz, Yuval 	u16 idx;
1493cdda926dSMintz, Yuval 	u16 hlen;
1494cdda926dSMintz, Yuval 	bool data_split = false;
1495cdda926dSMintz, Yuval 
1496cdda926dSMintz, Yuval 	/* Get tx-queue context and netdev index */
1497cdda926dSMintz, Yuval 	txq_index = skb_get_queue_mapping(skb);
14985e7baf0fSManish Chopra 	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
14995e7baf0fSManish Chopra 	txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
1500cdda926dSMintz, Yuval 	netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1501cdda926dSMintz, Yuval 
1502cdda926dSMintz, Yuval 	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
1503cdda926dSMintz, Yuval 
1504cdda926dSMintz, Yuval 	xmit_type = qede_xmit_type(skb, &ipv6_ext);
1505cdda926dSMintz, Yuval 
1506cdda926dSMintz, Yuval #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1507cdda926dSMintz, Yuval 	if (qede_pkt_req_lin(skb, xmit_type)) {
1508cdda926dSMintz, Yuval 		if (skb_linearize(skb)) {
1509dcc6abaeSMichael Shteinbok 			txq->tx_mem_alloc_err++;
1510dcc6abaeSMichael Shteinbok 
1511cdda926dSMintz, Yuval 			dev_kfree_skb_any(skb);
1512cdda926dSMintz, Yuval 			return NETDEV_TX_OK;
1513cdda926dSMintz, Yuval 		}
1514cdda926dSMintz, Yuval 	}
1515cdda926dSMintz, Yuval #endif
1516cdda926dSMintz, Yuval 
1517cdda926dSMintz, Yuval 	/* Fill the entry in the SW ring and the BDs in the FW ring */
15185a052d62SSudarsana Reddy Kalluru 	idx = txq->sw_tx_prod;
1519cdda926dSMintz, Yuval 	txq->sw_tx_ring.skbs[idx].skb = skb;
1520cdda926dSMintz, Yuval 	first_bd = (struct eth_tx_1st_bd *)
1521cdda926dSMintz, Yuval 		   qed_chain_produce(&txq->tx_pbl);
1522cdda926dSMintz, Yuval 	memset(first_bd, 0, sizeof(*first_bd));
1523cdda926dSMintz, Yuval 	first_bd->data.bd_flags.bitfields =
1524cdda926dSMintz, Yuval 		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1525cdda926dSMintz, Yuval 
15264c55215cSSudarsana Reddy Kalluru 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
15274c55215cSSudarsana Reddy Kalluru 		qede_ptp_tx_ts(edev, skb);
15284c55215cSSudarsana Reddy Kalluru 
1529cdda926dSMintz, Yuval 	/* Map skb linear data for DMA and set in the first BD */
1530cdda926dSMintz, Yuval 	mapping = dma_map_single(txq->dev, skb->data,
1531cdda926dSMintz, Yuval 				 skb_headlen(skb), DMA_TO_DEVICE);
1532cdda926dSMintz, Yuval 	if (unlikely(dma_mapping_error(txq->dev, mapping))) {
1533cdda926dSMintz, Yuval 		DP_NOTICE(edev, "SKB mapping failed\n");
1534cdda926dSMintz, Yuval 		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1535cdda926dSMintz, Yuval 		qede_update_tx_producer(txq);
1536cdda926dSMintz, Yuval 		return NETDEV_TX_OK;
1537cdda926dSMintz, Yuval 	}
1538cdda926dSMintz, Yuval 	nbd++;
1539cdda926dSMintz, Yuval 	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1540cdda926dSMintz, Yuval 
1541cdda926dSMintz, Yuval 	/* In case there is IPv6 with extension headers or LSO we need 2nd and
1542cdda926dSMintz, Yuval 	 * 3rd BDs.
1543cdda926dSMintz, Yuval 	 */
1544cdda926dSMintz, Yuval 	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
1545cdda926dSMintz, Yuval 		second_bd = (struct eth_tx_2nd_bd *)
1546cdda926dSMintz, Yuval 			qed_chain_produce(&txq->tx_pbl);
1547cdda926dSMintz, Yuval 		memset(second_bd, 0, sizeof(*second_bd));
1548cdda926dSMintz, Yuval 
1549cdda926dSMintz, Yuval 		nbd++;
1550cdda926dSMintz, Yuval 		third_bd = (struct eth_tx_3rd_bd *)
1551cdda926dSMintz, Yuval 			qed_chain_produce(&txq->tx_pbl);
1552cdda926dSMintz, Yuval 		memset(third_bd, 0, sizeof(*third_bd));
1553cdda926dSMintz, Yuval 
1554cdda926dSMintz, Yuval 		nbd++;
1555cdda926dSMintz, Yuval 		/* We need to fill in additional data in second_bd... */
1556cdda926dSMintz, Yuval 		tx_data_bd = (struct eth_tx_bd *)second_bd;
1557cdda926dSMintz, Yuval 	}
1558cdda926dSMintz, Yuval 
1559cdda926dSMintz, Yuval 	if (skb_vlan_tag_present(skb)) {
1560cdda926dSMintz, Yuval 		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
1561cdda926dSMintz, Yuval 		first_bd->data.bd_flags.bitfields |=
1562cdda926dSMintz, Yuval 			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1563cdda926dSMintz, Yuval 	}
1564cdda926dSMintz, Yuval 
1565cdda926dSMintz, Yuval 	/* Fill the parsing flags & params according to the requested offload */
1566cdda926dSMintz, Yuval 	if (xmit_type & XMIT_L4_CSUM) {
1567cdda926dSMintz, Yuval 		/* We don't re-calculate IP checksum as it is already done by
1568cdda926dSMintz, Yuval 		 * the upper stack
1569cdda926dSMintz, Yuval 		 */
1570cdda926dSMintz, Yuval 		first_bd->data.bd_flags.bitfields |=
1571cdda926dSMintz, Yuval 			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1572cdda926dSMintz, Yuval 
1573cdda926dSMintz, Yuval 		if (xmit_type & XMIT_ENC) {
1574cdda926dSMintz, Yuval 			first_bd->data.bd_flags.bitfields |=
1575cdda926dSMintz, Yuval 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
157648848a06SManish Chopra 
157748848a06SManish Chopra 			val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1578cdda926dSMintz, Yuval 		}
1579cdda926dSMintz, Yuval 
1580cdda926dSMintz, Yuval 		/* Legacy FW had flipped behavior in regard to this bit -
1581cdda926dSMintz, Yuval 		 * I.e., needed to set to prevent FW from touching encapsulated
1582cdda926dSMintz, Yuval 		 * packets when it didn't need to.
1583cdda926dSMintz, Yuval 		 */
1584cdda926dSMintz, Yuval 		if (unlikely(txq->is_legacy))
158548848a06SManish Chopra 			val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1586cdda926dSMintz, Yuval 
1587cdda926dSMintz, Yuval 		/* If the packet is IPv6 with extension headers, indicate that
1588cdda926dSMintz, Yuval 		 * to FW and pass a few params, since the device cracker doesn't
1589cdda926dSMintz, Yuval 		 * support parsing IPv6 with extension headers.
1590cdda926dSMintz, Yuval 		 */
1591cdda926dSMintz, Yuval 		if (unlikely(ipv6_ext))
1592cdda926dSMintz, Yuval 			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
1593cdda926dSMintz, Yuval 	}
1594cdda926dSMintz, Yuval 
1595cdda926dSMintz, Yuval 	if (xmit_type & XMIT_LSO) {
1596cdda926dSMintz, Yuval 		first_bd->data.bd_flags.bitfields |=
1597cdda926dSMintz, Yuval 			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
1598cdda926dSMintz, Yuval 		third_bd->data.lso_mss =
1599cdda926dSMintz, Yuval 			cpu_to_le16(skb_shinfo(skb)->gso_size);
1600cdda926dSMintz, Yuval 
1601cdda926dSMintz, Yuval 		if (unlikely(xmit_type & XMIT_ENC)) {
1602cdda926dSMintz, Yuval 			first_bd->data.bd_flags.bitfields |=
1603cdda926dSMintz, Yuval 				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1604cdda926dSMintz, Yuval 
1605cdda926dSMintz, Yuval 			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
1606cdda926dSMintz, Yuval 				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1607cdda926dSMintz, Yuval 
1608cdda926dSMintz, Yuval 				first_bd->data.bd_flags.bitfields |= 1 << tmp;
1609cdda926dSMintz, Yuval 			}
1610cdda926dSMintz, Yuval 			hlen = qede_get_skb_hlen(skb, true);
1611cdda926dSMintz, Yuval 		} else {
1612cdda926dSMintz, Yuval 			first_bd->data.bd_flags.bitfields |=
1613cdda926dSMintz, Yuval 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1614cdda926dSMintz, Yuval 			hlen = qede_get_skb_hlen(skb, false);
1615cdda926dSMintz, Yuval 		}
1616cdda926dSMintz, Yuval 
1617cdda926dSMintz, Yuval 		/* @@@TBD - check whether this is still needed if it isn't removed */
1618cdda926dSMintz, Yuval 		third_bd->data.bitfields |=
1619cdda926dSMintz, Yuval 			cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1620cdda926dSMintz, Yuval 
1621cdda926dSMintz, Yuval 		/* Make life easier for FW guys who can't deal with header and
1622cdda926dSMintz, Yuval 		 * data on same BD. If we need to split, use the second bd...
1623cdda926dSMintz, Yuval 		 */
1624cdda926dSMintz, Yuval 		if (unlikely(skb_headlen(skb) > hlen)) {
1625cdda926dSMintz, Yuval 			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1626cdda926dSMintz, Yuval 				   "TSO split header size is %d (%x:%x)\n",
1627cdda926dSMintz, Yuval 				   first_bd->nbytes, first_bd->addr.hi,
1628cdda926dSMintz, Yuval 				   first_bd->addr.lo);
1629cdda926dSMintz, Yuval 
1630cdda926dSMintz, Yuval 			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
1631cdda926dSMintz, Yuval 					   le32_to_cpu(first_bd->addr.lo)) +
1632cdda926dSMintz, Yuval 					   hlen;
1633cdda926dSMintz, Yuval 
1634cdda926dSMintz, Yuval 			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
1635cdda926dSMintz, Yuval 					      le16_to_cpu(first_bd->nbytes) -
1636cdda926dSMintz, Yuval 					      hlen);
1637cdda926dSMintz, Yuval 
1638cdda926dSMintz, Yuval 			/* this marks the BD as one that has no
1639cdda926dSMintz, Yuval 			 * individual mapping
1640cdda926dSMintz, Yuval 			 */
1641cdda926dSMintz, Yuval 			txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
1642cdda926dSMintz, Yuval 
1643cdda926dSMintz, Yuval 			first_bd->nbytes = cpu_to_le16(hlen);
1644cdda926dSMintz, Yuval 
1645cdda926dSMintz, Yuval 			tx_data_bd = (struct eth_tx_bd *)third_bd;
1646cdda926dSMintz, Yuval 			data_split = true;
1647cdda926dSMintz, Yuval 		}
1648cdda926dSMintz, Yuval 	} else {
16498e227b19SManish Chopra 		if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
16508e227b19SManish Chopra 			DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
16518e227b19SManish Chopra 			qede_free_failed_tx_pkt(txq, first_bd, 0, false);
16528e227b19SManish Chopra 			qede_update_tx_producer(txq);
16538e227b19SManish Chopra 			return NETDEV_TX_OK;
16548e227b19SManish Chopra 		}
16558e227b19SManish Chopra 
165648848a06SManish Chopra 		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
165748848a06SManish Chopra 			 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
1658cdda926dSMintz, Yuval 	}
1659cdda926dSMintz, Yuval 
166048848a06SManish Chopra 	first_bd->data.bitfields = cpu_to_le16(val);
166148848a06SManish Chopra 
1662cdda926dSMintz, Yuval 	/* Handle fragmented skb */
1663cdda926dSMintz, Yuval 	/* Special handling for frags that fit inside the 2nd and 3rd BDs.. */
1664cdda926dSMintz, Yuval 	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
1665cdda926dSMintz, Yuval 		rc = map_frag_to_bd(txq,
1666cdda926dSMintz, Yuval 				    &skb_shinfo(skb)->frags[frag_idx],
1667cdda926dSMintz, Yuval 				    tx_data_bd);
1668cdda926dSMintz, Yuval 		if (rc) {
1669cdda926dSMintz, Yuval 			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1670cdda926dSMintz, Yuval 			qede_update_tx_producer(txq);
1671cdda926dSMintz, Yuval 			return NETDEV_TX_OK;
1672cdda926dSMintz, Yuval 		}
1673cdda926dSMintz, Yuval 
1674cdda926dSMintz, Yuval 		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
1675cdda926dSMintz, Yuval 			tx_data_bd = (struct eth_tx_bd *)third_bd;
1676cdda926dSMintz, Yuval 		else
1677cdda926dSMintz, Yuval 			tx_data_bd = NULL;
1678cdda926dSMintz, Yuval 
1679cdda926dSMintz, Yuval 		frag_idx++;
1680cdda926dSMintz, Yuval 	}
1681cdda926dSMintz, Yuval 
1682cdda926dSMintz, Yuval 	/* map last frags into 4th, 5th .... */
1683cdda926dSMintz, Yuval 	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
1684cdda926dSMintz, Yuval 		tx_data_bd = (struct eth_tx_bd *)
1685cdda926dSMintz, Yuval 			     qed_chain_produce(&txq->tx_pbl);
1686cdda926dSMintz, Yuval 
1687cdda926dSMintz, Yuval 		memset(tx_data_bd, 0, sizeof(*tx_data_bd));
1688cdda926dSMintz, Yuval 
1689cdda926dSMintz, Yuval 		rc = map_frag_to_bd(txq,
1690cdda926dSMintz, Yuval 				    &skb_shinfo(skb)->frags[frag_idx],
1691cdda926dSMintz, Yuval 				    tx_data_bd);
1692cdda926dSMintz, Yuval 		if (rc) {
1693cdda926dSMintz, Yuval 			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1694cdda926dSMintz, Yuval 			qede_update_tx_producer(txq);
1695cdda926dSMintz, Yuval 			return NETDEV_TX_OK;
1696cdda926dSMintz, Yuval 		}
1697cdda926dSMintz, Yuval 	}
1698cdda926dSMintz, Yuval 
1699cdda926dSMintz, Yuval 	/* update the first BD with the actual num BDs */
1700cdda926dSMintz, Yuval 	first_bd->data.nbds = nbd;
1701cdda926dSMintz, Yuval 
1702cdda926dSMintz, Yuval 	netdev_tx_sent_queue(netdev_txq, skb->len);
1703cdda926dSMintz, Yuval 
1704cdda926dSMintz, Yuval 	skb_tx_timestamp(skb);
1705cdda926dSMintz, Yuval 
1706cdda926dSMintz, Yuval 	/* Advance packet producer only before sending the packet since mapping
1707cdda926dSMintz, Yuval 	 * of pages may fail.
1708cdda926dSMintz, Yuval 	 */
17095a052d62SSudarsana Reddy Kalluru 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1710cdda926dSMintz, Yuval 
1711cdda926dSMintz, Yuval 	/* 'next page' entries are counted in the producer value */
1712cdda926dSMintz, Yuval 	txq->tx_db.data.bd_prod =
1713cdda926dSMintz, Yuval 		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1714cdda926dSMintz, Yuval 
17156b16f9eeSFlorian Westphal 	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
1716cdda926dSMintz, Yuval 		qede_update_tx_producer(txq);
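	/* With xmit_more the doorbell is batched; it is still forced below if
	 * the ring is left without room for another packet and the queue has
	 * to be stopped.
	 */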
1717cdda926dSMintz, Yuval 
1718cdda926dSMintz, Yuval 	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1719cdda926dSMintz, Yuval 		      < (MAX_SKB_FRAGS + 1))) {
17206b16f9eeSFlorian Westphal 		if (netdev_xmit_more())
1721cdda926dSMintz, Yuval 			qede_update_tx_producer(txq);
1722cdda926dSMintz, Yuval 
1723cdda926dSMintz, Yuval 		netif_tx_stop_queue(netdev_txq);
1724cdda926dSMintz, Yuval 		txq->stopped_cnt++;
1725cdda926dSMintz, Yuval 		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1726cdda926dSMintz, Yuval 			   "Stop queue was called\n");
1727cdda926dSMintz, Yuval 		/* paired memory barrier is in qede_tx_int(), we have to keep
1728cdda926dSMintz, Yuval 		 * ordering of set_bit() in netif_tx_stop_queue() and read of
1729cdda926dSMintz, Yuval 		 * fp->bd_tx_cons
1730cdda926dSMintz, Yuval 		 */
1731cdda926dSMintz, Yuval 		smp_mb();
1732cdda926dSMintz, Yuval 
1733cdda926dSMintz, Yuval 		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1734cdda926dSMintz, Yuval 		     (MAX_SKB_FRAGS + 1)) &&
1735cdda926dSMintz, Yuval 		    (edev->state == QEDE_STATE_OPEN)) {
1736cdda926dSMintz, Yuval 			netif_tx_wake_queue(netdev_txq);
1737cdda926dSMintz, Yuval 			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1738cdda926dSMintz, Yuval 				   "Wake queue was called\n");
1739cdda926dSMintz, Yuval 		}
1740cdda926dSMintz, Yuval 	}
1741cdda926dSMintz, Yuval 
1742cdda926dSMintz, Yuval 	return NETDEV_TX_OK;
1743cdda926dSMintz, Yuval }
1744cdda926dSMintz, Yuval 
qede_select_queue(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)17450aa4febbSSudarsana Reddy Kalluru u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
1746a350ecceSPaolo Abeni 		      struct net_device *sb_dev)
17470aa4febbSSudarsana Reddy Kalluru {
17480aa4febbSSudarsana Reddy Kalluru 	struct qede_dev *edev = netdev_priv(dev);
17490aa4febbSSudarsana Reddy Kalluru 	int total_txq;
17500aa4febbSSudarsana Reddy Kalluru 
17510aa4febbSSudarsana Reddy Kalluru 	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
17520aa4febbSSudarsana Reddy Kalluru 
17530aa4febbSSudarsana Reddy Kalluru 	return QEDE_TSS_COUNT(edev) ?
1754a350ecceSPaolo Abeni 		netdev_pick_tx(dev, skb, NULL) % total_txq :  0;
17550aa4febbSSudarsana Reddy Kalluru }
17560aa4febbSSudarsana Reddy Kalluru 
1757cdda926dSMintz, Yuval /* 8B udp header + 8B base tunnel header + 32B option length */
1758cdda926dSMintz, Yuval #define QEDE_MAX_TUN_HDR_LEN 48
1759cdda926dSMintz, Yuval 
qede_features_check(struct sk_buff * skb,struct net_device * dev,netdev_features_t features)1760cdda926dSMintz, Yuval netdev_features_t qede_features_check(struct sk_buff *skb,
1761cdda926dSMintz, Yuval 				      struct net_device *dev,
1762cdda926dSMintz, Yuval 				      netdev_features_t features)
1763cdda926dSMintz, Yuval {
1764cdda926dSMintz, Yuval 	if (skb->encapsulation) {
1765cdda926dSMintz, Yuval 		u8 l4_proto = 0;
1766cdda926dSMintz, Yuval 
1767cdda926dSMintz, Yuval 		switch (vlan_get_protocol(skb)) {
1768cdda926dSMintz, Yuval 		case htons(ETH_P_IP):
1769cdda926dSMintz, Yuval 			l4_proto = ip_hdr(skb)->protocol;
1770cdda926dSMintz, Yuval 			break;
1771cdda926dSMintz, Yuval 		case htons(ETH_P_IPV6):
1772cdda926dSMintz, Yuval 			l4_proto = ipv6_hdr(skb)->nexthdr;
1773cdda926dSMintz, Yuval 			break;
1774cdda926dSMintz, Yuval 		default:
1775cdda926dSMintz, Yuval 			return features;
1776cdda926dSMintz, Yuval 		}
1777cdda926dSMintz, Yuval 
1778cdda926dSMintz, Yuval 		/* Disable offloads for geneve tunnels whose header carries an
1779369bfd4eSChopra, Manish 		 * option length greater than 32 bytes, as HW can't parse it,
1780369bfd4eSChopra, Manish 		 * and for UDP ports which are not offloaded.
1781cdda926dSMintz, Yuval 		 */
1782369bfd4eSChopra, Manish 		if (l4_proto == IPPROTO_UDP) {
1783369bfd4eSChopra, Manish 			struct qede_dev *edev = netdev_priv(dev);
1784369bfd4eSChopra, Manish 			u16 hdrlen, vxln_port, gnv_port;
1785369bfd4eSChopra, Manish 
1786369bfd4eSChopra, Manish 			hdrlen = QEDE_MAX_TUN_HDR_LEN;
1787369bfd4eSChopra, Manish 			vxln_port = edev->vxlan_dst_port;
1788369bfd4eSChopra, Manish 			gnv_port = edev->geneve_dst_port;
1789369bfd4eSChopra, Manish 
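			/* Fall back to software checksum/GSO if the tunnel
			 * header is longer than HW can parse, or the UDP
			 * destination port matches neither the offloaded
			 * VXLAN nor GENEVE port.
			 */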
1790369bfd4eSChopra, Manish 			if ((skb_inner_mac_header(skb) -
1791369bfd4eSChopra, Manish 			     skb_transport_header(skb)) > hdrlen ||
1792369bfd4eSChopra, Manish 			     (ntohs(udp_hdr(skb)->dest) != vxln_port &&
1793369bfd4eSChopra, Manish 			      ntohs(udp_hdr(skb)->dest) != gnv_port))
1794cdda926dSMintz, Yuval 				return features & ~(NETIF_F_CSUM_MASK |
1795cdda926dSMintz, Yuval 						    NETIF_F_GSO_MASK);
17965d5647daSManish Chopra 		} else if (l4_proto == IPPROTO_IPIP) {
17975d5647daSManish Chopra 			/* IPIP tunnels are unknown to the device, or at least not supported natively;
17985d5647daSManish Chopra 			 * offloads for them can't be done trivially, so disable them for such skbs.
17995d5647daSManish Chopra 			 */
18005d5647daSManish Chopra 			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1801cdda926dSMintz, Yuval 		}
1802369bfd4eSChopra, Manish 	}
1803cdda926dSMintz, Yuval 
1804cdda926dSMintz, Yuval 	return features;
1805cdda926dSMintz, Yuval }
1806