xref: /openbmc/linux/drivers/net/ethernet/intel/i40e/i40e_txrx.c (revision ea47eed33a3fe3d919e6e3cf4e4eb5507b817188)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/prefetch.h>
5 #include <net/busy_poll.h>
6 #include <linux/bpf_trace.h>
7 #include <net/xdp.h>
8 #include "i40e.h"
9 #include "i40e_trace.h"
10 #include "i40e_prototype.h"
11 
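/**
 * build_ctob - build the cmd_type_offset_bsz qword of a Tx data descriptor
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offset bits
 * @size: size of the Tx data buffer
 * @td_tag: L2 tag 1 (typically the VLAN tag) to insert
 *
 * Packs the DATA descriptor type together with the command, offset, buffer
 * size and L2 tag fields into a single little-endian quadword.
 **/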
12 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
13 				u32 td_tag)
14 {
15 	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
16 			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
17 			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
18 			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
19 			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
20 }
21 
22 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
23 /**
24  * i40e_fdir - Generate a Flow Director descriptor based on fdata
25  * @tx_ring: Tx ring to send buffer on
26  * @fdata: Flow director filter data
27  * @add: Indicate if we are adding a rule or deleting one
28  *
29  **/
30 static void i40e_fdir(struct i40e_ring *tx_ring,
31 		      struct i40e_fdir_filter *fdata, bool add)
32 {
33 	struct i40e_filter_program_desc *fdir_desc;
34 	struct i40e_pf *pf = tx_ring->vsi->back;
35 	u32 flex_ptype, dtype_cmd;
36 	u16 i;
37 
38 	/* grab the next descriptor */
39 	i = tx_ring->next_to_use;
40 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
41 
42 	i++;
43 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
44 
45 	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
46 		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
47 
48 	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
49 		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
50 
51 	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
52 		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
53 
54 	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
55 		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
56 
57 	/* Use LAN VSI Id if not programmed by user */
58 	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
59 		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
60 		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
61 
62 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
63 
64 	dtype_cmd |= add ?
65 		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
66 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
67 		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
68 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;
69 
70 	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
71 		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
72 
73 	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
74 		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
75 
76 	if (fdata->cnt_index) {
77 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
78 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
79 			     ((u32)fdata->cnt_index <<
80 			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
81 	}
82 
83 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
84 	fdir_desc->rsvd = cpu_to_le32(0);
85 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
86 	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
87 }
88 
89 #define I40E_FD_CLEAN_DELAY 10
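/* Maximum number of 1 ms waits for two free descriptors on the FDIR ring in
 * i40e_program_fdir_filter() below.
 */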
90 /**
91  * i40e_program_fdir_filter - Program a Flow Director filter
92  * @fdir_data: Packet data that will be filter parameters
93  * @raw_packet: the pre-allocated packet buffer for FDir
94  * @pf: The PF pointer
95  * @add: True for add/update, False for remove
96  **/
97 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
98 				    u8 *raw_packet, struct i40e_pf *pf,
99 				    bool add)
100 {
101 	struct i40e_tx_buffer *tx_buf, *first;
102 	struct i40e_tx_desc *tx_desc;
103 	struct i40e_ring *tx_ring;
104 	struct i40e_vsi *vsi;
105 	struct device *dev;
106 	dma_addr_t dma;
107 	u32 td_cmd = 0;
108 	u16 i;
109 
110 	/* find existing FDIR VSI */
111 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
112 	if (!vsi)
113 		return -ENOENT;
114 
115 	tx_ring = vsi->tx_rings[0];
116 	dev = tx_ring->dev;
117 
118 	/* we need two descriptors to add/del a filter and we can wait */
119 	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
120 		if (!i)
121 			return -EAGAIN;
122 		msleep_interruptible(1);
123 	}
124 
125 	dma = dma_map_single(dev, raw_packet,
126 			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
127 	if (dma_mapping_error(dev, dma))
128 		goto dma_fail;
129 
130 	/* grab the next descriptor */
131 	i = tx_ring->next_to_use;
132 	first = &tx_ring->tx_bi[i];
133 	i40e_fdir(tx_ring, fdir_data, add);
134 
135 	/* Now program a dummy descriptor */
136 	i = tx_ring->next_to_use;
137 	tx_desc = I40E_TX_DESC(tx_ring, i);
138 	tx_buf = &tx_ring->tx_bi[i];
139 
140 	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
141 
142 	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
143 
144 	/* record length, and DMA address */
145 	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
146 	dma_unmap_addr_set(tx_buf, dma, dma);
147 
148 	tx_desc->buffer_addr = cpu_to_le64(dma);
149 	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
150 
151 	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
152 	tx_buf->raw_buf = (void *)raw_packet;
153 
154 	tx_desc->cmd_type_offset_bsz =
155 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
156 
157 	/* Force memory writes to complete before letting h/w
158 	 * know there are new descriptors to fetch.
159 	 */
160 	wmb();
161 
162 	/* Mark the data descriptor to be watched */
163 	first->next_to_watch = tx_desc;
164 
165 	writel(tx_ring->next_to_use, tx_ring->tail);
166 	return 0;
167 
168 dma_fail:
169 	return -1;
170 }
171 
172 #define IP_HEADER_OFFSET 14
173 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
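/* 42 = 14 byte Ethernet header + 20 byte IPv4 header + 8 byte UDP header;
 * IP_HEADER_OFFSET is the length of the Ethernet header.
 */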
174 /**
175  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
176  * @vsi: pointer to the targeted VSI
177  * @fd_data: the flow director data required for the FDir descriptor
178  * @add: true adds a filter, false removes it
179  *
180  * Returns 0 if the filters were successfully added or removed
181  **/
182 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
183 				   struct i40e_fdir_filter *fd_data,
184 				   bool add)
185 {
186 	struct i40e_pf *pf = vsi->back;
187 	struct udphdr *udp;
188 	struct iphdr *ip;
189 	u8 *raw_packet;
190 	int ret;
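	/* Dummy UDP/IPv4 packet; addresses and ports are filled in below */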
191 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
192 		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
193 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
194 
195 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
196 	if (!raw_packet)
197 		return -ENOMEM;
198 	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
199 
200 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
201 	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
202 	      + sizeof(struct iphdr));
203 
204 	ip->daddr = fd_data->dst_ip;
205 	udp->dest = fd_data->dst_port;
206 	ip->saddr = fd_data->src_ip;
207 	udp->source = fd_data->src_port;
208 
209 	if (fd_data->flex_filter) {
210 		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
211 		__be16 pattern = fd_data->flex_word;
212 		u16 off = fd_data->flex_offset;
213 
214 		*((__force __be16 *)(payload + off)) = pattern;
215 	}
216 
217 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
218 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
219 	if (ret) {
220 		dev_info(&pf->pdev->dev,
221 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
222 			 fd_data->pctype, fd_data->fd_id, ret);
223 		/* Free the packet buffer since it wasn't added to the ring */
224 		kfree(raw_packet);
225 		return -EOPNOTSUPP;
226 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
227 		if (add)
228 			dev_info(&pf->pdev->dev,
229 				 "Filter OK for PCTYPE %d loc = %d\n",
230 				 fd_data->pctype, fd_data->fd_id);
231 		else
232 			dev_info(&pf->pdev->dev,
233 				 "Filter deleted for PCTYPE %d loc = %d\n",
234 				 fd_data->pctype, fd_data->fd_id);
235 	}
236 
237 	if (add)
238 		pf->fd_udp4_filter_cnt++;
239 	else
240 		pf->fd_udp4_filter_cnt--;
241 
242 	return 0;
243 }
244 
245 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
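/* 54 = 14 byte Ethernet header + 20 byte IPv4 header + 20 byte TCP header */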
246 /**
247  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
248  * @vsi: pointer to the targeted VSI
249  * @fd_data: the flow director data required for the FDir descriptor
250  * @add: true adds a filter, false removes it
251  *
252  * Returns 0 if the filters were successfully added or removed
253  **/
254 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
255 				   struct i40e_fdir_filter *fd_data,
256 				   bool add)
257 {
258 	struct i40e_pf *pf = vsi->back;
259 	struct tcphdr *tcp;
260 	struct iphdr *ip;
261 	u8 *raw_packet;
262 	int ret;
263 	/* Dummy packet */
264 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
265 		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
266 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
267 		0x0, 0x72, 0, 0, 0, 0};
268 
269 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
270 	if (!raw_packet)
271 		return -ENOMEM;
272 	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
273 
274 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
275 	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
276 	      + sizeof(struct iphdr));
277 
278 	ip->daddr = fd_data->dst_ip;
279 	tcp->dest = fd_data->dst_port;
280 	ip->saddr = fd_data->src_ip;
281 	tcp->source = fd_data->src_port;
282 
283 	if (fd_data->flex_filter) {
284 		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
285 		__be16 pattern = fd_data->flex_word;
286 		u16 off = fd_data->flex_offset;
287 
288 		*((__force __be16 *)(payload + off)) = pattern;
289 	}
290 
291 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
292 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
293 	if (ret) {
294 		dev_info(&pf->pdev->dev,
295 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
296 			 fd_data->pctype, fd_data->fd_id, ret);
297 		/* Free the packet buffer since it wasn't added to the ring */
298 		kfree(raw_packet);
299 		return -EOPNOTSUPP;
300 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
301 		if (add)
302 			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
303 				 fd_data->pctype, fd_data->fd_id);
304 		else
305 			dev_info(&pf->pdev->dev,
306 				 "Filter deleted for PCTYPE %d loc = %d\n",
307 				 fd_data->pctype, fd_data->fd_id);
308 	}
309 
310 	if (add) {
311 		pf->fd_tcp4_filter_cnt++;
312 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
313 		    I40E_DEBUG_FD & pf->hw.debug_mask)
314 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
315 		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
316 	} else {
317 		pf->fd_tcp4_filter_cnt--;
318 	}
319 
320 	return 0;
321 }
322 
323 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
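/* 46 = 14 byte Ethernet header + 20 byte IPv4 header +
 * 12 byte SCTP common header
 */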
324 /**
325  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
326  * a specific flow spec
327  * @vsi: pointer to the targeted VSI
328  * @fd_data: the flow director data required for the FDir descriptor
329  * @add: true adds a filter, false removes it
330  *
331  * Returns 0 if the filters were successfully added or removed
332  **/
333 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
334 				    struct i40e_fdir_filter *fd_data,
335 				    bool add)
336 {
337 	struct i40e_pf *pf = vsi->back;
338 	struct sctphdr *sctp;
339 	struct iphdr *ip;
340 	u8 *raw_packet;
341 	int ret;
342 	/* Dummy packet */
343 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
344 		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
345 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
346 
347 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
348 	if (!raw_packet)
349 		return -ENOMEM;
350 	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
351 
352 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
353 	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
354 	      + sizeof(struct iphdr));
355 
356 	ip->daddr = fd_data->dst_ip;
357 	sctp->dest = fd_data->dst_port;
358 	ip->saddr = fd_data->src_ip;
359 	sctp->source = fd_data->src_port;
360 
361 	if (fd_data->flex_filter) {
362 		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
363 		__be16 pattern = fd_data->flex_word;
364 		u16 off = fd_data->flex_offset;
365 
366 		*((__force __be16 *)(payload + off)) = pattern;
367 	}
368 
369 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
370 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
371 	if (ret) {
372 		dev_info(&pf->pdev->dev,
373 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
374 			 fd_data->pctype, fd_data->fd_id, ret);
375 		/* Free the packet buffer since it wasn't added to the ring */
376 		kfree(raw_packet);
377 		return -EOPNOTSUPP;
378 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
379 		if (add)
380 			dev_info(&pf->pdev->dev,
381 				 "Filter OK for PCTYPE %d loc = %d\n",
382 				 fd_data->pctype, fd_data->fd_id);
383 		else
384 			dev_info(&pf->pdev->dev,
385 				 "Filter deleted for PCTYPE %d loc = %d\n",
386 				 fd_data->pctype, fd_data->fd_id);
387 	}
388 
389 	if (add)
390 		pf->fd_sctp4_filter_cnt++;
391 	else
392 		pf->fd_sctp4_filter_cnt--;
393 
394 	return 0;
395 }
396 
397 #define I40E_IP_DUMMY_PACKET_LEN 34
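/* 34 = 14 byte Ethernet header + 20 byte IPv4 header, no L4 header */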
398 /**
399  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
400  * a specific flow spec
401  * @vsi: pointer to the targeted VSI
402  * @fd_data: the flow director data required for the FDir descriptor
403  * @add: true adds a filter, false removes it
404  *
405  * Returns 0 if the filters were successfully added or removed
406  **/
407 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
408 				  struct i40e_fdir_filter *fd_data,
409 				  bool add)
410 {
411 	struct i40e_pf *pf = vsi->back;
412 	struct iphdr *ip;
413 	u8 *raw_packet;
414 	int ret;
415 	int i;
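	/* Dummy IPv4-only packet; no L4 header is included */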
416 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
417 		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
418 		0, 0, 0, 0};
419 
420 	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
421 	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
422 		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
423 		if (!raw_packet)
424 			return -ENOMEM;
425 		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
426 		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
427 
428 		ip->saddr = fd_data->src_ip;
429 		ip->daddr = fd_data->dst_ip;
430 		ip->protocol = 0;
431 
432 		if (fd_data->flex_filter) {
433 			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
434 			__be16 pattern = fd_data->flex_word;
435 			u16 off = fd_data->flex_offset;
436 
437 			*((__force __be16 *)(payload + off)) = pattern;
438 		}
439 
440 		fd_data->pctype = i;
441 		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
442 		if (ret) {
443 			dev_info(&pf->pdev->dev,
444 				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
445 				 fd_data->pctype, fd_data->fd_id, ret);
446 			/* The packet buffer wasn't added to the ring so we
447 			 * need to free it now.
448 			 */
449 			kfree(raw_packet);
450 			return -EOPNOTSUPP;
451 		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
452 			if (add)
453 				dev_info(&pf->pdev->dev,
454 					 "Filter OK for PCTYPE %d loc = %d\n",
455 					 fd_data->pctype, fd_data->fd_id);
456 			else
457 				dev_info(&pf->pdev->dev,
458 					 "Filter deleted for PCTYPE %d loc = %d\n",
459 					 fd_data->pctype, fd_data->fd_id);
460 		}
461 	}
462 
463 	if (add)
464 		pf->fd_ip4_filter_cnt++;
465 	else
466 		pf->fd_ip4_filter_cnt--;
467 
468 	return 0;
469 }
470 
471 /**
472  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
473  * @vsi: pointer to the targeted VSI
474  * @input: filter to add or delete
475  * @add: true adds a filter, false removes it
476  *
477  **/
478 int i40e_add_del_fdir(struct i40e_vsi *vsi,
479 		      struct i40e_fdir_filter *input, bool add)
480 {
481 	struct i40e_pf *pf = vsi->back;
482 	int ret;
483 
484 	switch (input->flow_type & ~FLOW_EXT) {
485 	case TCP_V4_FLOW:
486 		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
487 		break;
488 	case UDP_V4_FLOW:
489 		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
490 		break;
491 	case SCTP_V4_FLOW:
492 		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
493 		break;
494 	case IP_USER_FLOW:
495 		switch (input->ip4_proto) {
496 		case IPPROTO_TCP:
497 			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
498 			break;
499 		case IPPROTO_UDP:
500 			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
501 			break;
502 		case IPPROTO_SCTP:
503 			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
504 			break;
505 		case IPPROTO_IP:
506 			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
507 			break;
508 		default:
509 			/* We cannot support masking based on protocol */
510 			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
511 				 input->ip4_proto);
512 			return -EINVAL;
513 		}
514 		break;
515 	default:
516 		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
517 			 input->flow_type);
518 		return -EINVAL;
519 	}
520 
521 	/* The buffer allocated here will normally be freed by
522 	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
523 	 * completion. In the event of an error adding the buffer to the FDIR
524 	 * ring, it will immediately be freed. It may also be freed by
525 	 * i40e_clean_tx_ring() when closing the VSI.
526 	 */
527 	return ret;
528 }
529 
530 /**
531  * i40e_fd_handle_status - check the Programming Status for FD
532  * @rx_ring: the Rx ring for this descriptor
533  * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
534  * @prog_id: the id originally used for programming
535  *
536  * This is used to verify whether the FD programming or invalidation
537  * requested by SW of the HW succeeded, and to take actions accordingly.
538  **/
539 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
540 				  union i40e_rx_desc *rx_desc, u8 prog_id)
541 {
542 	struct i40e_pf *pf = rx_ring->vsi->back;
543 	struct pci_dev *pdev = pf->pdev;
544 	u32 fcnt_prog, fcnt_avail;
545 	u32 error;
546 	u64 qw;
547 
548 	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
549 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
550 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
551 
552 	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
553 		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
554 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
555 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
556 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
557 				 pf->fd_inv);
558 
559 		/* Check if the programming error is for ATR.
560 		 * If so, auto disable ATR and set a state for
561 		 * flush in progress. Next time we come here if flush is in
562 		 * progress do nothing, once flush is complete the state will
563 		 * be cleared.
564 		 */
565 		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
566 			return;
567 
568 		pf->fd_add_err++;
569 		/* store the current atr filter count */
570 		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
571 
572 		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
573 		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
574 			/* These set_bit() calls aren't atomic with the
575 			 * test_bit() here, but worst case we potentially
576 			 * disable ATR and queue a flush right after SB
577 			 * support is re-enabled. That shouldn't cause an
578 			 * issue in practice
579 			 */
580 			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
581 			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
582 		}
583 
584 		/* filter programming failed most likely due to table full */
585 		fcnt_prog = i40e_get_global_fd_count(pf);
586 		fcnt_avail = pf->fdir_pf_filter_count;
587 		/* If ATR is running fcnt_prog can quickly change,
588 		 * if we are very close to full, it makes sense to disable
589 		 * FD ATR/SB and then re-enable it when there is room.
590 		 */
591 		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
592 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
593 			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
594 					      pf->state))
595 				if (I40E_DEBUG_FD & pf->hw.debug_mask)
596 					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
597 		}
598 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
599 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
600 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
601 				 rx_desc->wb.qword0.hi_dword.fd_id);
602 	}
603 }
604 
605 /**
606  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
607  * @ring:      the ring that owns the buffer
608  * @tx_buffer: the buffer to free
609  **/
610 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
611 					    struct i40e_tx_buffer *tx_buffer)
612 {
613 	if (tx_buffer->skb) {
614 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
615 			kfree(tx_buffer->raw_buf);
616 		else if (ring_is_xdp(ring))
617 			xdp_return_frame(tx_buffer->xdpf);
618 		else
619 			dev_kfree_skb_any(tx_buffer->skb);
620 		if (dma_unmap_len(tx_buffer, len))
621 			dma_unmap_single(ring->dev,
622 					 dma_unmap_addr(tx_buffer, dma),
623 					 dma_unmap_len(tx_buffer, len),
624 					 DMA_TO_DEVICE);
625 	} else if (dma_unmap_len(tx_buffer, len)) {
626 		dma_unmap_page(ring->dev,
627 			       dma_unmap_addr(tx_buffer, dma),
628 			       dma_unmap_len(tx_buffer, len),
629 			       DMA_TO_DEVICE);
630 	}
631 
632 	tx_buffer->next_to_watch = NULL;
633 	tx_buffer->skb = NULL;
634 	dma_unmap_len_set(tx_buffer, len, 0);
635 	/* tx_buffer must be completely set up in the transmit path */
636 }
637 
638 /**
639  * i40e_clean_tx_ring - Free all Tx buffers in a ring
640  * @tx_ring: ring to be cleaned
641  **/
642 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
643 {
644 	unsigned long bi_size;
645 	u16 i;
646 
647 	/* ring already cleared, nothing to do */
648 	if (!tx_ring->tx_bi)
649 		return;
650 
651 	/* Free all the Tx ring sk_buffs */
652 	for (i = 0; i < tx_ring->count; i++)
653 		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
654 
655 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
656 	memset(tx_ring->tx_bi, 0, bi_size);
657 
658 	/* Zero out the descriptor ring */
659 	memset(tx_ring->desc, 0, tx_ring->size);
660 
661 	tx_ring->next_to_use = 0;
662 	tx_ring->next_to_clean = 0;
663 
664 	if (!tx_ring->netdev)
665 		return;
666 
667 	/* cleanup Tx queue statistics */
668 	netdev_tx_reset_queue(txring_txq(tx_ring));
669 }
670 
671 /**
672  * i40e_free_tx_resources - Free Tx resources per queue
673  * @tx_ring: Tx descriptor ring for a specific queue
674  *
675  * Free all transmit software resources
676  **/
677 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
678 {
679 	i40e_clean_tx_ring(tx_ring);
680 	kfree(tx_ring->tx_bi);
681 	tx_ring->tx_bi = NULL;
682 
683 	if (tx_ring->desc) {
684 		dma_free_coherent(tx_ring->dev, tx_ring->size,
685 				  tx_ring->desc, tx_ring->dma);
686 		tx_ring->desc = NULL;
687 	}
688 }
689 
690 /**
691  * i40e_get_tx_pending - how many tx descriptors not processed
692  * @ring: the ring of descriptors
693  * @in_sw: use SW variables
694  *
695  * Since there is no access to the ring head register
696  * in XL710, we need to use our local copies
697  **/
698 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
699 {
700 	u32 head, tail;
701 
702 	if (!in_sw) {
703 		head = i40e_get_head(ring);
704 		tail = readl(ring->tail);
705 	} else {
706 		head = ring->next_to_clean;
707 		tail = ring->next_to_use;
708 	}
709 
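	/* Account for ring wrap-around, e.g. with count = 512, head = 500
	 * and tail = 10 there are 10 + 512 - 500 = 22 descriptors pending.
	 */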
710 	if (head != tail)
711 		return (head < tail) ?
712 			tail - head : (tail + ring->count - head);
713 
714 	return 0;
715 }
716 
717 /**
718  * i40e_detect_recover_hung - Function to detect and recover hung queues
719  * @vsi:  pointer to vsi struct with tx queues
720  *
721  * VSI has netdev and netdev has TX queues. This function checks each of
722  * those TX queues for hangs and triggers recovery by issuing a SW interrupt.
723  **/
724 void i40e_detect_recover_hung(struct i40e_vsi *vsi)
725 {
726 	struct i40e_ring *tx_ring = NULL;
727 	struct net_device *netdev;
728 	unsigned int i;
729 	int packets;
730 
731 	if (!vsi)
732 		return;
733 
734 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
735 		return;
736 
737 	netdev = vsi->netdev;
738 	if (!netdev)
739 		return;
740 
741 	if (!netif_carrier_ok(netdev))
742 		return;
743 
744 	for (i = 0; i < vsi->num_queue_pairs; i++) {
745 		tx_ring = vsi->tx_rings[i];
746 		if (tx_ring && tx_ring->desc) {
747 			/* If packet counter has not changed the queue is
748 			 * likely stalled, so force an interrupt for this
749 			 * queue.
750 			 *
751 			 * prev_pkt_ctr would be negative if there was no
752 			 * pending work.
753 			 */
754 			packets = tx_ring->stats.packets & INT_MAX;
755 			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
756 				i40e_force_wb(vsi, tx_ring->q_vector);
757 				continue;
758 			}
759 
760 			/* Memory barrier between read of packet count and call
761 			 * to i40e_get_tx_pending()
762 			 */
763 			smp_rmb();
764 			tx_ring->tx_stats.prev_pkt_ctr =
765 			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
766 		}
767 	}
768 }
769 
770 #define WB_STRIDE 4
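/* Force a descriptor write-back when fewer than WB_STRIDE descriptors are
 * still pending after a clean pass; see the WB_ON_ITR handling in
 * i40e_clean_tx_irq().
 */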
771 
772 /**
773  * i40e_clean_tx_irq - Reclaim resources after transmit completes
774  * @vsi: the VSI we care about
775  * @tx_ring: Tx ring to clean
776  * @napi_budget: Used to determine if we are in netpoll
777  *
778  * Returns true if there's any budget left (i.e. the clean is finished)
779  **/
780 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
781 			      struct i40e_ring *tx_ring, int napi_budget)
782 {
783 	u16 i = tx_ring->next_to_clean;
784 	struct i40e_tx_buffer *tx_buf;
785 	struct i40e_tx_desc *tx_head;
786 	struct i40e_tx_desc *tx_desc;
787 	unsigned int total_bytes = 0, total_packets = 0;
788 	unsigned int budget = vsi->work_limit;
789 
790 	tx_buf = &tx_ring->tx_bi[i];
791 	tx_desc = I40E_TX_DESC(tx_ring, i);
792 	i -= tx_ring->count;
793 
794 	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
795 
796 	do {
797 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
798 
799 		/* if next_to_watch is not set then there is no work pending */
800 		if (!eop_desc)
801 			break;
802 
803 		/* prevent any other reads prior to eop_desc */
804 		smp_rmb();
805 
806 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
807 		/* we have caught up to head, no work left to do */
808 		if (tx_head == tx_desc)
809 			break;
810 
811 		/* clear next_to_watch to prevent false hangs */
812 		tx_buf->next_to_watch = NULL;
813 
814 		/* update the statistics for this packet */
815 		total_bytes += tx_buf->bytecount;
816 		total_packets += tx_buf->gso_segs;
817 
818 		/* free the skb/XDP data */
819 		if (ring_is_xdp(tx_ring))
820 			xdp_return_frame(tx_buf->xdpf);
821 		else
822 			napi_consume_skb(tx_buf->skb, napi_budget);
823 
824 		/* unmap skb header data */
825 		dma_unmap_single(tx_ring->dev,
826 				 dma_unmap_addr(tx_buf, dma),
827 				 dma_unmap_len(tx_buf, len),
828 				 DMA_TO_DEVICE);
829 
830 		/* clear tx_buffer data */
831 		tx_buf->skb = NULL;
832 		dma_unmap_len_set(tx_buf, len, 0);
833 
834 		/* unmap remaining buffers */
835 		while (tx_desc != eop_desc) {
836 			i40e_trace(clean_tx_irq_unmap,
837 				   tx_ring, tx_desc, tx_buf);
838 
839 			tx_buf++;
840 			tx_desc++;
841 			i++;
842 			if (unlikely(!i)) {
843 				i -= tx_ring->count;
844 				tx_buf = tx_ring->tx_bi;
845 				tx_desc = I40E_TX_DESC(tx_ring, 0);
846 			}
847 
848 			/* unmap any remaining paged data */
849 			if (dma_unmap_len(tx_buf, len)) {
850 				dma_unmap_page(tx_ring->dev,
851 					       dma_unmap_addr(tx_buf, dma),
852 					       dma_unmap_len(tx_buf, len),
853 					       DMA_TO_DEVICE);
854 				dma_unmap_len_set(tx_buf, len, 0);
855 			}
856 		}
857 
858 		/* move us one more past the eop_desc for start of next pkt */
859 		tx_buf++;
860 		tx_desc++;
861 		i++;
862 		if (unlikely(!i)) {
863 			i -= tx_ring->count;
864 			tx_buf = tx_ring->tx_bi;
865 			tx_desc = I40E_TX_DESC(tx_ring, 0);
866 		}
867 
868 		prefetch(tx_desc);
869 
870 		/* update budget accounting */
871 		budget--;
872 	} while (likely(budget));
873 
874 	i += tx_ring->count;
875 	tx_ring->next_to_clean = i;
876 	u64_stats_update_begin(&tx_ring->syncp);
877 	tx_ring->stats.bytes += total_bytes;
878 	tx_ring->stats.packets += total_packets;
879 	u64_stats_update_end(&tx_ring->syncp);
880 	tx_ring->q_vector->tx.total_bytes += total_bytes;
881 	tx_ring->q_vector->tx.total_packets += total_packets;
882 
883 	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
884 		/* check to see if there are < 4 descriptors
885 		 * waiting to be written back, then kick the hardware to force
886 		 * them to be written back in case we stay in NAPI.
887 		 * In this mode on X722 we do not enable interrupts.
888 		 */
889 		unsigned int j = i40e_get_tx_pending(tx_ring, false);
890 
891 		if (budget &&
892 		    ((j / WB_STRIDE) == 0) && (j > 0) &&
893 		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
894 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
895 			tx_ring->arm_wb = true;
896 	}
897 
898 	if (ring_is_xdp(tx_ring))
899 		return !!budget;
900 
901 	/* notify netdev of completed buffers */
902 	netdev_tx_completed_queue(txring_txq(tx_ring),
903 				  total_packets, total_bytes);
904 
905 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
906 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
907 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
908 		/* Make sure that anybody stopping the queue after this
909 		 * sees the new next_to_clean.
910 		 */
911 		smp_mb();
912 		if (__netif_subqueue_stopped(tx_ring->netdev,
913 					     tx_ring->queue_index) &&
914 		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
915 			netif_wake_subqueue(tx_ring->netdev,
916 					    tx_ring->queue_index);
917 			++tx_ring->tx_stats.restart_queue;
918 		}
919 	}
920 
921 	return !!budget;
922 }
923 
924 /**
925  * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
926  * @vsi: the VSI we care about
927  * @q_vector: the vector on which to enable writeback
928  *
929  **/
930 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
931 				  struct i40e_q_vector *q_vector)
932 {
933 	u16 flags = q_vector->tx.ring[0].flags;
934 	u32 val;
935 
936 	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
937 		return;
938 
939 	if (q_vector->arm_wb_state)
940 		return;
941 
942 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
943 		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
944 		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
945 
946 		wr32(&vsi->back->hw,
947 		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
948 		     val);
949 	} else {
950 		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
951 		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
952 
953 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
954 	}
955 	q_vector->arm_wb_state = true;
956 }
957 
958 /**
959  * i40e_force_wb - Issue SW Interrupt so HW does a wb
960  * @vsi: the VSI we care about
961  * @q_vector: the vector on which to force writeback
962  *
963  **/
964 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
965 {
966 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
967 		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
968 			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
969 			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
970 			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
971 			  /* allow 00 to be written to the index */
972 
973 		wr32(&vsi->back->hw,
974 		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
975 	} else {
976 		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
977 			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
978 			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
979 			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
980 			/* allow 00 to be written to the index */
981 
982 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
983 	}
984 }
985 
986 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
987 					struct i40e_ring_container *rc)
988 {
989 	return &q_vector->rx == rc;
990 }
991 
992 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
993 {
994 	unsigned int divisor;
995 
996 	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
997 	case I40E_LINK_SPEED_40GB:
998 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
999 		break;
1000 	case I40E_LINK_SPEED_25GB:
1001 	case I40E_LINK_SPEED_20GB:
1002 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1003 		break;
1004 	default:
1005 	case I40E_LINK_SPEED_10GB:
1006 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1007 		break;
1008 	case I40E_LINK_SPEED_1GB:
1009 	case I40E_LINK_SPEED_100MB:
1010 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1011 		break;
1012 	}
1013 
1014 	return divisor;
1015 }
1016 
1017 /**
1018  * i40e_update_itr - update the dynamic ITR value based on statistics
1019  * @q_vector: structure containing interrupt and ring information
1020  * @rc: structure containing ring performance data
1021  *
1022  * Stores a new ITR value based on packets and byte
1023  * counts during the last interrupt.  The advantage of per interrupt
1024  * computation is faster updates and more accurate ITR for the current
1025  * traffic pattern.  Constants in this function were computed
1026  * based on theoretical maximum wire speed and thresholds were set based
1027  * on testing data as well as attempting to minimize response time
1028  * while increasing bulk throughput.
1029  **/
1030 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1031 			    struct i40e_ring_container *rc)
1032 {
1033 	unsigned int avg_wire_size, packets, bytes, itr;
1034 	unsigned long next_update = jiffies;
1035 
1036 	/* If we don't have any rings just leave ourselves set for maximum
1037 	 * possible latency so we take ourselves out of the equation.
1038 	 */
1039 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1040 		return;
1041 
1042 	/* For Rx we want to push the delay up and default to low latency.
1043 	 * for Tx we want to pull the delay down and default to high latency.
1044 	 */
1045 	itr = i40e_container_is_rx(q_vector, rc) ?
1046 	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1047 	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1048 
1049 	/* If we didn't update within up to 1 - 2 jiffies we can assume
1050 	 * that either packets are coming in so slow there hasn't been
1051 	 * any work, or that there is so much work that NAPI is dealing
1052 	 * with interrupt moderation and we don't need to do anything.
1053 	 */
1054 	if (time_after(next_update, rc->next_update))
1055 		goto clear_counts;
1056 
1057 	/* If itr_countdown is set it means we programmed an ITR within
1058 	 * the last 4 interrupt cycles. This has a side effect of us
1059 	 * potentially firing an early interrupt. In order to work around
1060 	 * this we need to throw out any data received for a few
1061 	 * interrupts following the update.
1062 	 */
1063 	if (q_vector->itr_countdown) {
1064 		itr = rc->target_itr;
1065 		goto clear_counts;
1066 	}
1067 
1068 	packets = rc->total_packets;
1069 	bytes = rc->total_bytes;
1070 
1071 	if (i40e_container_is_rx(q_vector, rc)) {
1072 		/* For Rx, if there are 1 to 4 packets and fewer than 9000
1073 		 * bytes, assume there is insufficient data to use the bulk
1074 		 * rate limiting approach unless Tx is already in bulk rate
1075 		 * limiting. We are likely latency driven.
1076 		 */
1077 		if (packets && packets < 4 && bytes < 9000 &&
1078 		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1079 			itr = I40E_ITR_ADAPTIVE_LATENCY;
1080 			goto adjust_by_size;
1081 		}
1082 	} else if (packets < 4) {
1083 		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
1084 		 * bulk mode and we are receiving 4 or fewer packets just
1085 		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1086 		 * that the Rx can relax.
1087 		 */
1088 		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1089 		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1090 		     I40E_ITR_ADAPTIVE_MAX_USECS)
1091 			goto clear_counts;
1092 	} else if (packets > 32) {
1093 		/* If we have processed over 32 packets in a single interrupt
1094 		 * for Tx assume we need to switch over to "bulk" mode.
1095 		 */
1096 		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1097 	}
1098 
1099 	/* We have no packets to actually measure against. This means
1100 	 * either one of the other queues on this vector is active or
1101 	 * we are a Tx queue doing TSO with too high of an interrupt rate.
1102 	 *
1103 	 * Between 4 and 56 we can assume that our current interrupt delay
1104 	 * is only slightly too low. As such we should increase it by a small
1105 	 * fixed amount.
1106 	 */
1107 	if (packets < 56) {
1108 		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1109 		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1110 			itr &= I40E_ITR_ADAPTIVE_LATENCY;
1111 			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1112 		}
1113 		goto clear_counts;
1114 	}
1115 
1116 	if (packets <= 256) {
1117 		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1118 		itr &= I40E_ITR_MASK;
1119 
1120 		/* Between 56 and 112 is our "goldilocks" zone where we are
1121 		 * working out "just right". Just report that our current
1122 		 * ITR is good for us.
1123 		 */
1124 		if (packets <= 112)
1125 			goto clear_counts;
1126 
1127 		/* If packet count is 128 or greater we are likely looking
1128 		 * at a slight overrun of the delay we want. Try halving
1129 		 * our delay to see if that will cut the number of packets
1130 		 * in half per interrupt.
1131 		 */
1132 		itr /= 2;
1133 		itr &= I40E_ITR_MASK;
1134 		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1135 			itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1136 
1137 		goto clear_counts;
1138 	}
1139 
1140 	/* The paths below assume we are dealing with a bulk ITR since
1141 	 * number of packets is greater than 256. We are just going to have
1142 	 * to compute a value and try to bring the count under control,
1143 	 * though for smaller packet sizes there isn't much we can do as
1144 	 * NAPI polling will likely be kicking in sooner rather than later.
1145 	 */
1146 	itr = I40E_ITR_ADAPTIVE_BULK;
1147 
1148 adjust_by_size:
1149 	/* If packet counts are 256 or greater we can assume we have a gross
1150 	 * overestimation of what the rate should be. Instead of trying to fine
1151 	 * tune it just use the formula below to try and dial in an exact value
1152 	 * give the current packet size of the frame.
1153 	 * given the current packet size of the frame.
1154 	avg_wire_size = bytes / packets;
1155 
1156 	/* The following is a crude approximation of:
1157 	 *  wmem_default / (size + overhead) = desired_pkts_per_int
1158 	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1159 	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1160 	 *
1161 	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1162 	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1163 	 * formula down to
1164 	 *
1165 	 *  (170 * (size + 24)) / (size + 640) = ITR
1166 	 *
1167 	 * We first do some math on the packet size and then finally bitshift
1168 	 * by 8 after rounding up. We also have to account for PCIe link speed
1169 	 * difference as ITR scales based on this.
1170 	 */
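	/* For example, at avg_wire_size = 200 the approximation above gives
	 * (170 * (200 + 24)) / (200 + 640) ~= 45 before the scaling below.
	 */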
1171 	if (avg_wire_size <= 60) {
1172 		/* Start at 250k ints/sec */
1173 		avg_wire_size = 4096;
1174 	} else if (avg_wire_size <= 380) {
1175 		/* 250K ints/sec to 60K ints/sec */
1176 		avg_wire_size *= 40;
1177 		avg_wire_size += 1696;
1178 	} else if (avg_wire_size <= 1084) {
1179 		/* 60K ints/sec to 36K ints/sec */
1180 		avg_wire_size *= 15;
1181 		avg_wire_size += 11452;
1182 	} else if (avg_wire_size <= 1980) {
1183 		/* 36K ints/sec to 30K ints/sec */
1184 		avg_wire_size *= 5;
1185 		avg_wire_size += 22420;
1186 	} else {
1187 		/* plateau at a limit of 30K ints/sec */
1188 		avg_wire_size = 32256;
1189 	}
1190 
1191 	/* If we are in low latency mode halve our delay which doubles the
1192 	 * rate to somewhere between 100K and 16K ints/sec
1193 	 */
1194 	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1195 		avg_wire_size /= 2;
1196 
1197 	/* Resultant value is 256 times larger than it needs to be. This
1198 	 * gives us room to adjust the value as needed to either increase
1199 	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1200 	 *
1201 	 * Use addition as we have already recorded the new latency flag
1202 	 * for the ITR value.
1203 	 */
1204 	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1205 	       I40E_ITR_ADAPTIVE_MIN_INC;
1206 
1207 	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1208 		itr &= I40E_ITR_ADAPTIVE_LATENCY;
1209 		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1210 	}
1211 
1212 clear_counts:
1213 	/* write back value */
1214 	rc->target_itr = itr;
1215 
1216 	/* next update should occur within next jiffy */
1217 	rc->next_update = next_update + 1;
1218 
1219 	rc->total_bytes = 0;
1220 	rc->total_packets = 0;
1221 }
1222 
1223 /**
1224  * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1225  * @rx_ring: rx descriptor ring to store buffers on
1226  * @old_buff: donor buffer to have page reused
1227  *
1228  * Synchronizes page for reuse by the adapter
1229  **/
1230 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1231 			       struct i40e_rx_buffer *old_buff)
1232 {
1233 	struct i40e_rx_buffer *new_buff;
1234 	u16 nta = rx_ring->next_to_alloc;
1235 
1236 	new_buff = &rx_ring->rx_bi[nta];
1237 
1238 	/* update, and store next to alloc */
1239 	nta++;
1240 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1241 
1242 	/* transfer page from old buffer to new buffer */
1243 	new_buff->dma		= old_buff->dma;
1244 	new_buff->page		= old_buff->page;
1245 	new_buff->page_offset	= old_buff->page_offset;
1246 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1247 }
1248 
1249 /**
1250  * i40e_rx_is_programming_status - check for programming status descriptor
1251  * @qw: qword representing status_error_len in CPU ordering
1252  * The value in the descriptor length field indicates whether this
1253  * is a programming status descriptor for flow director or FCoE,
1254  * denoted by the value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise
1255  * it is a packet descriptor.
1256  * it is a packet descriptor.
1257  **/
1258 static inline bool i40e_rx_is_programming_status(u64 qw)
1259 {
1260 	/* The Rx filter programming status and SPH bit occupy the same
1261 	 * spot in the descriptor. Since we don't support packet split we
1262 	 * can just reuse the bit as an indication that this is a
1263 	 * programming status descriptor.
1264 	 */
1265 	return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1266 }
1267 
1268 /**
1269  * i40e_clean_programming_status - clean the programming status descriptor
1270  * @rx_ring: the rx ring that has this descriptor
1271  * @rx_desc: the rx descriptor written back by HW
1272  * @qw: qword representing status_error_len in CPU ordering
1273  *
1274  * Flow director should handle FD_FILTER_STATUS to check its filter programming
1275  * status being successful or not and take actions accordingly. FCoE should
1276  * handle its context/filter programming/invalidation status and take actions.
1277  *
1278  **/
1279 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1280 					  union i40e_rx_desc *rx_desc,
1281 					  u64 qw)
1282 {
1283 	struct i40e_rx_buffer *rx_buffer;
1284 	u32 ntc = rx_ring->next_to_clean;
1285 	u8 id;
1286 
1287 	/* fetch, update, and store next to clean */
1288 	rx_buffer = &rx_ring->rx_bi[ntc++];
1289 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1290 	rx_ring->next_to_clean = ntc;
1291 
1292 	prefetch(I40E_RX_DESC(rx_ring, ntc));
1293 
1294 	/* place unused page back on the ring */
1295 	i40e_reuse_rx_page(rx_ring, rx_buffer);
1296 	rx_ring->rx_stats.page_reuse_count++;
1297 
1298 	/* clear contents of buffer_info */
1299 	rx_buffer->page = NULL;
1300 
1301 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1302 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1303 
1304 	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1305 		i40e_fd_handle_status(rx_ring, rx_desc, id);
1306 }
1307 
1308 /**
1309  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1310  * @tx_ring: the tx ring to set up
1311  *
1312  * Return 0 on success, negative on error
1313  **/
1314 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1315 {
1316 	struct device *dev = tx_ring->dev;
1317 	int bi_size;
1318 
1319 	if (!dev)
1320 		return -ENOMEM;
1321 
1322 	/* warn if we are about to overwrite the pointer */
1323 	WARN_ON(tx_ring->tx_bi);
1324 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1325 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1326 	if (!tx_ring->tx_bi)
1327 		goto err;
1328 
1329 	u64_stats_init(&tx_ring->syncp);
1330 
1331 	/* round up to nearest 4K */
1332 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1333 	/* add a u32 for head writeback; the alignment that follows then
1334 	 * guarantees this is at least one cache line in size
1335 	 */
1336 	tx_ring->size += sizeof(u32);
1337 	tx_ring->size = ALIGN(tx_ring->size, 4096);
1338 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1339 					   &tx_ring->dma, GFP_KERNEL);
1340 	if (!tx_ring->desc) {
1341 		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1342 			 tx_ring->size);
1343 		goto err;
1344 	}
1345 
1346 	tx_ring->next_to_use = 0;
1347 	tx_ring->next_to_clean = 0;
1348 	tx_ring->tx_stats.prev_pkt_ctr = -1;
1349 	return 0;
1350 
1351 err:
1352 	kfree(tx_ring->tx_bi);
1353 	tx_ring->tx_bi = NULL;
1354 	return -ENOMEM;
1355 }
1356 
1357 /**
1358  * i40e_clean_rx_ring - Free Rx buffers
1359  * @rx_ring: ring to be cleaned
1360  **/
1361 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1362 {
1363 	unsigned long bi_size;
1364 	u16 i;
1365 
1366 	/* ring already cleared, nothing to do */
1367 	if (!rx_ring->rx_bi)
1368 		return;
1369 
1370 	if (rx_ring->skb) {
1371 		dev_kfree_skb(rx_ring->skb);
1372 		rx_ring->skb = NULL;
1373 	}
1374 
1375 	/* Free all the Rx ring sk_buffs */
1376 	for (i = 0; i < rx_ring->count; i++) {
1377 		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1378 
1379 		if (!rx_bi->page)
1380 			continue;
1381 
1382 		/* Invalidate cache lines that may have been written to by
1383 		 * device so that we avoid corrupting memory.
1384 		 */
1385 		dma_sync_single_range_for_cpu(rx_ring->dev,
1386 					      rx_bi->dma,
1387 					      rx_bi->page_offset,
1388 					      rx_ring->rx_buf_len,
1389 					      DMA_FROM_DEVICE);
1390 
1391 		/* free resources associated with mapping */
1392 		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1393 				     i40e_rx_pg_size(rx_ring),
1394 				     DMA_FROM_DEVICE,
1395 				     I40E_RX_DMA_ATTR);
1396 
1397 		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1398 
1399 		rx_bi->page = NULL;
1400 		rx_bi->page_offset = 0;
1401 	}
1402 
1403 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1404 	memset(rx_ring->rx_bi, 0, bi_size);
1405 
1406 	/* Zero out the descriptor ring */
1407 	memset(rx_ring->desc, 0, rx_ring->size);
1408 
1409 	rx_ring->next_to_alloc = 0;
1410 	rx_ring->next_to_clean = 0;
1411 	rx_ring->next_to_use = 0;
1412 }
1413 
1414 /**
1415  * i40e_free_rx_resources - Free Rx resources
1416  * @rx_ring: ring to clean the resources from
1417  *
1418  * Free all receive software resources
1419  **/
1420 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1421 {
1422 	i40e_clean_rx_ring(rx_ring);
1423 	if (rx_ring->vsi->type == I40E_VSI_MAIN)
1424 		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1425 	rx_ring->xdp_prog = NULL;
1426 	kfree(rx_ring->rx_bi);
1427 	rx_ring->rx_bi = NULL;
1428 
1429 	if (rx_ring->desc) {
1430 		dma_free_coherent(rx_ring->dev, rx_ring->size,
1431 				  rx_ring->desc, rx_ring->dma);
1432 		rx_ring->desc = NULL;
1433 	}
1434 }
1435 
1436 /**
1437  * i40e_setup_rx_descriptors - Allocate Rx descriptors
1438  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1439  *
1440  * Returns 0 on success, negative on failure
1441  **/
1442 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1443 {
1444 	struct device *dev = rx_ring->dev;
1445 	int err = -ENOMEM;
1446 	int bi_size;
1447 
1448 	/* warn if we are about to overwrite the pointer */
1449 	WARN_ON(rx_ring->rx_bi);
1450 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1451 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1452 	if (!rx_ring->rx_bi)
1453 		goto err;
1454 
1455 	u64_stats_init(&rx_ring->syncp);
1456 
1457 	/* Round up to nearest 4K */
1458 	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1459 	rx_ring->size = ALIGN(rx_ring->size, 4096);
1460 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1461 					   &rx_ring->dma, GFP_KERNEL);
1462 
1463 	if (!rx_ring->desc) {
1464 		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1465 			 rx_ring->size);
1466 		goto err;
1467 	}
1468 
1469 	rx_ring->next_to_alloc = 0;
1470 	rx_ring->next_to_clean = 0;
1471 	rx_ring->next_to_use = 0;
1472 
1473 	/* XDP RX-queue info only needed for RX rings exposed to XDP */
1474 	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1475 		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1476 				       rx_ring->queue_index);
1477 		if (err < 0)
1478 			goto err;
1479 	}
1480 
1481 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1482 
1483 	return 0;
1484 err:
1485 	kfree(rx_ring->rx_bi);
1486 	rx_ring->rx_bi = NULL;
1487 	return err;
1488 }
1489 
1490 /**
1491  * i40e_release_rx_desc - Store the new tail and head values
1492  * @rx_ring: ring to bump
1493  * @val: new next_to_use value, also written to the ring tail register
1494  **/
1495 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1496 {
1497 	rx_ring->next_to_use = val;
1498 
1499 	/* update next to alloc since we have filled the ring */
1500 	rx_ring->next_to_alloc = val;
1501 
1502 	/* Force memory writes to complete before letting h/w
1503 	 * know there are new descriptors to fetch.  (Only
1504 	 * applicable for weak-ordered memory model archs,
1505 	 * such as IA-64).
1506 	 */
1507 	wmb();
1508 	writel(val, rx_ring->tail);
1509 }
1510 
1511 /**
1512  * i40e_rx_offset - Return expected offset into page to access data
1513  * @rx_ring: Ring we are requesting offset of
1514  *
1515  * Returns the offset value for ring into the data buffer.
1516  */
1517 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1518 {
1519 	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1520 }
1521 
1522 /**
1523  * i40e_alloc_mapped_page - recycle or make a new page
1524  * @rx_ring: ring to use
1525  * @bi: rx_buffer struct to modify
1526  *
1527  * Returns true if the page was successfully allocated or
1528  * reused.
1529  **/
1530 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1531 				   struct i40e_rx_buffer *bi)
1532 {
1533 	struct page *page = bi->page;
1534 	dma_addr_t dma;
1535 
1536 	/* since we are recycling buffers we should seldom need to alloc */
1537 	if (likely(page)) {
1538 		rx_ring->rx_stats.page_reuse_count++;
1539 		return true;
1540 	}
1541 
1542 	/* alloc new page for storage */
1543 	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1544 	if (unlikely(!page)) {
1545 		rx_ring->rx_stats.alloc_page_failed++;
1546 		return false;
1547 	}
1548 
1549 	/* map page for use */
1550 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1551 				 i40e_rx_pg_size(rx_ring),
1552 				 DMA_FROM_DEVICE,
1553 				 I40E_RX_DMA_ATTR);
1554 
1555 	/* if mapping failed free memory back to system since
1556 	 * there isn't much point in holding memory we can't use
1557 	 */
1558 	if (dma_mapping_error(rx_ring->dev, dma)) {
1559 		__free_pages(page, i40e_rx_pg_order(rx_ring));
1560 		rx_ring->rx_stats.alloc_page_failed++;
1561 		return false;
1562 	}
1563 
1564 	bi->dma = dma;
1565 	bi->page = page;
1566 	bi->page_offset = i40e_rx_offset(rx_ring);
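	/* Bias the page refcount up front so the hot path can track buffer
	 * reuse via the local pagecnt_bias instead of doing an atomic page
	 * refcount update for every received frame.
	 */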
1567 	page_ref_add(page, USHRT_MAX - 1);
1568 	bi->pagecnt_bias = USHRT_MAX;
1569 
1570 	return true;
1571 }
1572 
1573 /**
1574  * i40e_receive_skb - Send a completed packet up the stack
1575  * @rx_ring:  rx ring in play
1576  * @skb: packet to send up
1577  * @vlan_tag: vlan tag for packet
1578  **/
1579 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1580 			     struct sk_buff *skb, u16 vlan_tag)
1581 {
1582 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
1583 
1584 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1585 	    (vlan_tag & VLAN_VID_MASK))
1586 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1587 
1588 	napi_gro_receive(&q_vector->napi, skb);
1589 }
1590 
1591 /**
1592  * i40e_alloc_rx_buffers - Replace used receive buffers
1593  * @rx_ring: ring to place buffers on
1594  * @cleaned_count: number of buffers to replace
1595  *
1596  * Returns false if all allocations were successful, true if any fail
1597  **/
1598 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1599 {
1600 	u16 ntu = rx_ring->next_to_use;
1601 	union i40e_rx_desc *rx_desc;
1602 	struct i40e_rx_buffer *bi;
1603 
1604 	/* do nothing if no valid netdev defined */
1605 	if (!rx_ring->netdev || !cleaned_count)
1606 		return false;
1607 
1608 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
1609 	bi = &rx_ring->rx_bi[ntu];
1610 
1611 	do {
1612 		if (!i40e_alloc_mapped_page(rx_ring, bi))
1613 			goto no_buffers;
1614 
1615 		/* sync the buffer for use by the device */
1616 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1617 						 bi->page_offset,
1618 						 rx_ring->rx_buf_len,
1619 						 DMA_FROM_DEVICE);
1620 
1621 		/* Refresh the desc even if buffer_addrs didn't change
1622 		 * because each write-back erases this info.
1623 		 */
1624 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1625 
1626 		rx_desc++;
1627 		bi++;
1628 		ntu++;
1629 		if (unlikely(ntu == rx_ring->count)) {
1630 			rx_desc = I40E_RX_DESC(rx_ring, 0);
1631 			bi = rx_ring->rx_bi;
1632 			ntu = 0;
1633 		}
1634 
1635 		/* clear the status bits for the next_to_use descriptor */
1636 		rx_desc->wb.qword1.status_error_len = 0;
1637 
1638 		cleaned_count--;
1639 	} while (cleaned_count);
1640 
1641 	if (rx_ring->next_to_use != ntu)
1642 		i40e_release_rx_desc(rx_ring, ntu);
1643 
1644 	return false;
1645 
1646 no_buffers:
1647 	if (rx_ring->next_to_use != ntu)
1648 		i40e_release_rx_desc(rx_ring, ntu);
1649 
1650 	/* make sure to come back via polling to try again after
1651 	 * allocation failure
1652 	 */
1653 	return true;
1654 }
1655 
1656 /**
1657  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1658  * @vsi: the VSI we care about
1659  * @skb: skb currently being received and modified
1660  * @rx_desc: the receive descriptor
1661  **/
1662 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1663 				    struct sk_buff *skb,
1664 				    union i40e_rx_desc *rx_desc)
1665 {
1666 	struct i40e_rx_ptype_decoded decoded;
1667 	u32 rx_error, rx_status;
1668 	bool ipv4, ipv6;
1669 	u8 ptype;
1670 	u64 qword;
1671 
1672 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1673 	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1674 	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1675 		   I40E_RXD_QW1_ERROR_SHIFT;
1676 	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1677 		    I40E_RXD_QW1_STATUS_SHIFT;
1678 	decoded = decode_rx_desc_ptype(ptype);
1679 
1680 	skb->ip_summed = CHECKSUM_NONE;
1681 
1682 	skb_checksum_none_assert(skb);
1683 
1684 	/* Rx csum enabled and ip headers found? */
1685 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1686 		return;
1687 
1688 	/* did the hardware decode the packet and checksum? */
1689 	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1690 		return;
1691 
1692 	/* both known and outer_ip must be set for the below code to work */
1693 	if (!(decoded.known && decoded.outer_ip))
1694 		return;
1695 
1696 	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1697 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1698 	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1699 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1700 
1701 	if (ipv4 &&
1702 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1703 			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1704 		goto checksum_fail;
1705 
1706 	/* likely incorrect csum if alternate IP extension headers found */
1707 	if (ipv6 &&
1708 	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1709 		/* don't increment checksum err here, non-fatal err */
1710 		return;
1711 
1712 	/* there was some L4 error, count error and punt packet to the stack */
1713 	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1714 		goto checksum_fail;
1715 
1716 	/* handle packets that were not able to be checksummed due
1717 	 * to arrival speed.  In this case the stack can compute
1718 	 * the csum.
1719 	 */
1720 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1721 		return;
1722 
1723 	/* If there is an outer header present that might contain a checksum
1724 	 * we need to bump the checksum level by 1 to reflect the fact that
1725 	 * we are indicating we validated the inner checksum.
1726 	 */
1727 	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1728 		skb->csum_level = 1;
1729 
1730 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
1731 	switch (decoded.inner_prot) {
1732 	case I40E_RX_PTYPE_INNER_PROT_TCP:
1733 	case I40E_RX_PTYPE_INNER_PROT_UDP:
1734 	case I40E_RX_PTYPE_INNER_PROT_SCTP:
1735 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1736 		/* fall through */
1737 	default:
1738 		break;
1739 	}
1740 
1741 	return;
1742 
1743 checksum_fail:
1744 	vsi->back->hw_csum_rx_error++;
1745 }
1746 
1747 /**
1748  * i40e_ptype_to_htype - get a hash type
1749  * @ptype: the ptype value from the descriptor
1750  *
1751  * Returns a hash type to be used by skb_set_hash
1752  **/
1753 static inline int i40e_ptype_to_htype(u8 ptype)
1754 {
1755 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1756 
1757 	if (!decoded.known)
1758 		return PKT_HASH_TYPE_NONE;
1759 
1760 	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1761 	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1762 		return PKT_HASH_TYPE_L4;
1763 	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1764 		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1765 		return PKT_HASH_TYPE_L3;
1766 	else
1767 		return PKT_HASH_TYPE_L2;
1768 }
1769 
1770 /**
1771  * i40e_rx_hash - set the hash value in the skb
1772  * @ring: descriptor ring
1773  * @rx_desc: specific descriptor
1774  * @skb: skb currently being received and modified
1775  * @rx_ptype: Rx packet type
1776  **/
1777 static inline void i40e_rx_hash(struct i40e_ring *ring,
1778 				union i40e_rx_desc *rx_desc,
1779 				struct sk_buff *skb,
1780 				u8 rx_ptype)
1781 {
1782 	u32 hash;
1783 	const __le64 rss_mask =
1784 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1785 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1786 
1787 	if (!(ring->netdev->features & NETIF_F_RXHASH))
1788 		return;
1789 
1790 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1791 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1792 		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1793 	}
1794 }
1795 
1796 /**
1797  * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1798  * @rx_ring: rx descriptor ring packet is being transacted on
1799  * @rx_desc: pointer to the EOP Rx descriptor
1800  * @skb: pointer to current skb being populated
1801  * @rx_ptype: the packet type decoded by hardware
1802  *
1803  * This function checks the ring, descriptor, and packet information in
1804  * order to populate the hash, checksum, VLAN, protocol, and
1805  * other fields within the skb.
1806  **/
1807 static inline
1808 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1809 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1810 			     u8 rx_ptype)
1811 {
1812 	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1813 	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1814 			I40E_RXD_QW1_STATUS_SHIFT;
1815 	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1816 	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1817 		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1818 
1819 	if (unlikely(tsynvalid))
1820 		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1821 
1822 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1823 
1824 	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1825 
1826 	skb_record_rx_queue(skb, rx_ring->queue_index);
1827 
1828 	/* modifies the skb - consumes the enet header */
1829 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1830 }
1831 
1832 /**
1833  * i40e_cleanup_headers - Correct empty headers
1834  * @rx_ring: rx descriptor ring packet is being transacted on
1835  * @skb: pointer to current skb being fixed
1836  * @rx_desc: pointer to the EOP Rx descriptor
1837  *
1838  * Also address the case where we are pulling data in on pages only
1839  * and as such no data is present in the skb header.
1840  *
1841  * In addition if skb is not at least 60 bytes we need to pad it so that
1842  * it is large enough to qualify as a valid Ethernet frame.
1843  *
1844  * Returns true if an error was encountered and skb was freed.
1845  **/
1846 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1847 				 union i40e_rx_desc *rx_desc)
1848 
1849 {
1850 	/* XDP packets use error pointer so abort at this point */
1851 	if (IS_ERR(skb))
1852 		return true;
1853 
1854 	/* ERR_MASK will only have valid bits if EOP set, and
1855 	 * what we are doing here is actually checking
1856 	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1857 	 * the error field
1858 	 */
1859 	if (unlikely(i40e_test_staterr(rx_desc,
1860 				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1861 		dev_kfree_skb_any(skb);
1862 		return true;
1863 	}
1864 
1865 	/* if eth_skb_pad returns an error the skb was freed */
1866 	if (eth_skb_pad(skb))
1867 		return true;
1868 
1869 	return false;
1870 }
1871 
1872 /**
1873  * i40e_page_is_reusable - check if any reuse is possible
1874  * @page: page struct to check
1875  *
1876  * A page is not reusable if it was allocated under low memory
1877  * conditions, or it's not in the same NUMA node as this CPU.
1878  */
1879 static inline bool i40e_page_is_reusable(struct page *page)
1880 {
1881 	return (page_to_nid(page) == numa_mem_id()) &&
1882 		!page_is_pfmemalloc(page);
1883 }
1884 
1885 /**
1886  * i40e_can_reuse_rx_page - Determine if this page can be reused by
1887  * the adapter for another receive
1888  *
1889  * @rx_buffer: buffer containing the page
1890  *
1891  * If page is reusable, rx_buffer->page_offset is adjusted to point to
1892  * an unused region in the page.
1893  *
1894  * For small pages, @truesize will be a constant value, half the size
1895  * of the memory at page.  We'll attempt to alternate between high and
1896  * low halves of the page, with one half ready for use by the hardware
1897  * and the other half being consumed by the stack.  We use the page
1898  * ref count to determine whether the stack has finished consuming the
1899  * portion of this page that was passed up with a previous packet.  If
1900  * the page ref count is >1, we'll assume the "other" half page is
1901  * still busy, and this page cannot be reused.
1902  *
1903  * For larger pages, @truesize will be the actual space used by the
1904  * received packet (adjusted upward to an even multiple of the cache
1905  * line size).  This will advance through the page by the amount
1906  * actually consumed by the received packets while there is still
1907  * space for a buffer.  Each region of larger pages will be used at
1908  * most once, after which the page will not be reused.
1909  *
1910  * In either case, if the page is reusable its refcount is increased.
1911  **/
1912 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1913 {
1914 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1915 	struct page *page = rx_buffer->page;
1916 
1917 	/* Is any reuse possible? */
1918 	if (unlikely(!i40e_page_is_reusable(page)))
1919 		return false;
1920 
1921 #if (PAGE_SIZE < 8192)
1922 	/* if we are only owner of page we can reuse it */
1923 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
1924 		return false;
1925 #else
1926 #define I40E_LAST_OFFSET \
1927 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1928 	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1929 		return false;
1930 #endif
1931 
1932 	/* If we have drained the page fragment pool we need to update
1933 	 * the pagecnt_bias and page count so that we fully restock the
1934 	 * number of references the driver holds.
1935 	 */
1936 	if (unlikely(pagecnt_bias == 1)) {
1937 		page_ref_add(page, USHRT_MAX - 1);
1938 		rx_buffer->pagecnt_bias = USHRT_MAX;
1939 	}
1940 
1941 	return true;
1942 }
1943 
1944 /**
1945  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1946  * @rx_ring: rx descriptor ring to transact packets on
1947  * @rx_buffer: buffer containing page to add
1948  * @skb: sk_buff to place the data into
1949  * @size: packet length from rx_desc
1950  *
1951  * This function will add the data contained in rx_buffer->page to the skb.
1952  * It will just attach the page as a frag to the skb.
1953  *
1954  * The function will then update the page offset.
1955  **/
1956 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1957 			     struct i40e_rx_buffer *rx_buffer,
1958 			     struct sk_buff *skb,
1959 			     unsigned int size)
1960 {
1961 #if (PAGE_SIZE < 8192)
1962 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1963 #else
1964 	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1965 #endif
1966 
1967 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1968 			rx_buffer->page_offset, size, truesize);
1969 
1970 	/* page is being used so we must update the page offset */
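	/* Illustrative note: for order-0 pages the XOR below simply flips
	 * page_offset between the two half-page buffers, while for larger
	 * pages the offset walks forward through the page until
	 * i40e_can_reuse_rx_page() reports it is no longer reusable.
	 */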
1971 #if (PAGE_SIZE < 8192)
1972 	rx_buffer->page_offset ^= truesize;
1973 #else
1974 	rx_buffer->page_offset += truesize;
1975 #endif
1976 }
1977 
1978 /**
1979  * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1980  * @rx_ring: rx descriptor ring to transact packets on
1981  * @size: size of buffer to add to skb
1982  *
1983  * This function will pull an Rx buffer from the ring and synchronize it
1984  * for use by the CPU.
1985  */
1986 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1987 						 const unsigned int size)
1988 {
1989 	struct i40e_rx_buffer *rx_buffer;
1990 
1991 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1992 	prefetchw(rx_buffer->page);
1993 
1994 	/* we are reusing so sync this buffer for CPU use */
1995 	dma_sync_single_range_for_cpu(rx_ring->dev,
1996 				      rx_buffer->dma,
1997 				      rx_buffer->page_offset,
1998 				      size,
1999 				      DMA_FROM_DEVICE);
2000 
2001 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
2002 	rx_buffer->pagecnt_bias--;
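	/* pagecnt_bias tracks how many of the driver's pre-taken page
	 * references are still unspent; i40e_can_reuse_rx_page() compares it
	 * with page_count() to tell whether the stack still owns part of
	 * this page.
	 */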
2003 
2004 	return rx_buffer;
2005 }
2006 
2007 /**
2008  * i40e_construct_skb - Allocate skb and populate it
2009  * @rx_ring: rx descriptor ring to transact packets on
2010  * @rx_buffer: rx buffer to pull data from
2011  * @xdp: xdp_buff pointing to the data
2012  *
2013  * This function allocates an skb.  It then populates it with the page
2014  * data from the current receive descriptor, taking care to set up the
2015  * skb correctly.
2016  */
2017 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2018 					  struct i40e_rx_buffer *rx_buffer,
2019 					  struct xdp_buff *xdp)
2020 {
2021 	unsigned int size = xdp->data_end - xdp->data;
2022 #if (PAGE_SIZE < 8192)
2023 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2024 #else
2025 	unsigned int truesize = SKB_DATA_ALIGN(size);
2026 #endif
2027 	unsigned int headlen;
2028 	struct sk_buff *skb;
2029 
2030 	/* prefetch first cache line of first page */
2031 	prefetch(xdp->data);
2032 #if L1_CACHE_BYTES < 128
2033 	prefetch(xdp->data + L1_CACHE_BYTES);
2034 #endif
2035 	/* Note, we get here by enabling legacy-rx via:
2036 	 *
2037 	 *    ethtool --set-priv-flags <dev> legacy-rx on
2038 	 *
2039 	 * In this mode, we currently get 0 extra XDP headroom as
2040 	 * opposed to having legacy-rx off, where we process XDP
2041 	 * packets going to stack via i40e_build_skb(). The latter
2042 	 * provides us currently with 192 bytes of headroom.
2043 	 *
2044 	 * For i40e_construct_skb() mode it means that the
2045 	 * xdp->data_meta will always point to xdp->data, since
2046 	 * the helper cannot expand the head. Should this ever
2047 	 * change in the future for legacy-rx mode on, then let's also
2048 	 * add xdp->data_meta handling here.
2049 	 */
2050 
2051 	/* allocate a skb to store the frags */
2052 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2053 			       I40E_RX_HDR_SIZE,
2054 			       GFP_ATOMIC | __GFP_NOWARN);
2055 	if (unlikely(!skb))
2056 		return NULL;
2057 
2058 	/* Determine available headroom for copy */
2059 	headlen = size;
2060 	if (headlen > I40E_RX_HDR_SIZE)
2061 		headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2062 
2063 	/* align pull length to size of long to optimize memcpy performance */
2064 	memcpy(__skb_put(skb, headlen), xdp->data,
2065 	       ALIGN(headlen, sizeof(long)));
2066 
2067 	/* update all of the pointers */
2068 	size -= headlen;
2069 	if (size) {
2070 		skb_add_rx_frag(skb, 0, rx_buffer->page,
2071 				rx_buffer->page_offset + headlen,
2072 				size, truesize);
2073 
2074 		/* buffer is used by skb, update page_offset */
2075 #if (PAGE_SIZE < 8192)
2076 		rx_buffer->page_offset ^= truesize;
2077 #else
2078 		rx_buffer->page_offset += truesize;
2079 #endif
2080 	} else {
2081 		/* buffer is unused, reset bias back to rx_buffer */
2082 		rx_buffer->pagecnt_bias++;
2083 	}
2084 
2085 	return skb;
2086 }
2087 
2088 /**
2089  * i40e_build_skb - Build skb around an existing buffer
2090  * @rx_ring: Rx descriptor ring to transact packets on
2091  * @rx_buffer: Rx buffer to pull data from
2092  * @xdp: xdp_buff pointing to the data
2093  *
2094  * This function builds an skb around an existing Rx buffer, taking care
2095  * to set up the skb correctly and avoid any memcpy overhead.
2096  */
2097 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2098 				      struct i40e_rx_buffer *rx_buffer,
2099 				      struct xdp_buff *xdp)
2100 {
2101 	unsigned int metasize = xdp->data - xdp->data_meta;
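	/* note that the truesize handed to build_skb() below must also leave
	 * room for the struct skb_shared_info placed at the end of the buffer
	 */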
2102 #if (PAGE_SIZE < 8192)
2103 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2104 #else
2105 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2106 				SKB_DATA_ALIGN(I40E_SKB_PAD +
2107 					       (xdp->data_end -
2108 						xdp->data_hard_start));
2109 #endif
2110 	struct sk_buff *skb;
2111 
2112 	/* Prefetch first cache line of first page. If xdp->data_meta
2113 	 * is unused, this points exactly to xdp->data, otherwise we
2114 	 * likely have a consumer accessing first few bytes of meta
2115 	 * data, and then actual data.
2116 	 */
2117 	prefetch(xdp->data_meta);
2118 #if L1_CACHE_BYTES < 128
2119 	prefetch(xdp->data_meta + L1_CACHE_BYTES);
2120 #endif
2121 	/* build an skb around the page buffer */
2122 	skb = build_skb(xdp->data_hard_start, truesize);
2123 	if (unlikely(!skb))
2124 		return NULL;
2125 
2126 	/* update pointers within the skb to store the data */
2127 	skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
2128 	__skb_put(skb, xdp->data_end - xdp->data);
2129 	if (metasize)
2130 		skb_metadata_set(skb, metasize);
2131 
2132 	/* buffer is used by skb, update page_offset */
2133 #if (PAGE_SIZE < 8192)
2134 	rx_buffer->page_offset ^= truesize;
2135 #else
2136 	rx_buffer->page_offset += truesize;
2137 #endif
2138 
2139 	return skb;
2140 }
2141 
2142 /**
2143  * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2144  * @rx_ring: rx descriptor ring to transact packets on
2145  * @rx_buffer: rx buffer to pull data from
2146  *
2147  * This function will clean up the contents of the rx_buffer.  It will
2148  * either recycle the buffer or unmap it and free the associated resources.
2149  */
2150 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2151 			       struct i40e_rx_buffer *rx_buffer)
2152 {
2153 	if (i40e_can_reuse_rx_page(rx_buffer)) {
2154 		/* hand second half of page back to the ring */
2155 		i40e_reuse_rx_page(rx_ring, rx_buffer);
2156 		rx_ring->rx_stats.page_reuse_count++;
2157 	} else {
2158 		/* we are not reusing the buffer so unmap it */
2159 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2160 				     i40e_rx_pg_size(rx_ring),
2161 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2162 		__page_frag_cache_drain(rx_buffer->page,
2163 					rx_buffer->pagecnt_bias);
2164 	}
2165 
2166 	/* clear contents of buffer_info */
2167 	rx_buffer->page = NULL;
2168 }
2169 
2170 /**
2171  * i40e_is_non_eop - process handling of non-EOP buffers
2172  * @rx_ring: Rx ring being processed
2173  * @rx_desc: Rx descriptor for current buffer
2174  * @skb: Current socket buffer containing buffer in progress
2175  *
2176  * This function updates next to clean.  If the buffer is an EOP buffer
2177  * this function exits returning false, otherwise it will place the
2178  * sk_buff in the next buffer to be chained and return true indicating
2179  * that this is in fact a non-EOP buffer.
2180  **/
2181 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2182 			    union i40e_rx_desc *rx_desc,
2183 			    struct sk_buff *skb)
2184 {
2185 	u32 ntc = rx_ring->next_to_clean + 1;
2186 
2187 	/* fetch, update, and store next to clean */
2188 	ntc = (ntc < rx_ring->count) ? ntc : 0;
2189 	rx_ring->next_to_clean = ntc;
2190 
2191 	prefetch(I40E_RX_DESC(rx_ring, ntc));
2192 
2193 	/* if we are the last buffer then there is nothing else to do */
2194 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2195 	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2196 		return false;
2197 
2198 	rx_ring->rx_stats.non_eop_descs++;
2199 
2200 	return true;
2201 }
2202 
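/* Return codes for the XDP fast path.  i40e_run_xdp() hands its verdict back
 * to i40e_clean_rx_irq() encoded as ERR_PTR(-result) in place of an skb
 * pointer.
 */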
2203 #define I40E_XDP_PASS 0
2204 #define I40E_XDP_CONSUMED 1
2205 #define I40E_XDP_TX 2
2206 
2207 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2208 			      struct i40e_ring *xdp_ring);
2209 
2210 static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp,
2211 				 struct i40e_ring *xdp_ring)
2212 {
2213 	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
2214 
2215 	if (unlikely(!xdpf))
2216 		return I40E_XDP_CONSUMED;
2217 
2218 	return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2219 }
2220 
2221 /**
2222  * i40e_run_xdp - run an XDP program
2223  * @rx_ring: Rx ring being processed
2224  * @xdp: XDP buffer containing the frame
2225  **/
2226 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2227 				    struct xdp_buff *xdp)
2228 {
2229 	int err, result = I40E_XDP_PASS;
2230 	struct i40e_ring *xdp_ring;
2231 	struct bpf_prog *xdp_prog;
2232 	u32 act;
2233 
2234 	rcu_read_lock();
2235 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2236 
2237 	if (!xdp_prog)
2238 		goto xdp_out;
2239 
2240 	prefetchw(xdp->data_hard_start); /* xdp_frame write */
2241 
2242 	act = bpf_prog_run_xdp(xdp_prog, xdp);
2243 	switch (act) {
2244 	case XDP_PASS:
2245 		break;
2246 	case XDP_TX:
2247 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2248 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2249 		break;
2250 	case XDP_REDIRECT:
2251 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2252 		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
2253 		break;
2254 	default:
2255 		bpf_warn_invalid_xdp_action(act);
2256 	case XDP_ABORTED:
2257 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2258 		/* fallthrough -- handle aborts by dropping packet */
2259 	case XDP_DROP:
2260 		result = I40E_XDP_CONSUMED;
2261 		break;
2262 	}
2263 xdp_out:
2264 	rcu_read_unlock();
2265 	return ERR_PTR(-result);
2266 }
2267 
2268 /**
2269  * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2270  * @rx_ring: Rx ring
2271  * @rx_buffer: Rx buffer to adjust
2272  * @size: Size of adjustment
2273  **/
2274 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2275 				struct i40e_rx_buffer *rx_buffer,
2276 				unsigned int size)
2277 {
2278 #if (PAGE_SIZE < 8192)
2279 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2280 
2281 	rx_buffer->page_offset ^= truesize;
2282 #else
2283 	unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2284 
2285 	rx_buffer->page_offset += truesize;
2286 #endif
2287 }
2288 
2289 static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2290 {
2291 	/* Force memory writes to complete before letting h/w
2292 	 * know there are new descriptors to fetch.
2293 	 */
2294 	wmb();
2295 	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2296 }
2297 
2298 /**
2299  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2300  * @rx_ring: rx descriptor ring to transact packets on
2301  * @budget: Total limit on number of packets to process
2302  *
2303  * This function provides a "bounce buffer" approach to Rx interrupt
2304  * processing.  The advantage to this is that on systems that have
2305  * expensive overhead for IOMMU access this provides a means of avoiding
2306  * it by maintaining the mapping of the page to the system.
2307  *
2308  * Returns amount of work completed
2309  **/
2310 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2311 {
2312 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2313 	struct sk_buff *skb = rx_ring->skb;
2314 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2315 	bool failure = false, xdp_xmit = false;
2316 	struct xdp_buff xdp;
2317 
2318 	xdp.rxq = &rx_ring->xdp_rxq;
2319 
2320 	while (likely(total_rx_packets < (unsigned int)budget)) {
2321 		struct i40e_rx_buffer *rx_buffer;
2322 		union i40e_rx_desc *rx_desc;
2323 		unsigned int size;
2324 		u16 vlan_tag;
2325 		u8 rx_ptype;
2326 		u64 qword;
2327 
2328 		/* return some buffers to hardware, one at a time is too slow */
2329 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2330 			failure = failure ||
2331 				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2332 			cleaned_count = 0;
2333 		}
2334 
2335 		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2336 
2337 		/* status_error_len will always be zero for unused descriptors
2338 		 * because it's cleared in cleanup, and overlaps with hdr_addr
2339 		 * which is always zero because packet split isn't used.  If the
2340 		 * hardware wrote DD then the length will be non-zero
2341 		 */
2342 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2343 
2344 		/* This memory barrier is needed to keep us from reading
2345 		 * any other fields out of the rx_desc until we have
2346 		 * verified the descriptor has been written back.
2347 		 */
2348 		dma_rmb();
2349 
2350 		if (unlikely(i40e_rx_is_programming_status(qword))) {
2351 			i40e_clean_programming_status(rx_ring, rx_desc, qword);
2352 			cleaned_count++;
2353 			continue;
2354 		}
2355 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2356 		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2357 		if (!size)
2358 			break;
2359 
2360 		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2361 		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2362 
2363 		/* retrieve a buffer from the ring */
2364 		if (!skb) {
2365 			xdp.data = page_address(rx_buffer->page) +
2366 				   rx_buffer->page_offset;
2367 			xdp.data_meta = xdp.data;
2368 			xdp.data_hard_start = xdp.data -
2369 					      i40e_rx_offset(rx_ring);
2370 			xdp.data_end = xdp.data + size;
2371 
2372 			skb = i40e_run_xdp(rx_ring, &xdp);
2373 		}
2374 
2375 		if (IS_ERR(skb)) {
2376 			if (PTR_ERR(skb) == -I40E_XDP_TX) {
2377 				xdp_xmit = true;
2378 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2379 			} else {
2380 				rx_buffer->pagecnt_bias++;
2381 			}
2382 			total_rx_bytes += size;
2383 			total_rx_packets++;
2384 		} else if (skb) {
2385 			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2386 		} else if (ring_uses_build_skb(rx_ring)) {
2387 			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2388 		} else {
2389 			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2390 		}
2391 
2392 		/* exit if we failed to retrieve a buffer */
2393 		if (!skb) {
2394 			rx_ring->rx_stats.alloc_buff_failed++;
2395 			rx_buffer->pagecnt_bias++;
2396 			break;
2397 		}
2398 
2399 		i40e_put_rx_buffer(rx_ring, rx_buffer);
2400 		cleaned_count++;
2401 
2402 		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2403 			continue;
2404 
2405 		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2406 			skb = NULL;
2407 			continue;
2408 		}
2409 
2410 		/* probably a little skewed due to removing CRC */
2411 		total_rx_bytes += skb->len;
2412 
2413 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2414 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2415 			   I40E_RXD_QW1_PTYPE_SHIFT;
2416 
2417 		/* populate checksum, VLAN, and protocol */
2418 		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2419 
2420 		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2421 			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2422 
2423 		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2424 		i40e_receive_skb(rx_ring, skb, vlan_tag);
2425 		skb = NULL;
2426 
2427 		/* update budget accounting */
2428 		total_rx_packets++;
2429 	}
2430 
2431 	if (xdp_xmit) {
2432 		struct i40e_ring *xdp_ring =
2433 			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2434 
2435 		i40e_xdp_ring_update_tail(xdp_ring);
2436 		xdp_do_flush_map();
2437 	}
2438 
2439 	rx_ring->skb = skb;
2440 
2441 	u64_stats_update_begin(&rx_ring->syncp);
2442 	rx_ring->stats.packets += total_rx_packets;
2443 	rx_ring->stats.bytes += total_rx_bytes;
2444 	u64_stats_update_end(&rx_ring->syncp);
2445 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
2446 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2447 
2448 	/* guarantee a trip back through this routine if there was a failure */
2449 	return failure ? budget : (int)total_rx_packets;
2450 }
2451 
2452 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2453 {
2454 	u32 val;
2455 
2456 	/* We don't bother with setting the CLEARPBA bit as the data sheet
2457 	 * points out doing so is "meaningless since it was already
2458 	 * auto-cleared". The auto-clearing happens when the interrupt is
2459 	 * asserted.
2460 	 *
2461 	 * Hardware errata 28 also indicates that writing to a
2462 	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2463 	 * an event in the PBA anyway so we need to rely on the automask
2464 	 * to hold pending events for us until the interrupt is re-enabled
2465 	 *
2466 	 * The itr value is reported in microseconds, and the register
2467 	 * value is recorded in 2 microsecond units. For this reason we
2468 	 * only need to shift by the interval shift - 1 instead of the
2469 	 * full value.
2470 	 */
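	/* Illustrative example (hypothetical numbers): a 50 usec ITR must be
	 * written as 25 in the 2 usec interval field, and (50 >> 1) shifted
	 * into place equals 50 << (INTERVAL_SHIFT - 1), which is the shift
	 * used below.
	 */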
2471 	itr &= I40E_ITR_MASK;
2472 
2473 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2474 	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2475 	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2476 
2477 	return val;
2478 }
2479 
2480 /* a small macro to shorten up some long lines */
2481 #define INTREG I40E_PFINT_DYN_CTLN
2482 
2483 /* The act of updating the ITR will cause it to immediately trigger. In order
2484  * to prevent this from throwing off adaptive update statistics we defer the
2485  * update so that it can only happen so often. So after either Tx or Rx are
2486  * updated we make the adaptive scheme wait until either the ITR completely
2487  * expires via the next_update expiration or we have been through at least
2488  * 3 interrupts.
2489  */
2490 #define ITR_COUNTDOWN_START 3
2491 
2492 /**
2493  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2494  * @vsi: the VSI we care about
2495  * @q_vector: q_vector for which itr is being updated and interrupt enabled
2496  *
2497  **/
2498 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2499 					  struct i40e_q_vector *q_vector)
2500 {
2501 	struct i40e_hw *hw = &vsi->back->hw;
2502 	u32 intval;
2503 
2504 	/* If we don't have MSIX, then we only need to re-enable icr0 */
2505 	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2506 		i40e_irq_dynamic_enable_icr0(vsi->back);
2507 		return;
2508 	}
2509 
2510 	/* These will do nothing if dynamic updates are not enabled */
2511 	i40e_update_itr(q_vector, &q_vector->tx);
2512 	i40e_update_itr(q_vector, &q_vector->rx);
2513 
2514 	/* This block of logic allows us to get away with only updating
2515 	 * one ITR value with each interrupt. The idea is to perform a
2516 	 * pseudo-lazy update with the following criteria.
2517 	 *
2518 	 * 1. Rx is given higher priority than Tx if both are in the same state
2519 	 * 2. If we must reduce an ITR, that is given highest priority.
2520 	 * 3. We then give priority to increasing ITR based on amount.
2521 	 */
2522 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2523 		/* Rx ITR needs to be reduced, this is highest priority */
2524 		intval = i40e_buildreg_itr(I40E_RX_ITR,
2525 					   q_vector->rx.target_itr);
2526 		q_vector->rx.current_itr = q_vector->rx.target_itr;
2527 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2528 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2529 		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2530 		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2531 		/* Tx ITR needs to be reduced, this is second priority
2532 		 * Tx ITR needs to be increased more than Rx, fourth priority
2533 		 */
2534 		intval = i40e_buildreg_itr(I40E_TX_ITR,
2535 					   q_vector->tx.target_itr);
2536 		q_vector->tx.current_itr = q_vector->tx.target_itr;
2537 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2538 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2539 		/* Rx ITR needs to be increased, third priority */
2540 		intval = i40e_buildreg_itr(I40E_RX_ITR,
2541 					   q_vector->rx.target_itr);
2542 		q_vector->rx.current_itr = q_vector->rx.target_itr;
2543 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2544 	} else {
2545 		/* No ITR update, lowest priority */
2546 		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2547 		if (q_vector->itr_countdown)
2548 			q_vector->itr_countdown--;
2549 	}
2550 
2551 	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2552 		wr32(hw, INTREG(q_vector->reg_idx), intval);
2553 }
2554 
2555 /**
2556  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2557  * @napi: napi struct with our devices info in it
2558  * @budget: amount of work driver is allowed to do this pass, in packets
2559  *
2560  * This function will clean all queues associated with a q_vector.
2561  *
2562  * Returns the amount of work done
2563  **/
2564 int i40e_napi_poll(struct napi_struct *napi, int budget)
2565 {
2566 	struct i40e_q_vector *q_vector =
2567 			       container_of(napi, struct i40e_q_vector, napi);
2568 	struct i40e_vsi *vsi = q_vector->vsi;
2569 	struct i40e_ring *ring;
2570 	bool clean_complete = true;
2571 	bool arm_wb = false;
2572 	int budget_per_ring;
2573 	int work_done = 0;
2574 
2575 	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2576 		napi_complete(napi);
2577 		return 0;
2578 	}
2579 
2580 	/* Since the actual Tx work is minimal, we can give the Tx a larger
2581 	 * budget and be more aggressive about cleaning up the Tx descriptors.
2582 	 */
2583 	i40e_for_each_ring(ring, q_vector->tx) {
2584 		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2585 			clean_complete = false;
2586 			continue;
2587 		}
2588 		arm_wb |= ring->arm_wb;
2589 		ring->arm_wb = false;
2590 	}
2591 
2592 	/* Handle case where we are called by netpoll with a budget of 0 */
2593 	if (budget <= 0)
2594 		goto tx_only;
2595 
2596 	/* We attempt to distribute budget to each Rx queue fairly, but don't
2597 	 * allow the budget to go below 1 because that would exit polling early.
2598 	 */
2599 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2600 
2601 	i40e_for_each_ring(ring, q_vector->rx) {
2602 		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2603 
2604 		work_done += cleaned;
2605 		/* if we clean as many as budgeted, we must not be done */
2606 		if (cleaned >= budget_per_ring)
2607 			clean_complete = false;
2608 	}
2609 
2610 	/* If work not completed, return budget and polling will return */
2611 	if (!clean_complete) {
2612 		int cpu_id = smp_processor_id();
2613 
2614 		/* It is possible that the interrupt affinity has changed but,
2615 		 * if the cpu is pegged at 100%, polling will never exit while
2616 		 * traffic continues and the interrupt will be stuck on this
2617 		 * cpu.  We check to make sure affinity is correct before we
2618 		 * continue to poll, otherwise we must stop polling so the
2619 		 * interrupt can move to the correct cpu.
2620 		 */
2621 		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2622 			/* Tell napi that we are done polling */
2623 			napi_complete_done(napi, work_done);
2624 
2625 			/* Force an interrupt */
2626 			i40e_force_wb(vsi, q_vector);
2627 
2628 			/* Return budget-1 so that polling stops */
2629 			return budget - 1;
2630 		}
2631 tx_only:
2632 		if (arm_wb) {
2633 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2634 			i40e_enable_wb_on_itr(vsi, q_vector);
2635 		}
2636 		return budget;
2637 	}
2638 
2639 	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2640 		q_vector->arm_wb_state = false;
2641 
2642 	/* Work is done so exit the polling mode and re-enable the interrupt */
2643 	napi_complete_done(napi, work_done);
2644 
2645 	i40e_update_enable_itr(vsi, q_vector);
2646 
2647 	return min(work_done, budget - 1);
2648 }
2649 
2650 /**
2651  * i40e_atr - Add a Flow Director ATR filter
2652  * @tx_ring:  ring to add programming descriptor to
2653  * @skb:      send buffer
2654  * @tx_flags: send tx flags
2655  **/
2656 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2657 		     u32 tx_flags)
2658 {
2659 	struct i40e_filter_program_desc *fdir_desc;
2660 	struct i40e_pf *pf = tx_ring->vsi->back;
2661 	union {
2662 		unsigned char *network;
2663 		struct iphdr *ipv4;
2664 		struct ipv6hdr *ipv6;
2665 	} hdr;
2666 	struct tcphdr *th;
2667 	unsigned int hlen;
2668 	u32 flex_ptype, dtype_cmd;
2669 	int l4_proto;
2670 	u16 i;
2671 
2672 	/* make sure ATR is enabled */
2673 	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2674 		return;
2675 
2676 	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2677 		return;
2678 
2679 	/* if sampling is disabled do nothing */
2680 	if (!tx_ring->atr_sample_rate)
2681 		return;
2682 
2683 	/* Currently only IPv4/IPv6 with TCP is supported */
2684 	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2685 		return;
2686 
2687 	/* snag network header to get L4 type and address */
2688 	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2689 		      skb_inner_network_header(skb) : skb_network_header(skb);
2690 
2691 	/* Note: tx_flags gets modified to reflect inner protocols in
2692 	 * tx_enable_csum function if encap is enabled.
2693 	 */
2694 	if (tx_flags & I40E_TX_FLAGS_IPV4) {
2695 		/* access ihl as u8 to avoid unaligned access on ia64 */
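		/* the IHL field counts 4-byte words, hence the shift by 2 */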
2696 		hlen = (hdr.network[0] & 0x0F) << 2;
2697 		l4_proto = hdr.ipv4->protocol;
2698 	} else {
2699 		/* find the start of the innermost ipv6 header */
2700 		unsigned int inner_hlen = hdr.network - skb->data;
2701 		unsigned int h_offset = inner_hlen;
2702 
2703 		/* this function updates h_offset to the end of the header */
2704 		l4_proto =
2705 		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2706 		/* hlen will contain our best estimate of the tcp header */
2707 		/* hlen is our best estimate of the offset to the tcp header */
2708 	}
2709 
2710 	if (l4_proto != IPPROTO_TCP)
2711 		return;
2712 
2713 	th = (struct tcphdr *)(hdr.network + hlen);
2714 
2715 	/* Due to lack of space, no more new filters can be programmed */
2716 	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2717 		return;
2718 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2719 		/* HW ATR eviction will take care of removing filters on FIN
2720 		 * and RST packets.
2721 		 */
2722 		if (th->fin || th->rst)
2723 			return;
2724 	}
2725 
2726 	tx_ring->atr_count++;
2727 
2728 	/* sample on all syn/fin/rst packets or once every atr sample rate */
2729 	if (!th->fin &&
2730 	    !th->syn &&
2731 	    !th->rst &&
2732 	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
2733 		return;
2734 
2735 	tx_ring->atr_count = 0;
2736 
2737 	/* grab the next descriptor */
2738 	i = tx_ring->next_to_use;
2739 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2740 
2741 	i++;
2742 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2743 
2744 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2745 		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
2746 	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2747 		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2748 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2749 		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2750 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2751 
2752 	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2753 
2754 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2755 
2756 	dtype_cmd |= (th->fin || th->rst) ?
2757 		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2758 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2759 		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2760 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2761 
2762 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2763 		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
2764 
2765 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2766 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2767 
2768 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2769 	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2770 		dtype_cmd |=
2771 			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2772 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2773 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2774 	else
2775 		dtype_cmd |=
2776 			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2777 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2778 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2779 
2780 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2781 		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2782 
2783 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2784 	fdir_desc->rsvd = cpu_to_le32(0);
2785 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2786 	fdir_desc->fd_id = cpu_to_le32(0);
2787 }
2788 
2789 /**
2790  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2791  * @skb:     send buffer
2792  * @tx_ring: ring to send buffer on
2793  * @flags:   the tx flags to be set
2794  *
2795  * Checks the skb and sets up the corresponding generic transmit flags
2796  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2797  *
2798  * Returns an error code to indicate the frame should be dropped upon error,
2799  * otherwise returns 0 to indicate the flags have been set properly.
2800  **/
2801 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2802 					     struct i40e_ring *tx_ring,
2803 					     u32 *flags)
2804 {
2805 	__be16 protocol = skb->protocol;
2806 	u32  tx_flags = 0;
2807 
2808 	if (protocol == htons(ETH_P_8021Q) &&
2809 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2810 		/* When HW VLAN acceleration is turned off by the user the
2811 		 * stack sets the protocol to 8021q so that the driver
2812 		 * can take any steps required to support the SW only
2813 		 * VLAN handling.  In our case the driver doesn't need
2814 		 * to take any further steps so just set the protocol
2815 		 * to the encapsulated ethertype.
2816 		 */
2817 		skb->protocol = vlan_get_protocol(skb);
2818 		goto out;
2819 	}
2820 
2821 	/* if we have a HW VLAN tag being added, default to the HW one */
2822 	if (skb_vlan_tag_present(skb)) {
2823 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2824 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2825 	/* else if it is a SW VLAN, check the next protocol and store the tag */
2826 	} else if (protocol == htons(ETH_P_8021Q)) {
2827 		struct vlan_hdr *vhdr, _vhdr;
2828 
2829 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2830 		if (!vhdr)
2831 			return -EINVAL;
2832 
2833 		protocol = vhdr->h_vlan_encapsulated_proto;
2834 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2835 		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2836 	}
2837 
2838 	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2839 		goto out;
2840 
2841 	/* Insert 802.1p priority into VLAN header */
2842 	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2843 	    (skb->priority != TC_PRIO_CONTROL)) {
2844 		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2845 		tx_flags |= (skb->priority & 0x7) <<
2846 				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2847 		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2848 			struct vlan_ethhdr *vhdr;
2849 			int rc;
2850 
2851 			rc = skb_cow_head(skb, 0);
2852 			if (rc < 0)
2853 				return rc;
2854 			vhdr = (struct vlan_ethhdr *)skb->data;
2855 			vhdr->h_vlan_TCI = htons(tx_flags >>
2856 						 I40E_TX_FLAGS_VLAN_SHIFT);
2857 		} else {
2858 			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2859 		}
2860 	}
2861 
2862 out:
2863 	*flags = tx_flags;
2864 	return 0;
2865 }
2866 
2867 /**
2868  * i40e_tso - set up the tso context descriptor
2869  * @first:    pointer to first Tx buffer for xmit
2870  * @hdr_len:  ptr to the size of the packet header
2871  * @cd_type_cmd_tso_mss: Quad Word 1
2872  *
2873  * Returns 0 if no TSO can happen, 1 if tso is going, or error
2874  **/
2875 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2876 		    u64 *cd_type_cmd_tso_mss)
2877 {
2878 	struct sk_buff *skb = first->skb;
2879 	u64 cd_cmd, cd_tso_len, cd_mss;
2880 	union {
2881 		struct iphdr *v4;
2882 		struct ipv6hdr *v6;
2883 		unsigned char *hdr;
2884 	} ip;
2885 	union {
2886 		struct tcphdr *tcp;
2887 		struct udphdr *udp;
2888 		unsigned char *hdr;
2889 	} l4;
2890 	u32 paylen, l4_offset;
2891 	u16 gso_segs, gso_size;
2892 	int err;
2893 
2894 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2895 		return 0;
2896 
2897 	if (!skb_is_gso(skb))
2898 		return 0;
2899 
2900 	err = skb_cow_head(skb, 0);
2901 	if (err < 0)
2902 		return err;
2903 
2904 	ip.hdr = skb_network_header(skb);
2905 	l4.hdr = skb_transport_header(skb);
2906 
2907 	/* initialize outer IP header fields */
2908 	if (ip.v4->version == 4) {
2909 		ip.v4->tot_len = 0;
2910 		ip.v4->check = 0;
2911 	} else {
2912 		ip.v6->payload_len = 0;
2913 	}
2914 
2915 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2916 					 SKB_GSO_GRE_CSUM |
2917 					 SKB_GSO_IPXIP4 |
2918 					 SKB_GSO_IPXIP6 |
2919 					 SKB_GSO_UDP_TUNNEL |
2920 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
2921 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2922 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2923 			l4.udp->len = 0;
2924 
2925 			/* determine offset of outer transport header */
2926 			l4_offset = l4.hdr - skb->data;
2927 
2928 			/* remove payload length from outer checksum */
2929 			paylen = skb->len - l4_offset;
2930 			csum_replace_by_diff(&l4.udp->check,
2931 					     (__force __wsum)htonl(paylen));
2932 		}
2933 
2934 		/* reset pointers to inner headers */
2935 		ip.hdr = skb_inner_network_header(skb);
2936 		l4.hdr = skb_inner_transport_header(skb);
2937 
2938 		/* initialize inner IP header fields */
2939 		if (ip.v4->version == 4) {
2940 			ip.v4->tot_len = 0;
2941 			ip.v4->check = 0;
2942 		} else {
2943 			ip.v6->payload_len = 0;
2944 		}
2945 	}
2946 
2947 	/* determine offset of inner transport header */
2948 	l4_offset = l4.hdr - skb->data;
2949 
2950 	/* remove payload length from inner checksum */
2951 	paylen = skb->len - l4_offset;
2952 	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2953 
2954 	/* compute length of segmentation header */
2955 	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
2956 
2957 	/* pull values out of skb_shinfo */
2958 	gso_size = skb_shinfo(skb)->gso_size;
2959 	gso_segs = skb_shinfo(skb)->gso_segs;
2960 
2961 	/* update GSO size and bytecount with header size */
2962 	first->gso_segs = gso_segs;
2963 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
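	/* each additional segment carries its own copy of the headers, which
	 * is why hdr_len extra bytes are counted for every segment after the
	 * first
	 */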
2964 
2965 	/* find the field values */
2966 	cd_cmd = I40E_TX_CTX_DESC_TSO;
2967 	cd_tso_len = skb->len - *hdr_len;
2968 	cd_mss = gso_size;
2969 	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2970 				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2971 				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2972 	return 1;
2973 }
2974 
2975 /**
2976  * i40e_tsyn - set up the tsyn context descriptor
2977  * @tx_ring:  ptr to the ring to send
2978  * @skb:      ptr to the skb we're sending
2979  * @tx_flags: the collected send information
2980  * @cd_type_cmd_tso_mss: Quad Word 1
2981  *
2982  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2983  **/
2984 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2985 		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2986 {
2987 	struct i40e_pf *pf;
2988 
2989 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2990 		return 0;
2991 
2992 	/* Tx timestamps cannot be sampled when doing TSO */
2993 	if (tx_flags & I40E_TX_FLAGS_TSO)
2994 		return 0;
2995 
2996 	/* only timestamp the outbound packet if the user has requested it and
2997 	 * we are not already transmitting a packet to be timestamped
2998 	 */
2999 	pf = i40e_netdev_to_pf(tx_ring->netdev);
3000 	if (!(pf->flags & I40E_FLAG_PTP))
3001 		return 0;
3002 
3003 	if (pf->ptp_tx &&
3004 	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3005 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3006 		pf->ptp_tx_start = jiffies;
3007 		pf->ptp_tx_skb = skb_get(skb);
3008 	} else {
3009 		pf->tx_hwtstamp_skipped++;
3010 		return 0;
3011 	}
3012 
3013 	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3014 				I40E_TXD_CTX_QW1_CMD_SHIFT;
3015 
3016 	return 1;
3017 }
3018 
3019 /**
3020  * i40e_tx_enable_csum - Enable Tx checksum offloads
3021  * @skb: send buffer
3022  * @tx_flags: pointer to Tx flags currently set
3023  * @td_cmd: Tx descriptor command bits to set
3024  * @td_offset: Tx descriptor header offsets to set
3025  * @tx_ring: Tx descriptor ring
3026  * @cd_tunneling: ptr to context desc bits
3027  **/
3028 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3029 			       u32 *td_cmd, u32 *td_offset,
3030 			       struct i40e_ring *tx_ring,
3031 			       u32 *cd_tunneling)
3032 {
3033 	union {
3034 		struct iphdr *v4;
3035 		struct ipv6hdr *v6;
3036 		unsigned char *hdr;
3037 	} ip;
3038 	union {
3039 		struct tcphdr *tcp;
3040 		struct udphdr *udp;
3041 		unsigned char *hdr;
3042 	} l4;
3043 	unsigned char *exthdr;
3044 	u32 offset, cmd = 0;
3045 	__be16 frag_off;
3046 	u8 l4_proto = 0;
3047 
3048 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3049 		return 0;
3050 
3051 	ip.hdr = skb_network_header(skb);
3052 	l4.hdr = skb_transport_header(skb);
3053 
3054 	/* compute outer L2 header size */
3055 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3056 
3057 	if (skb->encapsulation) {
3058 		u32 tunnel = 0;
3059 		/* define outer network header type */
3060 		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3061 			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3062 				  I40E_TX_CTX_EXT_IP_IPV4 :
3063 				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3064 
3065 			l4_proto = ip.v4->protocol;
3066 		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3067 			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3068 
3069 			exthdr = ip.hdr + sizeof(*ip.v6);
3070 			l4_proto = ip.v6->nexthdr;
3071 			if (l4.hdr != exthdr)
3072 				ipv6_skip_exthdr(skb, exthdr - skb->data,
3073 						 &l4_proto, &frag_off);
3074 		}
3075 
3076 		/* define outer transport */
3077 		switch (l4_proto) {
3078 		case IPPROTO_UDP:
3079 			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3080 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3081 			break;
3082 		case IPPROTO_GRE:
3083 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3084 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3085 			break;
3086 		case IPPROTO_IPIP:
3087 		case IPPROTO_IPV6:
3088 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3089 			l4.hdr = skb_inner_network_header(skb);
3090 			break;
3091 		default:
3092 			if (*tx_flags & I40E_TX_FLAGS_TSO)
3093 				return -1;
3094 
3095 			skb_checksum_help(skb);
3096 			return 0;
3097 		}
3098 
3099 		/* compute outer L3 header size */
3100 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3101 			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3102 
3103 		/* switch IP header pointer from outer to inner header */
3104 		ip.hdr = skb_inner_network_header(skb);
3105 
3106 		/* compute tunnel header size */
3107 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3108 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3109 
3110 		/* indicate if we need to offload outer UDP header */
3111 		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3112 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3113 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3114 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3115 
3116 		/* record tunnel offload values */
3117 		*cd_tunneling |= tunnel;
3118 
3119 		/* switch L4 header pointer from outer to inner */
3120 		l4.hdr = skb_inner_transport_header(skb);
3121 		l4_proto = 0;
3122 
3123 		/* reset type as we transition from outer to inner headers */
3124 		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3125 		if (ip.v4->version == 4)
3126 			*tx_flags |= I40E_TX_FLAGS_IPV4;
3127 		if (ip.v6->version == 6)
3128 			*tx_flags |= I40E_TX_FLAGS_IPV6;
3129 	}
3130 
3131 	/* Enable IP checksum offloads */
3132 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3133 		l4_proto = ip.v4->protocol;
3134 		/* the stack already computes the IP header checksum; we only
3135 		 * need the hardware to recompute it in the case of TSO.
3136 		 */
3137 		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3138 		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3139 		       I40E_TX_DESC_CMD_IIPT_IPV4;
3140 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3141 		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3142 
3143 		exthdr = ip.hdr + sizeof(*ip.v6);
3144 		l4_proto = ip.v6->nexthdr;
3145 		if (l4.hdr != exthdr)
3146 			ipv6_skip_exthdr(skb, exthdr - skb->data,
3147 					 &l4_proto, &frag_off);
3148 	}
3149 
3150 	/* compute inner L3 header size */
3151 	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3152 
3153 	/* Enable L4 checksum offloads */
3154 	switch (l4_proto) {
3155 	case IPPROTO_TCP:
3156 		/* enable checksum offloads */
3157 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3158 		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3159 		break;
3160 	case IPPROTO_SCTP:
3161 		/* enable SCTP checksum offload */
3162 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3163 		offset |= (sizeof(struct sctphdr) >> 2) <<
3164 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3165 		break;
3166 	case IPPROTO_UDP:
3167 		/* enable UDP checksum offload */
3168 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3169 		offset |= (sizeof(struct udphdr) >> 2) <<
3170 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3171 		break;
3172 	default:
3173 		if (*tx_flags & I40E_TX_FLAGS_TSO)
3174 			return -1;
3175 		skb_checksum_help(skb);
3176 		return 0;
3177 	}
3178 
3179 	*td_cmd |= cmd;
3180 	*td_offset |= offset;
3181 
3182 	return 1;
3183 }
3184 
3185 /**
3186  * i40e_create_tx_ctx - Build the Tx context descriptor
3187  * @tx_ring:  ring to create the descriptor on
3188  * @cd_type_cmd_tso_mss: Quad Word 1
3189  * @cd_tunneling: Quad Word 0 - bits 0-31
3190  * @cd_l2tag2: Quad Word 0 - bits 32-63
3191  **/
3192 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3193 			       const u64 cd_type_cmd_tso_mss,
3194 			       const u32 cd_tunneling, const u32 cd_l2tag2)
3195 {
3196 	struct i40e_tx_context_desc *context_desc;
3197 	int i = tx_ring->next_to_use;
3198 
3199 	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3200 	    !cd_tunneling && !cd_l2tag2)
3201 		return;
3202 
3203 	/* grab the next descriptor */
3204 	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3205 
3206 	i++;
3207 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3208 
3209 	/* cpu_to_le32 and assign to struct fields */
3210 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3211 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3212 	context_desc->rsvd = cpu_to_le16(0);
3213 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3214 }
3215 
3216 /**
3217  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3218  * @tx_ring: the ring to be checked
3219  * @size:    the size buffer we want to assure is available
3220  *
3221  * Returns -EBUSY if a stop is needed, else 0
3222  **/
3223 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3224 {
3225 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3226 	/* Memory barrier before checking head and tail */
3227 	smp_mb();
3228 
3229 	/* Check again in a case another CPU has just made room available. */
3230 	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3231 		return -EBUSY;
3232 
3233 	/* A reprieve! - use start_queue because it doesn't call schedule */
3234 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3235 	++tx_ring->tx_stats.restart_queue;
3236 	return 0;
3237 }
3238 
3239 /**
3240  * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3241  * @skb:      send buffer
3242  *
3243  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3244  * and so we need to figure out the cases where we need to linearize the skb.
3245  *
3246  * For TSO we need to count the TSO header and segment payload separately.
3247  * As such we need to check cases where we have 7 fragments or more as we
3248  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3249  * the segment payload in the first descriptor, and another 7 for the
3250  * fragments.
3251  **/
3252 bool __i40e_chk_linearize(struct sk_buff *skb)
3253 {
3254 	const struct skb_frag_struct *frag, *stale;
3255 	int nr_frags, sum;
3256 
3257 	/* no need to check if number of frags is less than 7 */
3258 	nr_frags = skb_shinfo(skb)->nr_frags;
3259 	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3260 		return false;
3261 
3262 	/* We need to walk through the list and validate that each group
3263 	 * of 6 fragments totals at least gso_size.
3264 	 */
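	/* Illustrative example (hypothetical sizes): with gso_size = 9000 and
	 * seven 1400 byte fragments, any six consecutive fragments cover only
	 * 8400 bytes, so the running sum below goes negative and the skb must
	 * be linearized.
	 */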
3265 	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3266 	frag = &skb_shinfo(skb)->frags[0];
3267 
3268 	/* Initialize size to the negative value of gso_size minus 1.  We
3269 	 * use this as the worst case scenario in which the frag ahead
3270 	 * of us only provides one byte which is why we are limited to 6
3271 	 * descriptors for a single transmit as the header and previous
3272 	 * fragment are already consuming 2 descriptors.
3273 	 */
3274 	sum = 1 - skb_shinfo(skb)->gso_size;
3275 
3276 	/* Add size of frags 0 through 4 to create our initial sum */
3277 	sum += skb_frag_size(frag++);
3278 	sum += skb_frag_size(frag++);
3279 	sum += skb_frag_size(frag++);
3280 	sum += skb_frag_size(frag++);
3281 	sum += skb_frag_size(frag++);
3282 
3283 	/* Walk through fragments adding latest fragment, testing it, and
3284 	 * then removing stale fragments from the sum.
3285 	 */
3286 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3287 		int stale_size = skb_frag_size(stale);
3288 
3289 		sum += skb_frag_size(frag++);
3290 
3291 		/* The stale fragment may present us with a smaller
3292 		 * descriptor than the actual fragment size. To account
3293 		 * for that we need to remove all the data on the front and
3294 		 * figure out what the remainder would be in the last
3295 		 * descriptor associated with the fragment.
3296 		 */
3297 		if (stale_size > I40E_MAX_DATA_PER_TXD) {
3298 			int align_pad = -(stale->page_offset) &
3299 					(I40E_MAX_READ_REQ_SIZE - 1);
3300 
3301 			sum -= align_pad;
3302 			stale_size -= align_pad;
3303 
3304 			do {
3305 				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3306 				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3307 			} while (stale_size > I40E_MAX_DATA_PER_TXD);
3308 		}
3309 
3310 		/* if sum is negative we failed to make sufficient progress */
3311 		if (sum < 0)
3312 			return true;
3313 
3314 		if (!nr_frags--)
3315 			break;
3316 
3317 		sum -= stale_size;
3318 	}
3319 
3320 	return false;
3321 }
3322 
3323 /**
3324  * i40e_tx_map - Build the Tx descriptor
3325  * @tx_ring:  ring to send buffer on
3326  * @skb:      send buffer
3327  * @first:    first buffer info buffer to use
3328  * @tx_flags: collected send information
3329  * @hdr_len:  size of the packet header
3330  * @td_cmd:   the command field in the descriptor
3331  * @td_offset: offset for checksum or crc
3332  *
3333  * Returns 0 on success, -1 on failure to DMA
3334  **/
3335 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3336 			      struct i40e_tx_buffer *first, u32 tx_flags,
3337 			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
3338 {
3339 	unsigned int data_len = skb->data_len;
3340 	unsigned int size = skb_headlen(skb);
3341 	struct skb_frag_struct *frag;
3342 	struct i40e_tx_buffer *tx_bi;
3343 	struct i40e_tx_desc *tx_desc;
3344 	u16 i = tx_ring->next_to_use;
3345 	u32 td_tag = 0;
3346 	dma_addr_t dma;
3347 	u16 desc_count = 1;
3348 
3349 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3350 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3351 		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3352 			 I40E_TX_FLAGS_VLAN_SHIFT;
3353 	}
3354 
3355 	first->tx_flags = tx_flags;
3356 
3357 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3358 
3359 	tx_desc = I40E_TX_DESC(tx_ring, i);
3360 	tx_bi = first;
3361 
3362 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3363 		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3364 
3365 		if (dma_mapping_error(tx_ring->dev, dma))
3366 			goto dma_error;
3367 
3368 		/* record length, and DMA address */
3369 		dma_unmap_len_set(tx_bi, len, size);
3370 		dma_unmap_addr_set(tx_bi, dma, dma);
3371 
3372 		/* align size to end of page */
3373 		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
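		/* -dma & (I40E_MAX_READ_REQ_SIZE - 1) is the distance from
		 * dma up to the next read-request boundary, so the first
		 * chunk of an oversized buffer ends on that boundary and
		 * every later chunk starts aligned.
		 */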
3374 		tx_desc->buffer_addr = cpu_to_le64(dma);
3375 
3376 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3377 			tx_desc->cmd_type_offset_bsz =
3378 				build_ctob(td_cmd, td_offset,
3379 					   max_data, td_tag);
3380 
3381 			tx_desc++;
3382 			i++;
3383 			desc_count++;
3384 
3385 			if (i == tx_ring->count) {
3386 				tx_desc = I40E_TX_DESC(tx_ring, 0);
3387 				i = 0;
3388 			}
3389 
3390 			dma += max_data;
3391 			size -= max_data;
3392 
3393 			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3394 			tx_desc->buffer_addr = cpu_to_le64(dma);
3395 		}
3396 
3397 		if (likely(!data_len))
3398 			break;
3399 
3400 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3401 							  size, td_tag);
3402 
3403 		tx_desc++;
3404 		i++;
3405 		desc_count++;
3406 
3407 		if (i == tx_ring->count) {
3408 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3409 			i = 0;
3410 		}
3411 
3412 		size = skb_frag_size(frag);
3413 		data_len -= size;
3414 
3415 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3416 				       DMA_TO_DEVICE);
3417 
3418 		tx_bi = &tx_ring->tx_bi[i];
3419 	}
3420 
3421 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3422 
3423 	i++;
3424 	if (i == tx_ring->count)
3425 		i = 0;
3426 
3427 	tx_ring->next_to_use = i;
3428 
3429 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3430 
3431 	/* write last descriptor with EOP bit */
3432 	td_cmd |= I40E_TX_DESC_CMD_EOP;
3433 
3434 	/* We OR these values together to check both against 4 (WB_STRIDE)
3435 	 * below. This is safe since we don't re-use desc_count afterwards.
3436 	 */
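	/* Since WB_STRIDE is a power of two, (a | b) >= WB_STRIDE exactly
	 * when a >= WB_STRIDE or b >= WB_STRIDE, so a single compare below
	 * covers both desc_count and packet_stride.
	 */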
3437 	desc_count |= ++tx_ring->packet_stride;
3438 
3439 	if (desc_count >= WB_STRIDE) {
3440 		/* write last descriptor with RS bit set */
3441 		td_cmd |= I40E_TX_DESC_CMD_RS;
3442 		tx_ring->packet_stride = 0;
3443 	}
3444 
3445 	tx_desc->cmd_type_offset_bsz =
3446 			build_ctob(td_cmd, td_offset, size, td_tag);
3447 
3448 	/* Force memory writes to complete before letting h/w know there
3449 	 * are new descriptors to fetch.
3450 	 *
3451 	 * We also use this memory barrier to make certain all of the
3452 	 * status bits have been updated before next_to_watch is written.
3453 	 */
3454 	wmb();
3455 
3456 	/* set next_to_watch value indicating a packet is present */
3457 	first->next_to_watch = tx_desc;
3458 
3459 	/* notify HW of packet */
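	/* skb->xmit_more means the stack has more frames queued for this
	 * ring, so the tail (doorbell) write can be deferred and batched;
	 * a stopped queue forces the write so pending descriptors are not
	 * stranded.
	 */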
3460 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3461 		writel(i, tx_ring->tail);
3462 
3463 		/* we need this if more than one processor can write to our tail
3464 		 * at a time; it synchronizes IO on IA64/Altix systems
3465 		 */
3466 		mmiowb();
3467 	}
3468 
3469 	return 0;
3470 
3471 dma_error:
3472 	dev_info(tx_ring->dev, "TX DMA map failed\n");
3473 
3474 	/* clear dma mappings for failed tx_bi map */
3475 	for (;;) {
3476 		tx_bi = &tx_ring->tx_bi[i];
3477 		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3478 		if (tx_bi == first)
3479 			break;
3480 		if (i == 0)
3481 			i = tx_ring->count;
3482 		i--;
3483 	}
3484 
3485 	tx_ring->next_to_use = i;
3486 
3487 	return -1;
3488 }
3489 
3490 /**
3491  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3492  * @xdpf: XDP frame to transmit
3493  * @xdp_ring: XDP Tx ring
3494  **/
3495 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3496 			      struct i40e_ring *xdp_ring)
3497 {
3498 	u16 i = xdp_ring->next_to_use;
3499 	struct i40e_tx_buffer *tx_bi;
3500 	struct i40e_tx_desc *tx_desc;
3501 	u32 size = xdpf->len;
3502 	dma_addr_t dma;
3503 
3504 	if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3505 		xdp_ring->tx_stats.tx_busy++;
3506 		return I40E_XDP_CONSUMED;
3507 	}
3508 
3509 	dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
3510 	if (dma_mapping_error(xdp_ring->dev, dma))
3511 		return I40E_XDP_CONSUMED;
3512 
3513 	tx_bi = &xdp_ring->tx_bi[i];
3514 	tx_bi->bytecount = size;
3515 	tx_bi->gso_segs = 1;
3516 	tx_bi->xdpf = xdpf;
3517 
3518 	/* record length, and DMA address */
3519 	dma_unmap_len_set(tx_bi, len, size);
3520 	dma_unmap_addr_set(tx_bi, dma, dma);
3521 
3522 	tx_desc = I40E_TX_DESC(xdp_ring, i);
3523 	tx_desc->buffer_addr = cpu_to_le64(dma);
3524 	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3525 						  | I40E_TXD_CMD,
3526 						  0, size, 0);
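	/* a single data descriptor per XDP frame: insert the Ethernet CRC
	 * and, via I40E_TXD_CMD (EOP | RS), mark end-of-packet and request
	 * a descriptor write-back
	 */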
3527 
3528 	/* Make certain all of the status bits have been updated
3529 	 * before next_to_watch is written.
3530 	 */
3531 	smp_wmb();
3532 
3533 	i++;
3534 	if (i == xdp_ring->count)
3535 		i = 0;
3536 
3537 	tx_bi->next_to_watch = tx_desc;
3538 	xdp_ring->next_to_use = i;
3539 
3540 	return I40E_XDP_TX;
3541 }
3542 
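/* Hedged usage sketch (illustrative only, not part of the driver): send a
 * batch of frames with i40e_xmit_xdp_ring() and ring the doorbell once at
 * the end, mirroring i40e_xdp_xmit() below.  The function name is
 * hypothetical.
 */
static int __maybe_unused i40e_xdp_tx_batch_example(struct i40e_ring *xdp_ring,
						     struct xdp_frame **frames,
						     int n)
{
	int i, sent = 0;

	for (i = 0; i < n; i++) {
		/* anything but I40E_XDP_TX means the frame was not queued,
		 * so hand it back to the XDP memory model
		 */
		if (i40e_xmit_xdp_ring(frames[i], xdp_ring) == I40E_XDP_TX)
			sent++;
		else
			xdp_return_frame_rx_napi(frames[i]);
	}

	if (sent)
		i40e_xdp_ring_update_tail(xdp_ring);

	return sent;
}
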
3543 /**
3544  * i40e_xmit_frame_ring - Sends buffer on Tx ring
3545  * @skb:     send buffer
3546  * @tx_ring: ring to send buffer on
3547  *
3548  * Returns NETDEV_TX_OK if sent, else an error code
3549  **/
3550 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3551 					struct i40e_ring *tx_ring)
3552 {
3553 	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3554 	u32 cd_tunneling = 0, cd_l2tag2 = 0;
3555 	struct i40e_tx_buffer *first;
3556 	u32 td_offset = 0;
3557 	u32 tx_flags = 0;
3558 	__be16 protocol;
3559 	u32 td_cmd = 0;
3560 	u8 hdr_len = 0;
3561 	int tso, count;
3562 	int tsyn;
3563 
3564 	/* prefetch the data, we'll need it later */
3565 	prefetch(skb->data);
3566 
3567 	i40e_trace(xmit_frame_ring, skb, tx_ring);
3568 
3569 	count = i40e_xmit_descriptor_count(skb);
3570 	if (i40e_chk_linearize(skb, count)) {
3571 		if (__skb_linearize(skb)) {
3572 			dev_kfree_skb_any(skb);
3573 			return NETDEV_TX_OK;
3574 		}
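		/* linearizing moved all data into the skb head, so the
		 * descriptor count now depends only on the total length
		 */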
3575 		count = i40e_txd_use_count(skb->len);
3576 		tx_ring->tx_stats.tx_linearize++;
3577 	}
3578 
3579 	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3580 	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3581 	 *       + 4 desc gap to avoid the cache line where head is,
3582 	 *       + 1 desc for context descriptor,
3583 	 * otherwise try next time
3584 	 */
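	/* Illustrative example (hypothetical count): a frame needing
	 * count == 8 data descriptors must find at least 8 + 4 + 1 == 13
	 * free descriptors, otherwise the queue is stopped and we return
	 * busy.
	 */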
3585 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3586 		tx_ring->tx_stats.tx_busy++;
3587 		return NETDEV_TX_BUSY;
3588 	}
3589 
3590 	/* record the location of the first descriptor for this packet */
3591 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
3592 	first->skb = skb;
3593 	first->bytecount = skb->len;
3594 	first->gso_segs = 1;
3595 
3596 	/* prepare the xmit flags */
3597 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3598 		goto out_drop;
3599 
3600 	/* obtain protocol of skb */
3601 	protocol = vlan_get_protocol(skb);
3602 
3603 	/* setup IPv4/IPv6 offloads */
3604 	if (protocol == htons(ETH_P_IP))
3605 		tx_flags |= I40E_TX_FLAGS_IPV4;
3606 	else if (protocol == htons(ETH_P_IPV6))
3607 		tx_flags |= I40E_TX_FLAGS_IPV6;
3608 
3609 	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
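	/* i40e_tso() returns 1 when a TSO context was set up, 0 for
	 * non-TSO frames, and a negative error code on failure; the same
	 * variable is reused below for the checksum helper's return value.
	 */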
3610 
3611 	if (tso < 0)
3612 		goto out_drop;
3613 	else if (tso)
3614 		tx_flags |= I40E_TX_FLAGS_TSO;
3615 
3616 	/* Always offload the checksum, since it's in the data descriptor */
3617 	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3618 				  tx_ring, &cd_tunneling);
3619 	if (tso < 0)
3620 		goto out_drop;
3621 
3622 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3623 
3624 	if (tsyn)
3625 		tx_flags |= I40E_TX_FLAGS_TSYN;
3626 
3627 	skb_tx_timestamp(skb);
3628 
3629 	/* always enable CRC insertion offload */
3630 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
3631 
3632 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3633 			   cd_tunneling, cd_l2tag2);
3634 
3635 	/* Add Flow Director ATR if it's enabled.
3636 	 *
3637 	 * NOTE: this must always be directly before the data descriptor.
3638 	 */
3639 	i40e_atr(tx_ring, skb, tx_flags);
3640 
3641 	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3642 			td_cmd, td_offset))
3643 		goto cleanup_tx_tstamp;
3644 
3645 	return NETDEV_TX_OK;
3646 
3647 out_drop:
3648 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3649 	dev_kfree_skb_any(first->skb);
3650 	first->skb = NULL;
3651 cleanup_tx_tstamp:
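	/* if a Tx timestamp was requested, i40e_tsyn() stashed the skb in
	 * pf->ptp_tx_skb and set __I40E_PTP_TX_IN_PROGRESS; since this
	 * frame will never complete, drop that skb and clear the bit
	 */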
3652 	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3653 		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3654 
3655 		dev_kfree_skb_any(pf->ptp_tx_skb);
3656 		pf->ptp_tx_skb = NULL;
3657 		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3658 	}
3659 
3660 	return NETDEV_TX_OK;
3661 }
3662 
3663 /**
3664  * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3665  * @skb:    send buffer
3666  * @netdev: network interface device structure
3667  *
3668  * Returns NETDEV_TX_OK if sent, else an error code
3669  **/
3670 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3671 {
3672 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3673 	struct i40e_vsi *vsi = np->vsi;
3674 	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3675 
3676 	/* hardware can't handle really short frames; hardware padding works
3677 	 * beyond this point
3678 	 */
3679 	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3680 		return NETDEV_TX_OK;
3681 
3682 	return i40e_xmit_frame_ring(skb, tx_ring);
3683 }
3684 
3685 /**
3686  * i40e_xdp_xmit - Implements ndo_xdp_xmit
3687  * @dev: netdev
3688  * @n: number of XDP frames
 * @frames: array of XDP frames to transmit
 * @flags: XDP transmit flags (e.g. XDP_XMIT_FLUSH)
3689  *
3690  * Returns the number of frames successfully sent. Frames that fail are
3691  * freed via the XDP return API.
3692  *
3693  * For error cases, a negative errno code is returned and no frames
3694  * are transmitted (the caller must handle freeing frames).
3695  **/
3696 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3697 		  u32 flags)
3698 {
3699 	struct i40e_netdev_priv *np = netdev_priv(dev);
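	/* one XDP Tx ring is expected per CPU/queue pair, so the ring is
	 * selected by the current CPU; CPUs beyond num_queue_pairs are
	 * rejected below
	 */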
3700 	unsigned int queue_index = smp_processor_id();
3701 	struct i40e_vsi *vsi = np->vsi;
3702 	struct i40e_ring *xdp_ring;
3703 	int drops = 0;
3704 	int i;
3705 
3706 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
3707 		return -ENETDOWN;
3708 
3709 	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3710 		return -ENXIO;
3711 
3712 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3713 		return -EINVAL;
3714 
3715 	xdp_ring = vsi->xdp_rings[queue_index];
3716 
3717 	for (i = 0; i < n; i++) {
3718 		struct xdp_frame *xdpf = frames[i];
3719 		int err;
3720 
3721 		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3722 		if (err != I40E_XDP_TX) {
3723 			xdp_return_frame_rx_napi(xdpf);
3724 			drops++;
3725 		}
3726 	}
3727 
3728 	if (unlikely(flags & XDP_XMIT_FLUSH))
3729 		i40e_xdp_ring_update_tail(xdp_ring);
3730 
3731 	return n - drops;
3732 }
3733