xref: /openbmc/linux/drivers/net/ethernet/intel/i40e/i40e_txrx.c (revision 28efb0046512e8a13ed9f9bdf0d68d10bbfbe9cf)
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2016 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
29 #include <linux/bpf_trace.h>
30 #include "i40e.h"
31 #include "i40e_trace.h"
32 #include "i40e_prototype.h"
33 
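/**
 * build_ctob - build the Tx data descriptor cmd_type_offset_bsz qword
 * @td_cmd: descriptor command bits (EOP, RS, offload flags, ...)
 * @td_offset: header length offsets used by the offload engines
 * @size: length of the data buffer in bytes
 * @td_tag: L2 tag 1 (typically the VLAN tag) to insert
 *
 * Packs the data descriptor type together with the per-buffer command,
 * offset, size and tag fields into the little-endian quad word that is
 * written to tx_desc->cmd_type_offset_bsz.
 **/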
34 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
35 				u32 td_tag)
36 {
37 	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
38 			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
39 			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
40 			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
41 			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
42 }
43 
44 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
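/* EOP marks the last descriptor of a packet; RS asks the hardware to
 * report completion status once the descriptor has been processed.
 */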
45 /**
46  * i40e_fdir - Generate a Flow Director descriptor based on fdata
47  * @tx_ring: Tx ring to send buffer on
48  * @fdata: Flow director filter data
49  * @add: Indicate if we are adding a rule or deleting one
50  *
51  **/
52 static void i40e_fdir(struct i40e_ring *tx_ring,
53 		      struct i40e_fdir_filter *fdata, bool add)
54 {
55 	struct i40e_filter_program_desc *fdir_desc;
56 	struct i40e_pf *pf = tx_ring->vsi->back;
57 	u32 flex_ptype, dtype_cmd;
58 	u16 i;
59 
60 	/* grab the next descriptor */
61 	i = tx_ring->next_to_use;
62 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
63 
64 	i++;
65 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
66 
67 	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
68 		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
69 
70 	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
71 		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
72 
73 	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
74 		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
75 
76 	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
77 		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
78 
79 	/* Use LAN VSI Id if not programmed by user */
80 	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
81 		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
82 		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
83 
84 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
85 
86 	dtype_cmd |= add ?
87 		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
88 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
89 		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
90 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;
91 
92 	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
93 		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
94 
95 	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
96 		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
97 
98 	if (fdata->cnt_index) {
99 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
100 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
101 			     ((u32)fdata->cnt_index <<
102 			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
103 	}
104 
105 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
106 	fdir_desc->rsvd = cpu_to_le32(0);
107 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
108 	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
109 }
110 
111 #define I40E_FD_CLEAN_DELAY 10
112 /**
113  * i40e_program_fdir_filter - Program a Flow Director filter
114  * @fdir_data: Packet data that will be filter parameters
115  * @raw_packet: the pre-allocated packet buffer for FDir
116  * @pf: The PF pointer
117  * @add: True for add/update, False for remove
118  **/
119 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
120 				    u8 *raw_packet, struct i40e_pf *pf,
121 				    bool add)
122 {
123 	struct i40e_tx_buffer *tx_buf, *first;
124 	struct i40e_tx_desc *tx_desc;
125 	struct i40e_ring *tx_ring;
126 	struct i40e_vsi *vsi;
127 	struct device *dev;
128 	dma_addr_t dma;
129 	u32 td_cmd = 0;
130 	u16 i;
131 
132 	/* find existing FDIR VSI */
133 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
134 	if (!vsi)
135 		return -ENOENT;
136 
137 	tx_ring = vsi->tx_rings[0];
138 	dev = tx_ring->dev;
139 
140 	/* we need two descriptors to add/del a filter and we can wait */
141 	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
142 		if (!i)
143 			return -EAGAIN;
144 		msleep_interruptible(1);
145 	}
146 
147 	dma = dma_map_single(dev, raw_packet,
148 			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
149 	if (dma_mapping_error(dev, dma))
150 		goto dma_fail;
151 
152 	/* grab the next descriptor */
153 	i = tx_ring->next_to_use;
154 	first = &tx_ring->tx_bi[i];
155 	i40e_fdir(tx_ring, fdir_data, add);
156 
157 	/* Now program a dummy descriptor */
158 	i = tx_ring->next_to_use;
159 	tx_desc = I40E_TX_DESC(tx_ring, i);
160 	tx_buf = &tx_ring->tx_bi[i];
161 
162 	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
163 
164 	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
165 
166 	/* record length, and DMA address */
167 	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
168 	dma_unmap_addr_set(tx_buf, dma, dma);
169 
170 	tx_desc->buffer_addr = cpu_to_le64(dma);
171 	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
172 
173 	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
174 	tx_buf->raw_buf = (void *)raw_packet;
175 
176 	tx_desc->cmd_type_offset_bsz =
177 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
178 
179 	/* Force memory writes to complete before letting h/w
180 	 * know there are new descriptors to fetch.
181 	 */
182 	wmb();
183 
184 	/* Mark the data descriptor to be watched */
185 	first->next_to_watch = tx_desc;
186 
187 	writel(tx_ring->next_to_use, tx_ring->tail);
188 	return 0;
189 
190 dma_fail:
191 	return -1;
192 }
193 
194 #define IP_HEADER_OFFSET 14
195 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
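/* The dummy frame below is a 14-byte Ethernet header followed by a
 * 20-byte IPv4 header (protocol 0x11 = UDP) and an 8-byte UDP header:
 * 14 + 20 + 8 = 42 bytes.
 */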
196 /**
197  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
198  * @vsi: pointer to the targeted VSI
199  * @fd_data: the flow director data required for the FDir descriptor
200  * @add: true adds a filter, false removes it
201  *
202  * Returns 0 if the filters were successfully added or removed
203  **/
204 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
205 				   struct i40e_fdir_filter *fd_data,
206 				   bool add)
207 {
208 	struct i40e_pf *pf = vsi->back;
209 	struct udphdr *udp;
210 	struct iphdr *ip;
211 	u8 *raw_packet;
212 	int ret;
213 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
214 		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
215 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
216 
217 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
218 	if (!raw_packet)
219 		return -ENOMEM;
220 	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
221 
222 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
223 	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
224 	      + sizeof(struct iphdr));
225 
226 	ip->daddr = fd_data->dst_ip;
227 	udp->dest = fd_data->dst_port;
228 	ip->saddr = fd_data->src_ip;
229 	udp->source = fd_data->src_port;
230 
231 	if (fd_data->flex_filter) {
232 		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
233 		__be16 pattern = fd_data->flex_word;
234 		u16 off = fd_data->flex_offset;
235 
236 		*((__force __be16 *)(payload + off)) = pattern;
237 	}
238 
239 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
240 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
241 	if (ret) {
242 		dev_info(&pf->pdev->dev,
243 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
244 			 fd_data->pctype, fd_data->fd_id, ret);
245 		/* Free the packet buffer since it wasn't added to the ring */
246 		kfree(raw_packet);
247 		return -EOPNOTSUPP;
248 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
249 		if (add)
250 			dev_info(&pf->pdev->dev,
251 				 "Filter OK for PCTYPE %d loc = %d\n",
252 				 fd_data->pctype, fd_data->fd_id);
253 		else
254 			dev_info(&pf->pdev->dev,
255 				 "Filter deleted for PCTYPE %d loc = %d\n",
256 				 fd_data->pctype, fd_data->fd_id);
257 	}
258 
259 	if (add)
260 		pf->fd_udp4_filter_cnt++;
261 	else
262 		pf->fd_udp4_filter_cnt--;
263 
264 	return 0;
265 }
266 
267 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
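/* 54 bytes = 14 (Ethernet) + 20 (IPv4, protocol 0x06 = TCP) + 20 (TCP). */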
268 /**
269  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
270  * @vsi: pointer to the targeted VSI
271  * @fd_data: the flow director data required for the FDir descriptor
272  * @add: true adds a filter, false removes it
273  *
274  * Returns 0 if the filters were successfully added or removed
275  **/
276 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
277 				   struct i40e_fdir_filter *fd_data,
278 				   bool add)
279 {
280 	struct i40e_pf *pf = vsi->back;
281 	struct tcphdr *tcp;
282 	struct iphdr *ip;
283 	u8 *raw_packet;
284 	int ret;
285 	/* Dummy packet */
286 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
287 		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
288 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
289 		0x0, 0x72, 0, 0, 0, 0};
290 
291 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
292 	if (!raw_packet)
293 		return -ENOMEM;
294 	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
295 
296 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
297 	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
298 	      + sizeof(struct iphdr));
299 
300 	ip->daddr = fd_data->dst_ip;
301 	tcp->dest = fd_data->dst_port;
302 	ip->saddr = fd_data->src_ip;
303 	tcp->source = fd_data->src_port;
304 
305 	if (fd_data->flex_filter) {
306 		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
307 		__be16 pattern = fd_data->flex_word;
308 		u16 off = fd_data->flex_offset;
309 
310 		*((__force __be16 *)(payload + off)) = pattern;
311 	}
312 
313 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
314 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
315 	if (ret) {
316 		dev_info(&pf->pdev->dev,
317 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
318 			 fd_data->pctype, fd_data->fd_id, ret);
319 		/* Free the packet buffer since it wasn't added to the ring */
320 		kfree(raw_packet);
321 		return -EOPNOTSUPP;
322 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
323 		if (add)
324 			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
325 				 fd_data->pctype, fd_data->fd_id);
326 		else
327 			dev_info(&pf->pdev->dev,
328 				 "Filter deleted for PCTYPE %d loc = %d\n",
329 				 fd_data->pctype, fd_data->fd_id);
330 	}
331 
332 	if (add) {
333 		pf->fd_tcp4_filter_cnt++;
334 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
335 		    I40E_DEBUG_FD & pf->hw.debug_mask)
336 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
337 		pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
338 	} else {
339 		pf->fd_tcp4_filter_cnt--;
340 	}
341 
342 	return 0;
343 }
344 
345 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
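/* 46 bytes = 14 (Ethernet) + 20 (IPv4, protocol 0x84 = SCTP) +
 * 12 (SCTP common header).
 */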
346 /**
347  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
348  * a specific flow spec
349  * @vsi: pointer to the targeted VSI
350  * @fd_data: the flow director data required for the FDir descriptor
351  * @add: true adds a filter, false removes it
352  *
353  * Returns 0 if the filters were successfully added or removed
354  **/
355 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
356 				    struct i40e_fdir_filter *fd_data,
357 				    bool add)
358 {
359 	struct i40e_pf *pf = vsi->back;
360 	struct sctphdr *sctp;
361 	struct iphdr *ip;
362 	u8 *raw_packet;
363 	int ret;
364 	/* Dummy packet */
365 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
366 		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
367 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
368 
369 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
370 	if (!raw_packet)
371 		return -ENOMEM;
372 	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
373 
374 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
375 	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
376 	      + sizeof(struct iphdr));
377 
378 	ip->daddr = fd_data->dst_ip;
379 	sctp->dest = fd_data->dst_port;
380 	ip->saddr = fd_data->src_ip;
381 	sctp->source = fd_data->src_port;
382 
383 	if (fd_data->flex_filter) {
384 		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
385 		__be16 pattern = fd_data->flex_word;
386 		u16 off = fd_data->flex_offset;
387 
388 		*((__force __be16 *)(payload + off)) = pattern;
389 	}
390 
391 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
392 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
393 	if (ret) {
394 		dev_info(&pf->pdev->dev,
395 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
396 			 fd_data->pctype, fd_data->fd_id, ret);
397 		/* Free the packet buffer since it wasn't added to the ring */
398 		kfree(raw_packet);
399 		return -EOPNOTSUPP;
400 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
401 		if (add)
402 			dev_info(&pf->pdev->dev,
403 				 "Filter OK for PCTYPE %d loc = %d\n",
404 				 fd_data->pctype, fd_data->fd_id);
405 		else
406 			dev_info(&pf->pdev->dev,
407 				 "Filter deleted for PCTYPE %d loc = %d\n",
408 				 fd_data->pctype, fd_data->fd_id);
409 	}
410 
411 	if (add)
412 		pf->fd_sctp4_filter_cnt++;
413 	else
414 		pf->fd_sctp4_filter_cnt--;
415 
416 	return 0;
417 }
418 
419 #define I40E_IP_DUMMY_PACKET_LEN 34
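/* 34 bytes = 14 (Ethernet) + 20 (bare IPv4 header, no L4 payload). */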
420 /**
421  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
422  * a specific flow spec
423  * @vsi: pointer to the targeted VSI
424  * @fd_data: the flow director data required for the FDir descriptor
425  * @add: true adds a filter, false removes it
426  *
427  * Returns 0 if the filters were successfully added or removed
428  **/
429 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
430 				  struct i40e_fdir_filter *fd_data,
431 				  bool add)
432 {
433 	struct i40e_pf *pf = vsi->back;
434 	struct iphdr *ip;
435 	u8 *raw_packet;
436 	int ret;
437 	int i;
438 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
439 		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
440 		0, 0, 0, 0};
441 
442 	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
443 	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
444 		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
445 		if (!raw_packet)
446 			return -ENOMEM;
447 		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
448 		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
449 
450 		ip->saddr = fd_data->src_ip;
451 		ip->daddr = fd_data->dst_ip;
452 		ip->protocol = 0;
453 
454 		if (fd_data->flex_filter) {
455 			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
456 			__be16 pattern = fd_data->flex_word;
457 			u16 off = fd_data->flex_offset;
458 
459 			*((__force __be16 *)(payload + off)) = pattern;
460 		}
461 
462 		fd_data->pctype = i;
463 		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
464 		if (ret) {
465 			dev_info(&pf->pdev->dev,
466 				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
467 				 fd_data->pctype, fd_data->fd_id, ret);
468 			/* The packet buffer wasn't added to the ring so we
469 			 * need to free it now.
470 			 */
471 			kfree(raw_packet);
472 			return -EOPNOTSUPP;
473 		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
474 			if (add)
475 				dev_info(&pf->pdev->dev,
476 					 "Filter OK for PCTYPE %d loc = %d\n",
477 					 fd_data->pctype, fd_data->fd_id);
478 			else
479 				dev_info(&pf->pdev->dev,
480 					 "Filter deleted for PCTYPE %d loc = %d\n",
481 					 fd_data->pctype, fd_data->fd_id);
482 		}
483 	}
484 
485 	if (add)
486 		pf->fd_ip4_filter_cnt++;
487 	else
488 		pf->fd_ip4_filter_cnt--;
489 
490 	return 0;
491 }
492 
493 /**
494  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
495  * @vsi: pointer to the targeted VSI
496  * @input: flow director filter data describing the rule to add or delete
497  * @add: true adds a filter, false removes it
498  *
499  **/
500 int i40e_add_del_fdir(struct i40e_vsi *vsi,
501 		      struct i40e_fdir_filter *input, bool add)
502 {
503 	struct i40e_pf *pf = vsi->back;
504 	int ret;
505 
506 	switch (input->flow_type & ~FLOW_EXT) {
507 	case TCP_V4_FLOW:
508 		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
509 		break;
510 	case UDP_V4_FLOW:
511 		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
512 		break;
513 	case SCTP_V4_FLOW:
514 		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
515 		break;
516 	case IP_USER_FLOW:
517 		switch (input->ip4_proto) {
518 		case IPPROTO_TCP:
519 			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
520 			break;
521 		case IPPROTO_UDP:
522 			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
523 			break;
524 		case IPPROTO_SCTP:
525 			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
526 			break;
527 		case IPPROTO_IP:
528 			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
529 			break;
530 		default:
531 			/* We cannot support masking based on protocol */
532 			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
533 				 input->ip4_proto);
534 			return -EINVAL;
535 		}
536 		break;
537 	default:
538 		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
539 			 input->flow_type);
540 		return -EINVAL;
541 	}
542 
543 	/* The buffer allocated here will normally be freed by
544 	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
545 	 * completion. In the event of an error adding the buffer to the FDIR
546 	 * ring, it will immediately be freed. It may also be freed by
547 	 * i40e_clean_tx_ring() when closing the VSI.
548 	 */
549 	return ret;
550 }
551 
552 /**
553  * i40e_fd_handle_status - check the Programming Status for FD
554  * @rx_ring: the Rx ring for this descriptor
555  * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
556  * @prog_id: the id originally used for programming
557  *
558  * This is used to verify if the FD programming or invalidation
559  * This is used to verify whether the FD programming or invalidation
560  * requested by SW of the HW succeeded, and to take action accordingly.
561 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
562 				  union i40e_rx_desc *rx_desc, u8 prog_id)
563 {
564 	struct i40e_pf *pf = rx_ring->vsi->back;
565 	struct pci_dev *pdev = pf->pdev;
566 	u32 fcnt_prog, fcnt_avail;
567 	u32 error;
568 	u64 qw;
569 
570 	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
571 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
572 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
573 
574 	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
575 		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
576 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
577 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
578 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
579 				 pf->fd_inv);
580 
581 		/* Check if the programming error is for ATR.
582 		 * If so, auto disable ATR and set a state for
583 		 * flush in progress. Next time we come here if flush is in
584 		 * progress do nothing, once flush is complete the state will
585 		 * be cleared.
586 		 */
587 		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
588 			return;
589 
590 		pf->fd_add_err++;
591 		/* store the current atr filter count */
592 		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
593 
594 		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
595 		    pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
596 			pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
597 			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
598 		}
599 
600 		/* filter programming failed most likely due to table full */
601 		fcnt_prog = i40e_get_global_fd_count(pf);
602 		fcnt_avail = pf->fdir_pf_filter_count;
603 		/* If ATR is running fcnt_prog can quickly change,
604 		 * if we are very close to full, it makes sense to disable
605 		 * FD ATR/SB and then re-enable it when there is room.
606 		 */
607 		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
608 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
609 			    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
610 				pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
611 				if (I40E_DEBUG_FD & pf->hw.debug_mask)
612 					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
613 			}
614 		}
615 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
616 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
617 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
618 				 rx_desc->wb.qword0.hi_dword.fd_id);
619 	}
620 }
621 
622 /**
623  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
624  * @ring:      the ring that owns the buffer
625  * @tx_buffer: the buffer to free
626  **/
627 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
628 					    struct i40e_tx_buffer *tx_buffer)
629 {
630 	if (tx_buffer->skb) {
631 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
632 			kfree(tx_buffer->raw_buf);
633 		else if (ring_is_xdp(ring))
634 			page_frag_free(tx_buffer->raw_buf);
635 		else
636 			dev_kfree_skb_any(tx_buffer->skb);
637 		if (dma_unmap_len(tx_buffer, len))
638 			dma_unmap_single(ring->dev,
639 					 dma_unmap_addr(tx_buffer, dma),
640 					 dma_unmap_len(tx_buffer, len),
641 					 DMA_TO_DEVICE);
642 	} else if (dma_unmap_len(tx_buffer, len)) {
643 		dma_unmap_page(ring->dev,
644 			       dma_unmap_addr(tx_buffer, dma),
645 			       dma_unmap_len(tx_buffer, len),
646 			       DMA_TO_DEVICE);
647 	}
648 
649 	tx_buffer->next_to_watch = NULL;
650 	tx_buffer->skb = NULL;
651 	dma_unmap_len_set(tx_buffer, len, 0);
652 	/* tx_buffer must be completely set up in the transmit path */
653 }
654 
655 /**
656  * i40e_clean_tx_ring - Free any empty Tx buffers
657  * @tx_ring: ring to be cleaned
658  **/
659 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
660 {
661 	unsigned long bi_size;
662 	u16 i;
663 
664 	/* ring already cleared, nothing to do */
665 	if (!tx_ring->tx_bi)
666 		return;
667 
668 	/* Free all the Tx ring sk_buffs */
669 	for (i = 0; i < tx_ring->count; i++)
670 		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
671 
672 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
673 	memset(tx_ring->tx_bi, 0, bi_size);
674 
675 	/* Zero out the descriptor ring */
676 	memset(tx_ring->desc, 0, tx_ring->size);
677 
678 	tx_ring->next_to_use = 0;
679 	tx_ring->next_to_clean = 0;
680 
681 	if (!tx_ring->netdev)
682 		return;
683 
684 	/* cleanup Tx queue statistics */
685 	netdev_tx_reset_queue(txring_txq(tx_ring));
686 }
687 
688 /**
689  * i40e_free_tx_resources - Free Tx resources per queue
690  * @tx_ring: Tx descriptor ring for a specific queue
691  *
692  * Free all transmit software resources
693  **/
694 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
695 {
696 	i40e_clean_tx_ring(tx_ring);
697 	kfree(tx_ring->tx_bi);
698 	tx_ring->tx_bi = NULL;
699 
700 	if (tx_ring->desc) {
701 		dma_free_coherent(tx_ring->dev, tx_ring->size,
702 				  tx_ring->desc, tx_ring->dma);
703 		tx_ring->desc = NULL;
704 	}
705 }
706 
707 /**
708  * i40e_get_tx_pending - how many tx descriptors not processed
709  * @ring: the ring of descriptors
710  *
711  * Since there is no access to the ring head register
712  * in XL710, we need to use our local copies
713  **/
714 u32 i40e_get_tx_pending(struct i40e_ring *ring)
715 {
716 	u32 head, tail;
717 
718 	head = i40e_get_head(ring);
719 	tail = readl(ring->tail);
720 
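	/* The ring is circular, so account for wrap-around.  For example,
	 * with count = 512, head = 500 and tail = 10, there are still
	 * 10 + 512 - 500 = 22 descriptors pending.
	 */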
721 	if (head != tail)
722 		return (head < tail) ?
723 			tail - head : (tail + ring->count - head);
724 
725 	return 0;
726 }
727 
728 #define WB_STRIDE 4
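/* If fewer than WB_STRIDE descriptors are still pending after a clean
 * pass, i40e_clean_tx_irq() arms a forced write-back so they are not
 * left waiting while we stay in NAPI (WB_ON_ITR mode).
 */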
729 
730 /**
731  * i40e_clean_tx_irq - Reclaim resources after transmit completes
732  * @vsi: the VSI we care about
733  * @tx_ring: Tx ring to clean
734  * @napi_budget: Used to determine if we are in netpoll
735  *
736  * Returns true if there's any budget left (i.e. the clean is finished)
737  **/
738 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
739 			      struct i40e_ring *tx_ring, int napi_budget)
740 {
741 	u16 i = tx_ring->next_to_clean;
742 	struct i40e_tx_buffer *tx_buf;
743 	struct i40e_tx_desc *tx_head;
744 	struct i40e_tx_desc *tx_desc;
745 	unsigned int total_bytes = 0, total_packets = 0;
746 	unsigned int budget = vsi->work_limit;
747 
748 	tx_buf = &tx_ring->tx_bi[i];
749 	tx_desc = I40E_TX_DESC(tx_ring, i);
750 	i -= tx_ring->count;
751 
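	/* This ring uses head write-back: the hardware stores the index of
	 * the last completed descriptor in the extra u32 reserved past the
	 * end of the descriptor ring (see i40e_setup_tx_descriptors()),
	 * and i40e_get_head() reads that value back here.
	 */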
752 	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
753 
754 	do {
755 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
756 
757 		/* if next_to_watch is not set then there is no work pending */
758 		if (!eop_desc)
759 			break;
760 
761 		/* prevent any other reads prior to eop_desc */
762 		read_barrier_depends();
763 
764 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
765 		/* we have caught up to head, no work left to do */
766 		if (tx_head == tx_desc)
767 			break;
768 
769 		/* clear next_to_watch to prevent false hangs */
770 		tx_buf->next_to_watch = NULL;
771 
772 		/* update the statistics for this packet */
773 		total_bytes += tx_buf->bytecount;
774 		total_packets += tx_buf->gso_segs;
775 
776 		/* free the skb/XDP data */
777 		if (ring_is_xdp(tx_ring))
778 			page_frag_free(tx_buf->raw_buf);
779 		else
780 			napi_consume_skb(tx_buf->skb, napi_budget);
781 
782 		/* unmap skb header data */
783 		dma_unmap_single(tx_ring->dev,
784 				 dma_unmap_addr(tx_buf, dma),
785 				 dma_unmap_len(tx_buf, len),
786 				 DMA_TO_DEVICE);
787 
788 		/* clear tx_buffer data */
789 		tx_buf->skb = NULL;
790 		dma_unmap_len_set(tx_buf, len, 0);
791 
792 		/* unmap remaining buffers */
793 		while (tx_desc != eop_desc) {
794 			i40e_trace(clean_tx_irq_unmap,
795 				   tx_ring, tx_desc, tx_buf);
796 
797 			tx_buf++;
798 			tx_desc++;
799 			i++;
800 			if (unlikely(!i)) {
801 				i -= tx_ring->count;
802 				tx_buf = tx_ring->tx_bi;
803 				tx_desc = I40E_TX_DESC(tx_ring, 0);
804 			}
805 
806 			/* unmap any remaining paged data */
807 			if (dma_unmap_len(tx_buf, len)) {
808 				dma_unmap_page(tx_ring->dev,
809 					       dma_unmap_addr(tx_buf, dma),
810 					       dma_unmap_len(tx_buf, len),
811 					       DMA_TO_DEVICE);
812 				dma_unmap_len_set(tx_buf, len, 0);
813 			}
814 		}
815 
816 		/* move us one more past the eop_desc for start of next pkt */
817 		tx_buf++;
818 		tx_desc++;
819 		i++;
820 		if (unlikely(!i)) {
821 			i -= tx_ring->count;
822 			tx_buf = tx_ring->tx_bi;
823 			tx_desc = I40E_TX_DESC(tx_ring, 0);
824 		}
825 
826 		prefetch(tx_desc);
827 
828 		/* update budget accounting */
829 		budget--;
830 	} while (likely(budget));
831 
832 	i += tx_ring->count;
833 	tx_ring->next_to_clean = i;
834 	u64_stats_update_begin(&tx_ring->syncp);
835 	tx_ring->stats.bytes += total_bytes;
836 	tx_ring->stats.packets += total_packets;
837 	u64_stats_update_end(&tx_ring->syncp);
838 	tx_ring->q_vector->tx.total_bytes += total_bytes;
839 	tx_ring->q_vector->tx.total_packets += total_packets;
840 
841 	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
842 		/* check to see if there are < 4 descriptors
843 		 * waiting to be written back, then kick the hardware to force
844 		 * them to be written back in case we stay in NAPI.
845 		 * In this mode on X722 we do not enable Interrupt.
846 		 */
847 		unsigned int j = i40e_get_tx_pending(tx_ring);
848 
849 		if (budget &&
850 		    ((j / WB_STRIDE) == 0) && (j > 0) &&
851 		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
852 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
853 			tx_ring->arm_wb = true;
854 	}
855 
856 	if (ring_is_xdp(tx_ring))
857 		return !!budget;
858 
859 	/* notify netdev of completed buffers */
860 	netdev_tx_completed_queue(txring_txq(tx_ring),
861 				  total_packets, total_bytes);
862 
863 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
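	/* Only wake the queue once enough descriptors are free for roughly
	 * two more worst-case packets, so the queue does not bounce between
	 * being stopped and woken.
	 */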
864 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
865 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
866 		/* Make sure that anybody stopping the queue after this
867 		 * sees the new next_to_clean.
868 		 */
869 		smp_mb();
870 		if (__netif_subqueue_stopped(tx_ring->netdev,
871 					     tx_ring->queue_index) &&
872 		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
873 			netif_wake_subqueue(tx_ring->netdev,
874 					    tx_ring->queue_index);
875 			++tx_ring->tx_stats.restart_queue;
876 		}
877 	}
878 
879 	return !!budget;
880 }
881 
882 /**
883  * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
884  * @vsi: the VSI we care about
885  * @q_vector: the vector on which to enable writeback
886  *
887  **/
888 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
889 				  struct i40e_q_vector *q_vector)
890 {
891 	u16 flags = q_vector->tx.ring[0].flags;
892 	u32 val;
893 
894 	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
895 		return;
896 
897 	if (q_vector->arm_wb_state)
898 		return;
899 
900 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
901 		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
902 		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
903 
904 		wr32(&vsi->back->hw,
905 		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
906 		     val);
907 	} else {
908 		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
909 		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
910 
911 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
912 	}
913 	q_vector->arm_wb_state = true;
914 }
915 
916 /**
917  * i40e_force_wb - Issue SW Interrupt so HW does a wb
918  * @vsi: the VSI we care about
919  * @q_vector: the vector on which to force writeback
920  *
921  **/
922 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
923 {
924 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
925 		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
926 			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
927 			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
928 			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
929 			  /* allow 00 to be written to the index */
930 
931 		wr32(&vsi->back->hw,
932 		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
933 					 vsi->base_vector - 1), val);
934 	} else {
935 		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
936 			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
937 			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
938 			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
939 			/* allow 00 to be written to the index */
940 
941 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
942 	}
943 }
944 
945 /**
946  * i40e_set_new_dynamic_itr - Find new ITR level
947  * @rc: structure containing ring performance data
948  *
949  * Returns true if ITR changed, false if not
950  *
951  * Stores a new ITR value based on packets and byte counts during
952  * the last interrupt.  The advantage of per interrupt computation
953  * is faster updates and more accurate ITR for the current traffic
954  * pattern.  Constants in this function were computed based on
955  * theoretical maximum wire speed and thresholds were set based on
956  * testing data as well as attempting to minimize response time
957  * while increasing bulk throughput.
958  **/
959 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
960 {
961 	enum i40e_latency_range new_latency_range = rc->latency_range;
962 	u32 new_itr = rc->itr;
963 	int bytes_per_usec;
964 	unsigned int usecs, estimated_usecs;
965 
966 	if (rc->total_packets == 0 || !rc->itr)
967 		return false;
968 
969 	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
970 	bytes_per_usec = rc->total_bytes / usecs;
971 
972 	/* The calculations in this algorithm depend on interrupts actually
973 	 * firing at the ITR rate. This may not happen if the packet rate is
974 	 * really low, or if we've been napi polling. Check to make sure
975 	 * that's not the case before we continue.
976 	 */
977 	estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
978 	if (estimated_usecs > usecs) {
979 		new_latency_range = I40E_LOW_LATENCY;
980 		goto reset_latency;
981 	}
982 
983 	/* simple throttle rate management
984 	 *   0-10MB/s   lowest (50000 ints/s)
985 	 *  10-20MB/s   low    (20000 ints/s)
986 	 *  20-1249MB/s bulk   (18000 ints/s)
987 	 *
988 	 * The math works out because the divisor is in 10^(-6) which
989 	 * turns the bytes/us input value into MB/s values, but
990 	 * make sure to use usecs, as the register values written
991 	 * are in 2 usec increments in the ITR registers, and make sure
992 	 * to use the smoothed values that the countdown timer gives us.
993 	 */
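	/* Worked example: 3000 bytes seen over a 200 usec interval gives
	 * 15 bytes/usec (~15 MB/s), which falls in the 10-20 MB/s bucket
	 * and selects the low latency (20K ints/s) setting below.
	 */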
994 	switch (new_latency_range) {
995 	case I40E_LOWEST_LATENCY:
996 		if (bytes_per_usec > 10)
997 			new_latency_range = I40E_LOW_LATENCY;
998 		break;
999 	case I40E_LOW_LATENCY:
1000 		if (bytes_per_usec > 20)
1001 			new_latency_range = I40E_BULK_LATENCY;
1002 		else if (bytes_per_usec <= 10)
1003 			new_latency_range = I40E_LOWEST_LATENCY;
1004 		break;
1005 	case I40E_BULK_LATENCY:
1006 	default:
1007 		if (bytes_per_usec <= 20)
1008 			new_latency_range = I40E_LOW_LATENCY;
1009 		break;
1010 	}
1011 
1012 reset_latency:
1013 	rc->latency_range = new_latency_range;
1014 
1015 	switch (new_latency_range) {
1016 	case I40E_LOWEST_LATENCY:
1017 		new_itr = I40E_ITR_50K;
1018 		break;
1019 	case I40E_LOW_LATENCY:
1020 		new_itr = I40E_ITR_20K;
1021 		break;
1022 	case I40E_BULK_LATENCY:
1023 		new_itr = I40E_ITR_18K;
1024 		break;
1025 	default:
1026 		break;
1027 	}
1028 
1029 	rc->total_bytes = 0;
1030 	rc->total_packets = 0;
1031 	rc->last_itr_update = jiffies;
1032 
1033 	if (new_itr != rc->itr) {
1034 		rc->itr = new_itr;
1035 		return true;
1036 	}
1037 	return false;
1038 }
1039 
1040 /**
1041  * i40e_rx_is_programming_status - check for programming status descriptor
1042  * @qw: qword representing status_error_len in CPU ordering
1043  *
1044  * The value in the descriptor length field indicates whether this is a
1045  * programming status descriptor for flow director or FCoE: if the length
1046  * equals I40E_RX_PROG_STATUS_DESC_LENGTH it is a programming status
1047  * descriptor, otherwise it is a packet descriptor.
1048  **/
1049 static inline bool i40e_rx_is_programming_status(u64 qw)
1050 {
1051 	/* The Rx filter programming status and SPH bit occupy the same
1052 	 * spot in the descriptor. Since we don't support packet split we
1053 	 * can just reuse the bit as an indication that this is a
1054 	 * programming status descriptor.
1055 	 */
1056 	return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1057 }
1058 
1059 /**
1060  * i40e_clean_programming_status - clean the programming status descriptor
1061  * @rx_ring: the rx ring that has this descriptor
1062  * @rx_desc: the rx descriptor written back by HW
1063  * @qw: qword representing status_error_len in CPU ordering
1064  *
1065  * Flow director should handle FD_FILTER_STATUS to check its filter programming
1066  * status being successful or not and take actions accordingly. FCoE should
1067  * handle its context/filter programming/invalidation status and take actions.
1068  *
1069  **/
1070 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1071 					  union i40e_rx_desc *rx_desc,
1072 					  u64 qw)
1073 {
1074 	u32 ntc = rx_ring->next_to_clean + 1;
1075 	u8 id;
1076 
1077 	/* fetch, update, and store next to clean */
1078 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1079 	rx_ring->next_to_clean = ntc;
1080 
1081 	prefetch(I40E_RX_DESC(rx_ring, ntc));
1082 
1083 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1084 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1085 
1086 	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1087 		i40e_fd_handle_status(rx_ring, rx_desc, id);
1088 }
1089 
1090 /**
1091  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1092  * @tx_ring: the tx ring to set up
1093  *
1094  * Return 0 on success, negative on error
1095  **/
1096 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1097 {
1098 	struct device *dev = tx_ring->dev;
1099 	int bi_size;
1100 
1101 	if (!dev)
1102 		return -ENOMEM;
1103 
1104 	/* warn if we are about to overwrite the pointer */
1105 	WARN_ON(tx_ring->tx_bi);
1106 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1107 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1108 	if (!tx_ring->tx_bi)
1109 		goto err;
1110 
1111 	u64_stats_init(&tx_ring->syncp);
1112 
1113 	/* round up to nearest 4K */
1114 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1115 	/* add a u32 for head writeback; the alignment below guarantees
1116 	 * this is at least one cache line in size
1117 	 */
1118 	tx_ring->size += sizeof(u32);
1119 	tx_ring->size = ALIGN(tx_ring->size, 4096);
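	/* For example, a 512-entry ring of 16-byte descriptors needs
	 * 512 * 16 + 4 = 8196 bytes, which rounds up to 12288 bytes here.
	 */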
1120 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1121 					   &tx_ring->dma, GFP_KERNEL);
1122 	if (!tx_ring->desc) {
1123 		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1124 			 tx_ring->size);
1125 		goto err;
1126 	}
1127 
1128 	tx_ring->next_to_use = 0;
1129 	tx_ring->next_to_clean = 0;
1130 	return 0;
1131 
1132 err:
1133 	kfree(tx_ring->tx_bi);
1134 	tx_ring->tx_bi = NULL;
1135 	return -ENOMEM;
1136 }
1137 
1138 /**
1139  * i40e_clean_rx_ring - Free Rx buffers
1140  * @rx_ring: ring to be cleaned
1141  **/
1142 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1143 {
1144 	unsigned long bi_size;
1145 	u16 i;
1146 
1147 	/* ring already cleared, nothing to do */
1148 	if (!rx_ring->rx_bi)
1149 		return;
1150 
1151 	if (rx_ring->skb) {
1152 		dev_kfree_skb(rx_ring->skb);
1153 		rx_ring->skb = NULL;
1154 	}
1155 
1156 	/* Free all the Rx ring sk_buffs */
1157 	for (i = 0; i < rx_ring->count; i++) {
1158 		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1159 
1160 		if (!rx_bi->page)
1161 			continue;
1162 
1163 		/* Invalidate cache lines that may have been written to by
1164 		 * device so that we avoid corrupting memory.
1165 		 */
1166 		dma_sync_single_range_for_cpu(rx_ring->dev,
1167 					      rx_bi->dma,
1168 					      rx_bi->page_offset,
1169 					      rx_ring->rx_buf_len,
1170 					      DMA_FROM_DEVICE);
1171 
1172 		/* free resources associated with mapping */
1173 		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1174 				     i40e_rx_pg_size(rx_ring),
1175 				     DMA_FROM_DEVICE,
1176 				     I40E_RX_DMA_ATTR);
1177 
1178 		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1179 
1180 		rx_bi->page = NULL;
1181 		rx_bi->page_offset = 0;
1182 	}
1183 
1184 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1185 	memset(rx_ring->rx_bi, 0, bi_size);
1186 
1187 	/* Zero out the descriptor ring */
1188 	memset(rx_ring->desc, 0, rx_ring->size);
1189 
1190 	rx_ring->next_to_alloc = 0;
1191 	rx_ring->next_to_clean = 0;
1192 	rx_ring->next_to_use = 0;
1193 }
1194 
1195 /**
1196  * i40e_free_rx_resources - Free Rx resources
1197  * @rx_ring: ring to clean the resources from
1198  *
1199  * Free all receive software resources
1200  **/
1201 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1202 {
1203 	i40e_clean_rx_ring(rx_ring);
1204 	rx_ring->xdp_prog = NULL;
1205 	kfree(rx_ring->rx_bi);
1206 	rx_ring->rx_bi = NULL;
1207 
1208 	if (rx_ring->desc) {
1209 		dma_free_coherent(rx_ring->dev, rx_ring->size,
1210 				  rx_ring->desc, rx_ring->dma);
1211 		rx_ring->desc = NULL;
1212 	}
1213 }
1214 
1215 /**
1216  * i40e_setup_rx_descriptors - Allocate Rx descriptors
1217  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1218  *
1219  * Returns 0 on success, negative on failure
1220  **/
1221 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1222 {
1223 	struct device *dev = rx_ring->dev;
1224 	int bi_size;
1225 
1226 	/* warn if we are about to overwrite the pointer */
1227 	WARN_ON(rx_ring->rx_bi);
1228 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1229 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1230 	if (!rx_ring->rx_bi)
1231 		goto err;
1232 
1233 	u64_stats_init(&rx_ring->syncp);
1234 
1235 	/* Round up to nearest 4K */
1236 	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1237 	rx_ring->size = ALIGN(rx_ring->size, 4096);
1238 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1239 					   &rx_ring->dma, GFP_KERNEL);
1240 
1241 	if (!rx_ring->desc) {
1242 		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1243 			 rx_ring->size);
1244 		goto err;
1245 	}
1246 
1247 	rx_ring->next_to_alloc = 0;
1248 	rx_ring->next_to_clean = 0;
1249 	rx_ring->next_to_use = 0;
1250 
1251 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1252 
1253 	return 0;
1254 err:
1255 	kfree(rx_ring->rx_bi);
1256 	rx_ring->rx_bi = NULL;
1257 	return -ENOMEM;
1258 }
1259 
1260 /**
1261  * i40e_release_rx_desc - Store the new tail value and notify hardware
1262  * @rx_ring: ring to bump
1263  * @val: new tail index
1264  **/
1265 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1266 {
1267 	rx_ring->next_to_use = val;
1268 
1269 	/* update next to alloc since we have filled the ring */
1270 	rx_ring->next_to_alloc = val;
1271 
1272 	/* Force memory writes to complete before letting h/w
1273 	 * know there are new descriptors to fetch.  (Only
1274 	 * applicable for weak-ordered memory model archs,
1275 	 * such as IA-64).
1276 	 */
1277 	wmb();
1278 	writel(val, rx_ring->tail);
1279 }
1280 
1281 /**
1282  * i40e_rx_offset - Return expected offset into page to access data
1283  * @rx_ring: Ring we are requesting offset of
1284  *
1285  * Returns the offset value for ring into the data buffer.
1286  */
1287 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1288 {
1289 	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1290 }
1291 
1292 /**
1293  * i40e_alloc_mapped_page - recycle or make a new page
1294  * @rx_ring: ring to use
1295  * @bi: rx_buffer struct to modify
1296  *
1297  * Returns true if the page was successfully allocated or
1298  * reused.
1299  **/
1300 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1301 				   struct i40e_rx_buffer *bi)
1302 {
1303 	struct page *page = bi->page;
1304 	dma_addr_t dma;
1305 
1306 	/* since we are recycling buffers we should seldom need to alloc */
1307 	if (likely(page)) {
1308 		rx_ring->rx_stats.page_reuse_count++;
1309 		return true;
1310 	}
1311 
1312 	/* alloc new page for storage */
1313 	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1314 	if (unlikely(!page)) {
1315 		rx_ring->rx_stats.alloc_page_failed++;
1316 		return false;
1317 	}
1318 
1319 	/* map page for use */
1320 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1321 				 i40e_rx_pg_size(rx_ring),
1322 				 DMA_FROM_DEVICE,
1323 				 I40E_RX_DMA_ATTR);
1324 
1325 	/* if mapping failed free memory back to system since
1326 	 * there isn't much point in holding memory we can't use
1327 	 */
1328 	if (dma_mapping_error(rx_ring->dev, dma)) {
1329 		__free_pages(page, i40e_rx_pg_order(rx_ring));
1330 		rx_ring->rx_stats.alloc_page_failed++;
1331 		return false;
1332 	}
1333 
1334 	bi->dma = dma;
1335 	bi->page = page;
1336 	bi->page_offset = i40e_rx_offset(rx_ring);
1337 
1338 	/* initialize pagecnt_bias to 1 representing we fully own page */
1339 	bi->pagecnt_bias = 1;
1340 
1341 	return true;
1342 }
1343 
1344 /**
1345  * i40e_receive_skb - Send a completed packet up the stack
1346  * @rx_ring:  rx ring in play
1347  * @skb: packet to send up
1348  * @vlan_tag: vlan tag for packet
1349  **/
1350 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1351 			     struct sk_buff *skb, u16 vlan_tag)
1352 {
1353 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
1354 
1355 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1356 	    (vlan_tag & VLAN_VID_MASK))
1357 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1358 
1359 	napi_gro_receive(&q_vector->napi, skb);
1360 }
1361 
1362 /**
1363  * i40e_alloc_rx_buffers - Replace used receive buffers
1364  * @rx_ring: ring to place buffers on
1365  * @cleaned_count: number of buffers to replace
1366  *
1367  * Returns false if all allocations were successful, true if any fail
1368  **/
1369 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1370 {
1371 	u16 ntu = rx_ring->next_to_use;
1372 	union i40e_rx_desc *rx_desc;
1373 	struct i40e_rx_buffer *bi;
1374 
1375 	/* do nothing if no valid netdev defined */
1376 	if (!rx_ring->netdev || !cleaned_count)
1377 		return false;
1378 
1379 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
1380 	bi = &rx_ring->rx_bi[ntu];
1381 
1382 	do {
1383 		if (!i40e_alloc_mapped_page(rx_ring, bi))
1384 			goto no_buffers;
1385 
1386 		/* sync the buffer for use by the device */
1387 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1388 						 bi->page_offset,
1389 						 rx_ring->rx_buf_len,
1390 						 DMA_FROM_DEVICE);
1391 
1392 		/* Refresh the desc even if buffer_addrs didn't change
1393 		 * because each write-back erases this info.
1394 		 */
1395 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1396 
1397 		rx_desc++;
1398 		bi++;
1399 		ntu++;
1400 		if (unlikely(ntu == rx_ring->count)) {
1401 			rx_desc = I40E_RX_DESC(rx_ring, 0);
1402 			bi = rx_ring->rx_bi;
1403 			ntu = 0;
1404 		}
1405 
1406 		/* clear the status bits for the next_to_use descriptor */
1407 		rx_desc->wb.qword1.status_error_len = 0;
1408 
1409 		cleaned_count--;
1410 	} while (cleaned_count);
1411 
1412 	if (rx_ring->next_to_use != ntu)
1413 		i40e_release_rx_desc(rx_ring, ntu);
1414 
1415 	return false;
1416 
1417 no_buffers:
1418 	if (rx_ring->next_to_use != ntu)
1419 		i40e_release_rx_desc(rx_ring, ntu);
1420 
1421 	/* make sure to come back via polling to try again after
1422 	 * allocation failure
1423 	 */
1424 	return true;
1425 }
1426 
1427 /**
1428  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1429  * @vsi: the VSI we care about
1430  * @skb: skb currently being received and modified
1431  * @rx_desc: the receive descriptor
1432  **/
1433 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1434 				    struct sk_buff *skb,
1435 				    union i40e_rx_desc *rx_desc)
1436 {
1437 	struct i40e_rx_ptype_decoded decoded;
1438 	u32 rx_error, rx_status;
1439 	bool ipv4, ipv6;
1440 	u8 ptype;
1441 	u64 qword;
1442 
1443 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1444 	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1445 	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1446 		   I40E_RXD_QW1_ERROR_SHIFT;
1447 	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1448 		    I40E_RXD_QW1_STATUS_SHIFT;
1449 	decoded = decode_rx_desc_ptype(ptype);
1450 
1451 	skb->ip_summed = CHECKSUM_NONE;
1452 
1453 	skb_checksum_none_assert(skb);
1454 
1455 	/* Rx csum enabled and ip headers found? */
1456 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1457 		return;
1458 
1459 	/* did the hardware decode the packet and checksum? */
1460 	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1461 		return;
1462 
1463 	/* both known and outer_ip must be set for the below code to work */
1464 	if (!(decoded.known && decoded.outer_ip))
1465 		return;
1466 
1467 	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1468 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1469 	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1470 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1471 
1472 	if (ipv4 &&
1473 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1474 			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1475 		goto checksum_fail;
1476 
1477 	/* likely incorrect csum if alternate IP extension headers found */
1478 	if (ipv6 &&
1479 	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1480 		/* don't increment checksum err here, non-fatal err */
1481 		return;
1482 
1483 	/* there was some L4 error, count error and punt packet to the stack */
1484 	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1485 		goto checksum_fail;
1486 
1487 	/* handle packets that were not able to be checksummed due
1488 	 * to arrival speed, in this case the stack can compute
1489 	 * the csum.
1490 	 */
1491 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1492 		return;
1493 
1494 	/* If there is an outer header present that might contain a checksum
1495 	 * we need to bump the checksum level by 1 to reflect the fact that
1496 	 * we are indicating we validated the inner checksum.
1497 	 */
1498 	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1499 		skb->csum_level = 1;
1500 
1501 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
1502 	switch (decoded.inner_prot) {
1503 	case I40E_RX_PTYPE_INNER_PROT_TCP:
1504 	case I40E_RX_PTYPE_INNER_PROT_UDP:
1505 	case I40E_RX_PTYPE_INNER_PROT_SCTP:
1506 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1507 		/* fall through */
1508 	default:
1509 		break;
1510 	}
1511 
1512 	return;
1513 
1514 checksum_fail:
1515 	vsi->back->hw_csum_rx_error++;
1516 }
1517 
1518 /**
1519  * i40e_ptype_to_htype - get a hash type
1520  * @ptype: the ptype value from the descriptor
1521  *
1522  * Returns a hash type to be used by skb_set_hash
1523  **/
1524 static inline int i40e_ptype_to_htype(u8 ptype)
1525 {
1526 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1527 
1528 	if (!decoded.known)
1529 		return PKT_HASH_TYPE_NONE;
1530 
1531 	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1532 	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1533 		return PKT_HASH_TYPE_L4;
1534 	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1535 		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1536 		return PKT_HASH_TYPE_L3;
1537 	else
1538 		return PKT_HASH_TYPE_L2;
1539 }
1540 
1541 /**
1542  * i40e_rx_hash - set the hash value in the skb
1543  * @ring: descriptor ring
1544  * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware
1545  **/
1546 static inline void i40e_rx_hash(struct i40e_ring *ring,
1547 				union i40e_rx_desc *rx_desc,
1548 				struct sk_buff *skb,
1549 				u8 rx_ptype)
1550 {
1551 	u32 hash;
1552 	const __le64 rss_mask =
1553 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1554 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1555 
1556 	if (!(ring->netdev->features & NETIF_F_RXHASH))
1557 		return;
1558 
1559 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1560 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1561 		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1562 	}
1563 }
1564 
1565 /**
1566  * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1567  * @rx_ring: rx descriptor ring packet is being transacted on
1568  * @rx_desc: pointer to the EOP Rx descriptor
1569  * @skb: pointer to current skb being populated
1570  * @rx_ptype: the packet type decoded by hardware
1571  *
1572  * This function checks the ring, descriptor, and packet information in
1573  * order to populate the hash, checksum, VLAN, protocol, and
1574  * other fields within the skb.
1575  **/
1576 static inline
1577 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1578 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1579 			     u8 rx_ptype)
1580 {
1581 	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1582 	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1583 			I40E_RXD_QW1_STATUS_SHIFT;
1584 	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1585 	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1586 		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1587 
1588 	if (unlikely(tsynvalid))
1589 		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1590 
1591 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1592 
1593 	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1594 
1595 	skb_record_rx_queue(skb, rx_ring->queue_index);
1596 
1597 	/* modifies the skb - consumes the enet header */
1598 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1599 }
1600 
1601 /**
1602  * i40e_cleanup_headers - Correct empty headers
1603  * @rx_ring: rx descriptor ring packet is being transacted on
1604  * @skb: pointer to current skb being fixed
1605  * @rx_desc: pointer to the EOP Rx descriptor
1606  *
1607  * Also address the case where we are pulling data in on pages only
1608  * and as such no data is present in the skb header.
1609  *
1610  * In addition if skb is not at least 60 bytes we need to pad it so that
1611  * it is large enough to qualify as a valid Ethernet frame.
1612  *
1613  * Returns true if an error was encountered and skb was freed.
1614  **/
1615 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1616 				 union i40e_rx_desc *rx_desc)
1617 
1618 {
1619 	/* XDP packets use error pointer so abort at this point */
1620 	if (IS_ERR(skb))
1621 		return true;
1622 
1623 	/* ERR_MASK will only have valid bits if EOP set, and
1624 	 * what we are doing here is actually checking
1625 	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1626 	 * the error field
1627 	 */
1628 	if (unlikely(i40e_test_staterr(rx_desc,
1629 				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1630 		dev_kfree_skb_any(skb);
1631 		return true;
1632 	}
1633 
1634 	/* if eth_skb_pad returns an error the skb was freed */
1635 	if (eth_skb_pad(skb))
1636 		return true;
1637 
1638 	return false;
1639 }
1640 
1641 /**
1642  * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1643  * @rx_ring: rx descriptor ring to store buffers on
1644  * @old_buff: donor buffer to have page reused
1645  *
1646  * Synchronizes page for reuse by the adapter
1647  **/
1648 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1649 			       struct i40e_rx_buffer *old_buff)
1650 {
1651 	struct i40e_rx_buffer *new_buff;
1652 	u16 nta = rx_ring->next_to_alloc;
1653 
1654 	new_buff = &rx_ring->rx_bi[nta];
1655 
1656 	/* update, and store next to alloc */
1657 	nta++;
1658 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1659 
1660 	/* transfer page from old buffer to new buffer */
1661 	new_buff->dma		= old_buff->dma;
1662 	new_buff->page		= old_buff->page;
1663 	new_buff->page_offset	= old_buff->page_offset;
1664 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1665 }
1666 
1667 /**
1668  * i40e_page_is_reusable - check if any reuse is possible
1669  * @page: page struct to check
1670  *
1671  * A page is not reusable if it was allocated under low memory
1672  * conditions, or it's not in the same NUMA node as this CPU.
1673  */
1674 static inline bool i40e_page_is_reusable(struct page *page)
1675 {
1676 	return (page_to_nid(page) == numa_mem_id()) &&
1677 		!page_is_pfmemalloc(page);
1678 }
1679 
1680 /**
1681  * i40e_can_reuse_rx_page - Determine if this page can be reused by
1682  * the adapter for another receive
1683  *
1684  * @rx_buffer: buffer containing the page
1685  *
1686  * If page is reusable, rx_buffer->page_offset is adjusted to point to
1687  * an unused region in the page.
1688  *
1689  * For small pages, @truesize will be a constant value, half the size
1690  * of the memory at page.  We'll attempt to alternate between high and
1691  * low halves of the page, with one half ready for use by the hardware
1692  * and the other half being consumed by the stack.  We use the page
1693  * ref count to determine whether the stack has finished consuming the
1694  * portion of this page that was passed up with a previous packet.  If
1695  * the page ref count is >1, we'll assume the "other" half page is
1696  * still busy, and this page cannot be reused.
1697  *
1698  * For larger pages, @truesize will be the actual space used by the
1699  * received packet (adjusted upward to an even multiple of the cache
1700  * line size).  This will advance through the page by the amount
1701  * actually consumed by the received packets while there is still
1702  * space for a buffer.  Each region of larger pages will be used at
1703  * most once, after which the page will not be reused.
1704  *
1705  * In either case, if the page is reusable its refcount is increased.
1706  **/
1707 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1708 {
1709 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1710 	struct page *page = rx_buffer->page;
1711 
1712 	/* Is any reuse possible? */
1713 	if (unlikely(!i40e_page_is_reusable(page)))
1714 		return false;
1715 
1716 #if (PAGE_SIZE < 8192)
1717 	/* if we are only owner of page we can reuse it */
1718 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
1719 		return false;
1720 #else
1721 #define I40E_LAST_OFFSET \
1722 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1723 	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1724 		return false;
1725 #endif
1726 
1727 	/* If we have drained the page fragment pool we need to update
1728 	 * the pagecnt_bias and page count so that we fully restock the
1729 	 * number of references the driver holds.
1730 	 */
1731 	if (unlikely(!pagecnt_bias)) {
1732 		page_ref_add(page, USHRT_MAX);
1733 		rx_buffer->pagecnt_bias = USHRT_MAX;
1734 	}
1735 
1736 	return true;
1737 }
1738 
1739 /**
1740  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1741  * @rx_ring: rx descriptor ring to transact packets on
1742  * @rx_buffer: buffer containing page to add
1743  * @skb: sk_buff to place the data into
1744  * @size: packet length from rx_desc
1745  *
1746  * This function will add the data contained in rx_buffer->page to the skb.
1747  * It will just attach the page as a frag to the skb.
1748  *
1749  * The function will then update the page offset.
1750  **/
1751 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1752 			     struct i40e_rx_buffer *rx_buffer,
1753 			     struct sk_buff *skb,
1754 			     unsigned int size)
1755 {
1756 #if (PAGE_SIZE < 8192)
1757 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1758 #else
1759 	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1760 #endif
1761 
1762 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1763 			rx_buffer->page_offset, size, truesize);
1764 
1765 	/* page is being used so we must update the page offset */
1766 #if (PAGE_SIZE < 8192)
1767 	rx_buffer->page_offset ^= truesize;
1768 #else
1769 	rx_buffer->page_offset += truesize;
1770 #endif
1771 }
1772 
1773 /**
1774  * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1775  * @rx_ring: rx descriptor ring to transact packets on
1776  * @size: size of buffer to add to skb
1777  *
1778  * This function will pull an Rx buffer from the ring and synchronize it
1779  * for use by the CPU.
1780  */
1781 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1782 						 const unsigned int size)
1783 {
1784 	struct i40e_rx_buffer *rx_buffer;
1785 
1786 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1787 	prefetchw(rx_buffer->page);
1788 
1789 	/* we are reusing so sync this buffer for CPU use */
1790 	dma_sync_single_range_for_cpu(rx_ring->dev,
1791 				      rx_buffer->dma,
1792 				      rx_buffer->page_offset,
1793 				      size,
1794 				      DMA_FROM_DEVICE);
1795 
1796 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
1797 	rx_buffer->pagecnt_bias--;
1798 
1799 	return rx_buffer;
1800 }
1801 
1802 /**
1803  * i40e_construct_skb - Allocate skb and populate it
1804  * @rx_ring: rx descriptor ring to transact packets on
1805  * @rx_buffer: rx buffer to pull data from
1806  * @xdp: xdp_buff pointing to the data
1807  *
1808  * This function allocates an skb.  It then populates it with the page
1809  * data from the current receive descriptor, taking care to set up the
1810  * skb correctly.
1811  */
1812 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
1813 					  struct i40e_rx_buffer *rx_buffer,
1814 					  struct xdp_buff *xdp)
1815 {
1816 	unsigned int size = xdp->data_end - xdp->data;
1817 #if (PAGE_SIZE < 8192)
1818 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1819 #else
1820 	unsigned int truesize = SKB_DATA_ALIGN(size);
1821 #endif
1822 	unsigned int headlen;
1823 	struct sk_buff *skb;
1824 
1825 	/* prefetch first cache line of first page */
1826 	prefetch(xdp->data);
1827 #if L1_CACHE_BYTES < 128
1828 	prefetch(xdp->data + L1_CACHE_BYTES);
1829 #endif
1830 
1831 	/* allocate a skb to store the frags */
1832 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1833 			       I40E_RX_HDR_SIZE,
1834 			       GFP_ATOMIC | __GFP_NOWARN);
1835 	if (unlikely(!skb))
1836 		return NULL;
1837 
1838 	/* Determine available headroom for copy */
1839 	headlen = size;
1840 	if (headlen > I40E_RX_HDR_SIZE)
1841 		headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
1842 
1843 	/* align pull length to size of long to optimize memcpy performance */
1844 	memcpy(__skb_put(skb, headlen), xdp->data,
1845 	       ALIGN(headlen, sizeof(long)));
1846 
1847 	/* update all of the pointers */
1848 	size -= headlen;
1849 	if (size) {
1850 		skb_add_rx_frag(skb, 0, rx_buffer->page,
1851 				rx_buffer->page_offset + headlen,
1852 				size, truesize);
1853 
1854 		/* buffer is used by skb, update page_offset */
1855 #if (PAGE_SIZE < 8192)
1856 		rx_buffer->page_offset ^= truesize;
1857 #else
1858 		rx_buffer->page_offset += truesize;
1859 #endif
1860 	} else {
1861 		/* buffer is unused, reset bias back to rx_buffer */
1862 		rx_buffer->pagecnt_bias++;
1863 	}
1864 
1865 	return skb;
1866 }
1867 
1868 /**
1869  * i40e_build_skb - Build skb around an existing buffer
1870  * @rx_ring: Rx descriptor ring to transact packets on
1871  * @rx_buffer: Rx buffer to pull data from
1872  * @xdp: xdp_buff pointing to the data
1873  *
1874  * This function builds an skb around an existing Rx buffer, taking care
1875  * to set up the skb correctly and avoid any memcpy overhead.
1876  */
1877 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1878 				      struct i40e_rx_buffer *rx_buffer,
1879 				      struct xdp_buff *xdp)
1880 {
1881 	unsigned int size = xdp->data_end - xdp->data;
1882 #if (PAGE_SIZE < 8192)
1883 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1884 #else
1885 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1886 				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1887 #endif
1888 	struct sk_buff *skb;
1889 
1890 	/* prefetch first cache line of first page */
1891 	prefetch(xdp->data);
1892 #if L1_CACHE_BYTES < 128
1893 	prefetch(xdp->data + L1_CACHE_BYTES);
1894 #endif
1895 	/* build an skb around the page buffer */
1896 	skb = build_skb(xdp->data_hard_start, truesize);
1897 	if (unlikely(!skb))
1898 		return NULL;
1899 
1900 	/* update pointers within the skb to store the data */
1901 	skb_reserve(skb, I40E_SKB_PAD);
1902 	__skb_put(skb, size);
1903 
1904 	/* buffer is used by skb, update page_offset */
1905 #if (PAGE_SIZE < 8192)
1906 	rx_buffer->page_offset ^= truesize;
1907 #else
1908 	rx_buffer->page_offset += truesize;
1909 #endif
1910 
1911 	return skb;
1912 }
1913 
1914 /**
1915  * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
1916  * @rx_ring: rx descriptor ring to transact packets on
1917  * @rx_buffer: rx buffer to pull data from
1918  *
1919  * This function will clean up the contents of the rx_buffer.  It will
1920  * either recycle the buffer or unmap it and free the associated resources.
1921  */
1922 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
1923 			       struct i40e_rx_buffer *rx_buffer)
1924 {
1925 	if (i40e_can_reuse_rx_page(rx_buffer)) {
1926 		/* hand second half of page back to the ring */
1927 		i40e_reuse_rx_page(rx_ring, rx_buffer);
1928 		rx_ring->rx_stats.page_reuse_count++;
1929 	} else {
1930 		/* we are not reusing the buffer so unmap it */
1931 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1932 				     i40e_rx_pg_size(rx_ring),
1933 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
1934 		__page_frag_cache_drain(rx_buffer->page,
1935 					rx_buffer->pagecnt_bias);
1936 	}
1937 
1938 	/* clear contents of buffer_info */
1939 	rx_buffer->page = NULL;
1940 }
1941 
1942 /**
1943  * i40e_is_non_eop - process handling of non-EOP buffers
1944  * @rx_ring: Rx ring being processed
1945  * @rx_desc: Rx descriptor for current buffer
1946  * @skb: Current socket buffer containing buffer in progress
1947  *
1948  * This function updates next to clean.  If the buffer is an EOP buffer
1949  * this function exits returning false, otherwise it will place the
1950  * sk_buff in the next buffer to be chained and return true indicating
1951  * that this is in fact a non-EOP buffer.
1952  **/
1953 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1954 			    union i40e_rx_desc *rx_desc,
1955 			    struct sk_buff *skb)
1956 {
1957 	u32 ntc = rx_ring->next_to_clean + 1;
1958 
1959 	/* fetch, update, and store next to clean */
1960 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1961 	rx_ring->next_to_clean = ntc;
1962 
1963 	prefetch(I40E_RX_DESC(rx_ring, ntc));
1964 
1965 	/* if we are the last buffer then there is nothing else to do */
1966 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1967 	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1968 		return false;
1969 
1970 	rx_ring->rx_stats.non_eop_descs++;
1971 
1972 	return true;
1973 }
1974 
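/* Internal return codes for the XDP fast path: pass the frame to the
 * stack, count it as consumed (dropped or aborted), or queue it on the
 * XDP Tx ring.
 */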
1975 #define I40E_XDP_PASS 0
1976 #define I40E_XDP_CONSUMED 1
1977 #define I40E_XDP_TX 2
1978 
1979 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
1980 			      struct i40e_ring *xdp_ring);
1981 
1982 /**
1983  * i40e_run_xdp - run an XDP program
1984  * @rx_ring: Rx ring being processed
1985  * @xdp: XDP buffer containing the frame
1986  **/
1987 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
1988 				    struct xdp_buff *xdp)
1989 {
1990 	int result = I40E_XDP_PASS;
1991 	struct i40e_ring *xdp_ring;
1992 	struct bpf_prog *xdp_prog;
1993 	u32 act;
1994 
1995 	rcu_read_lock();
1996 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1997 
1998 	if (!xdp_prog)
1999 		goto xdp_out;
2000 
2001 	act = bpf_prog_run_xdp(xdp_prog, xdp);
2002 	switch (act) {
2003 	case XDP_PASS:
2004 		break;
2005 	case XDP_TX:
2006 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2007 		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
2008 		break;
2009 	default:
2010 		bpf_warn_invalid_xdp_action(act);
2011 	case XDP_ABORTED:
2012 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2013 		/* fallthrough -- handle aborts by dropping packet */
2014 	case XDP_DROP:
2015 		result = I40E_XDP_CONSUMED;
2016 		break;
2017 	}
2018 xdp_out:
2019 	rcu_read_unlock();
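	/* Encode the verdict as an error pointer so that the caller can tell
	 * a consumed or forwarded frame (IS_ERR()) apart from a real skb and
	 * recover the action with PTR_ERR().
	 */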
2020 	return ERR_PTR(-result);
2021 }
2022 
2023 /**
2024  * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2025  * @rx_ring: Rx ring
2026  * @rx_buffer: Rx buffer to adjust
2027  * @size: Size of adjustment
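 *
 * With 4K pages the buffer simply ping-pongs between the two halves of
 * the page; with larger pages the offset advances by the cache-line
 * aligned amount just consumed so the next frame lands in unused space.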
2028  **/
2029 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2030 				struct i40e_rx_buffer *rx_buffer,
2031 				unsigned int size)
2032 {
2033 #if (PAGE_SIZE < 8192)
2034 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2035 
2036 	rx_buffer->page_offset ^= truesize;
2037 #else
2038 	unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2039 
2040 	rx_buffer->page_offset += truesize;
2041 #endif
2042 }
2043 
2044 /**
2045  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2046  * @rx_ring: rx descriptor ring to transact packets on
2047  * @budget: Total limit on number of packets to process
2048  *
2049  * This function provides a "bounce buffer" approach to Rx interrupt
2050  * processing.  The advantage to this is that on systems that have
2051  * expensive overhead for IOMMU access this provides a means of avoiding
2052  * it by maintaining the mapping of the page to the system.
2053  *
2054  * Returns amount of work completed
2055  **/
2056 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2057 {
2058 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2059 	struct sk_buff *skb = rx_ring->skb;
2060 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2061 	bool failure = false, xdp_xmit = false;
2062 
2063 	while (likely(total_rx_packets < (unsigned int)budget)) {
2064 		struct i40e_rx_buffer *rx_buffer;
2065 		union i40e_rx_desc *rx_desc;
2066 		struct xdp_buff xdp;
2067 		unsigned int size;
2068 		u16 vlan_tag;
2069 		u8 rx_ptype;
2070 		u64 qword;
2071 
2072 		/* return some buffers to hardware, one at a time is too slow */
2073 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2074 			failure = failure ||
2075 				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2076 			cleaned_count = 0;
2077 		}
2078 
2079 		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2080 
2081 		/* status_error_len will always be zero for unused descriptors
2082 		 * because it's cleared in cleanup, and overlaps with hdr_addr
2083 		 * which is always zero because packet split isn't used.  If the
2084 		 * hardware wrote DD then the length will be non-zero.
2085 		 */
2086 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2087 
2088 		/* This memory barrier is needed to keep us from reading
2089 		 * any other fields out of the rx_desc until we have
2090 		 * verified the descriptor has been written back.
2091 		 */
2092 		dma_rmb();
2093 
2094 		if (unlikely(i40e_rx_is_programming_status(qword))) {
2095 			i40e_clean_programming_status(rx_ring, rx_desc, qword);
2096 			continue;
2097 		}
2098 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2099 		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2100 		if (!size)
2101 			break;
2102 
2103 		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2104 		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2105 
2106 		/* retrieve a buffer from the ring */
2107 		if (!skb) {
2108 			xdp.data = page_address(rx_buffer->page) +
2109 				   rx_buffer->page_offset;
2110 			xdp_set_data_meta_invalid(&xdp);
2111 			xdp.data_hard_start = xdp.data -
2112 					      i40e_rx_offset(rx_ring);
2113 			xdp.data_end = xdp.data + size;
2114 
2115 			skb = i40e_run_xdp(rx_ring, &xdp);
2116 		}
2117 
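		/* An error pointer means XDP consumed the frame: account for
		 * it and either keep the page (XDP_TX) or drop our reference.
		 * Otherwise attach the buffer to an skb in progress, build an
		 * skb around the buffer in place, or copy the headers into a
		 * freshly allocated skb.
		 */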
2118 		if (IS_ERR(skb)) {
2119 			if (PTR_ERR(skb) == -I40E_XDP_TX) {
2120 				xdp_xmit = true;
2121 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2122 			} else {
2123 				rx_buffer->pagecnt_bias++;
2124 			}
2125 			total_rx_bytes += size;
2126 			total_rx_packets++;
2127 		} else if (skb) {
2128 			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2129 		} else if (ring_uses_build_skb(rx_ring)) {
2130 			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2131 		} else {
2132 			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2133 		}
2134 
2135 		/* exit if we failed to retrieve a buffer */
2136 		if (!skb) {
2137 			rx_ring->rx_stats.alloc_buff_failed++;
2138 			rx_buffer->pagecnt_bias++;
2139 			break;
2140 		}
2141 
2142 		i40e_put_rx_buffer(rx_ring, rx_buffer);
2143 		cleaned_count++;
2144 
2145 		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2146 			continue;
2147 
2148 		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2149 			skb = NULL;
2150 			continue;
2151 		}
2152 
2153 		/* probably a little skewed due to removing CRC */
2154 		total_rx_bytes += skb->len;
2155 
2156 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2157 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2158 			   I40E_RXD_QW1_PTYPE_SHIFT;
2159 
2160 		/* populate checksum, VLAN, and protocol */
2161 		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2162 
2163 		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2164 			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2165 
2166 		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2167 		i40e_receive_skb(rx_ring, skb, vlan_tag);
2168 		skb = NULL;
2169 
2170 		/* update budget accounting */
2171 		total_rx_packets++;
2172 	}
2173 
2174 	if (xdp_xmit) {
2175 		struct i40e_ring *xdp_ring;
2176 
2177 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2178 
2179 		/* Force memory writes to complete before letting h/w
2180 		 * know there are new descriptors to fetch.
2181 		 */
2182 		wmb();
2183 
2184 		writel(xdp_ring->next_to_use, xdp_ring->tail);
2185 	}
2186 
2187 	rx_ring->skb = skb;
2188 
2189 	u64_stats_update_begin(&rx_ring->syncp);
2190 	rx_ring->stats.packets += total_rx_packets;
2191 	rx_ring->stats.bytes += total_rx_bytes;
2192 	u64_stats_update_end(&rx_ring->syncp);
2193 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
2194 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2195 
2196 	/* guarantee a trip back through this routine if there was a failure */
2197 	return failure ? budget : (int)total_rx_packets;
2198 }
2199 
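/* Compose a PFINT_DYN_CTLN register value that re-enables the interrupt
 * and, through the ITR index and interval fields, optionally programs a
 * new throttling interval for that index.
 */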
2200 static u32 i40e_buildreg_itr(const int type, const u16 itr)
2201 {
2202 	u32 val;
2203 
2204 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2205 	      /* Don't clear PBA because that can cause lost interrupts that
2206 	       * came in while we were cleaning/polling
2207 	       */
2208 	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2209 	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
2210 
2211 	return val;
2212 }
2213 
2214 /* a small macro to shorten up some long lines */
2215 #define INTREG I40E_PFINT_DYN_CTLN
2216 static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
2217 {
2218 	return vsi->rx_rings[idx]->rx_itr_setting;
2219 }
2220 
2221 static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
2222 {
2223 	return vsi->tx_rings[idx]->tx_itr_setting;
2224 }
2225 
2226 /**
2227  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2228  * @vsi: the VSI we care about
2229  * @q_vector: q_vector for which itr is being updated and interrupt enabled
2230  *
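 * When dynamic ITR is enabled, recompute the adaptive ITR values for the
 * Rx and Tx rings behind this vector, program the new interval(s) and
 * re-arm the interrupt through the PFINT_DYN_CTLN register.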
2231  **/
2232 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2233 					  struct i40e_q_vector *q_vector)
2234 {
2235 	struct i40e_hw *hw = &vsi->back->hw;
2236 	bool rx = false, tx = false;
2237 	u32 rxval, txval;
2238 	int vector;
2239 	int idx = q_vector->v_idx;
2240 	int rx_itr_setting, tx_itr_setting;
2241 
2242 	/* If we don't have MSIX, then we only need to re-enable icr0 */
2243 	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2244 		i40e_irq_dynamic_enable_icr0(vsi->back, false);
2245 		return;
2246 	}
2247 
2248 	vector = (q_vector->v_idx + vsi->base_vector);
2249 
2250 	/* avoid dynamic calculation if in countdown mode OR if
2251 	 * all dynamic is disabled
2252 	 */
2253 	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2254 
2255 	rx_itr_setting = get_rx_itr(vsi, idx);
2256 	tx_itr_setting = get_tx_itr(vsi, idx);
2257 
2258 	if (q_vector->itr_countdown > 0 ||
2259 	    (!ITR_IS_DYNAMIC(rx_itr_setting) &&
2260 	     !ITR_IS_DYNAMIC(tx_itr_setting))) {
2261 		goto enable_int;
2262 	}
2263 
2264 	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
2265 		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2266 		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
2267 	}
2268 
2269 	if (ITR_IS_DYNAMIC(tx_itr_setting)) {
2270 		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
2271 		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
2272 	}
2273 
2274 	if (rx || tx) {
2275 		/* get the higher of the two ITR adjustments and
2276 		 * use the same value for both ITR registers
2277 		 * when in adaptive mode (Rx and/or Tx)
2278 		 */
2279 		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
2280 
2281 		q_vector->tx.itr = q_vector->rx.itr = itr;
2282 		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
2283 		tx = true;
2284 		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
2285 		rx = true;
2286 	}
2287 
2288 	/* only need to enable the interrupt once, but need
2289 	 * to possibly update both ITR values
2290 	 */
2291 	if (rx) {
2292 		/* set the INTENA_MSK_MASK so that this first write
2293 		 * won't actually enable the interrupt, instead just
2294 		 * updating the ITR (it's bit 31 for both PF and VF)
2295 		 */
2296 		rxval |= BIT(31);
2297 		/* don't check _DOWN because interrupt isn't being enabled */
2298 		wr32(hw, INTREG(vector - 1), rxval);
2299 	}
2300 
2301 enable_int:
2302 	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2303 		wr32(hw, INTREG(vector - 1), txval);
2304 
2305 	if (q_vector->itr_countdown)
2306 		q_vector->itr_countdown--;
2307 	else
2308 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2309 }
2310 
2311 /**
2312  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2313  * @napi: napi struct with our devices info in it
2314  * @budget: amount of work driver is allowed to do this pass, in packets
2315  *
2316  * This function will clean all queues associated with a q_vector.
2317  *
2318  * Returns the amount of work done
2319  **/
2320 int i40e_napi_poll(struct napi_struct *napi, int budget)
2321 {
2322 	struct i40e_q_vector *q_vector =
2323 			       container_of(napi, struct i40e_q_vector, napi);
2324 	struct i40e_vsi *vsi = q_vector->vsi;
2325 	struct i40e_ring *ring;
2326 	bool clean_complete = true;
2327 	bool arm_wb = false;
2328 	int budget_per_ring;
2329 	int work_done = 0;
2330 
2331 	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2332 		napi_complete(napi);
2333 		return 0;
2334 	}
2335 
2336 	/* Since the actual Tx work is minimal, we can give the Tx a larger
2337 	 * budget and be more aggressive about cleaning up the Tx descriptors.
2338 	 */
2339 	i40e_for_each_ring(ring, q_vector->tx) {
2340 		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2341 			clean_complete = false;
2342 			continue;
2343 		}
2344 		arm_wb |= ring->arm_wb;
2345 		ring->arm_wb = false;
2346 	}
2347 
2348 	/* Handle case where we are called by netpoll with a budget of 0 */
2349 	if (budget <= 0)
2350 		goto tx_only;
2351 
2352 	/* We attempt to distribute budget to each Rx queue fairly, but don't
2353 	 * allow the budget to go below 1 because that would exit polling early.
2354 	 */
2355 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2356 
2357 	i40e_for_each_ring(ring, q_vector->rx) {
2358 		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2359 
2360 		work_done += cleaned;
2361 		/* if we clean as many as budgeted, we must not be done */
2362 		if (cleaned >= budget_per_ring)
2363 			clean_complete = false;
2364 	}
2365 
2366 	/* If work not completed, return budget and polling will return */
2367 	if (!clean_complete) {
2368 		int cpu_id = smp_processor_id();
2369 
2370 		/* It is possible that the interrupt affinity has changed but,
2371 		 * if the cpu is pegged at 100%, polling will never exit while
2372 		 * traffic continues and the interrupt will be stuck on this
2373 		 * cpu.  We check to make sure affinity is correct before we
2374 		 * continue to poll, otherwise we must stop polling so the
2375 		 * interrupt can move to the correct cpu.
2376 		 */
2377 		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2378 			/* Tell napi that we are done polling */
2379 			napi_complete_done(napi, work_done);
2380 
2381 			/* Force an interrupt */
2382 			i40e_force_wb(vsi, q_vector);
2383 
2384 			/* Return budget-1 so that polling stops */
2385 			return budget - 1;
2386 		}
2387 tx_only:
2388 		if (arm_wb) {
2389 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2390 			i40e_enable_wb_on_itr(vsi, q_vector);
2391 		}
2392 		return budget;
2393 	}
2394 
2395 	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2396 		q_vector->arm_wb_state = false;
2397 
2398 	/* Work is done so exit the polling mode and re-enable the interrupt */
2399 	napi_complete_done(napi, work_done);
2400 
2401 	i40e_update_enable_itr(vsi, q_vector);
2402 
2403 	return min(work_done, budget - 1);
2404 }
2405 
2406 /**
2407  * i40e_atr - Add a Flow Director ATR filter
2408  * @tx_ring:  ring to add programming descriptor to
2409  * @skb:      send buffer
2410  * @tx_flags: send tx flags
2411  **/
2412 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2413 		     u32 tx_flags)
2414 {
2415 	struct i40e_filter_program_desc *fdir_desc;
2416 	struct i40e_pf *pf = tx_ring->vsi->back;
2417 	union {
2418 		unsigned char *network;
2419 		struct iphdr *ipv4;
2420 		struct ipv6hdr *ipv6;
2421 	} hdr;
2422 	struct tcphdr *th;
2423 	unsigned int hlen;
2424 	u32 flex_ptype, dtype_cmd;
2425 	int l4_proto;
2426 	u16 i;
2427 
2428 	/* make sure ATR is enabled */
2429 	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2430 		return;
2431 
2432 	if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
2433 		return;
2434 
2435 	/* if sampling is disabled do nothing */
2436 	if (!tx_ring->atr_sample_rate)
2437 		return;
2438 
2439 	/* Currently only IPv4/IPv6 with TCP is supported */
2440 	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2441 		return;
2442 
2443 	/* snag network header to get L4 type and address */
2444 	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2445 		      skb_inner_network_header(skb) : skb_network_header(skb);
2446 
2447 	/* Note: tx_flags gets modified to reflect inner protocols in
2448 	 * tx_enable_csum function if encap is enabled.
2449 	 */
2450 	if (tx_flags & I40E_TX_FLAGS_IPV4) {
2451 		/* access ihl as u8 to avoid unaligned access on ia64 */
2452 		hlen = (hdr.network[0] & 0x0F) << 2;
2453 		l4_proto = hdr.ipv4->protocol;
2454 	} else {
2455 		/* find the start of the innermost ipv6 header */
2456 		unsigned int inner_hlen = hdr.network - skb->data;
2457 		unsigned int h_offset = inner_hlen;
2458 
2459 		/* this function updates h_offset to the end of the header */
2460 		l4_proto =
2461 		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2462 		/* hlen will contain our best estimate of the offset to the tcp header */
2463 		hlen = h_offset - inner_hlen;
2464 	}
2465 
2466 	if (l4_proto != IPPROTO_TCP)
2467 		return;
2468 
2469 	th = (struct tcphdr *)(hdr.network + hlen);
2470 
2471 	/* Due to lack of space, no more new filters can be programmed */
2472 	if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
2473 		return;
2474 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2475 		/* HW ATR eviction will take care of removing filters on FIN
2476 		 * and RST packets.
2477 		 */
2478 		if (th->fin || th->rst)
2479 			return;
2480 	}
2481 
2482 	tx_ring->atr_count++;
2483 
2484 	/* sample on all syn/fin/rst packets or once every atr sample rate */
2485 	if (!th->fin &&
2486 	    !th->syn &&
2487 	    !th->rst &&
2488 	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
2489 		return;
2490 
2491 	tx_ring->atr_count = 0;
2492 
2493 	/* grab the next descriptor */
2494 	i = tx_ring->next_to_use;
2495 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2496 
2497 	i++;
2498 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2499 
2500 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2501 		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
2502 	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2503 		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2504 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2505 		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2506 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2507 
2508 	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2509 
2510 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2511 
2512 	dtype_cmd |= (th->fin || th->rst) ?
2513 		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2514 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2515 		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2516 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2517 
2518 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2519 		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
2520 
2521 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2522 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2523 
2524 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2525 	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2526 		dtype_cmd |=
2527 			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2528 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2529 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2530 	else
2531 		dtype_cmd |=
2532 			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2533 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2534 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2535 
2536 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2537 		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2538 
2539 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2540 	fdir_desc->rsvd = cpu_to_le32(0);
2541 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2542 	fdir_desc->fd_id = cpu_to_le32(0);
2543 }
2544 
2545 /**
2546  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2547  * @skb:     send buffer
2548  * @tx_ring: ring to send buffer on
2549  * @flags:   the tx flags to be set
2550  *
2551  * Checks the skb and sets up the corresponding generic transmit flags
2552  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2553  *
2554  * Returns an error code to indicate the frame should be dropped upon error,
2555  * otherwise returns 0 to indicate the flags have been set properly.
2556  **/
2557 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2558 					     struct i40e_ring *tx_ring,
2559 					     u32 *flags)
2560 {
2561 	__be16 protocol = skb->protocol;
2562 	u32  tx_flags = 0;
2563 
2564 	if (protocol == htons(ETH_P_8021Q) &&
2565 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2566 		/* When HW VLAN acceleration is turned off by the user the
2567 		 * stack sets the protocol to 8021q so that the driver
2568 		 * can take any steps required to support the SW only
2569 		 * VLAN handling.  In our case the driver doesn't need
2570 		 * to take any further steps so just set the protocol
2571 		 * to the encapsulated ethertype.
2572 		 */
2573 		skb->protocol = vlan_get_protocol(skb);
2574 		goto out;
2575 	}
2576 
2577 	/* if we have a HW VLAN tag being added, default to the HW one */
2578 	if (skb_vlan_tag_present(skb)) {
2579 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2580 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2581 	/* else if it is a SW VLAN, check the next protocol and store the tag */
2582 	} else if (protocol == htons(ETH_P_8021Q)) {
2583 		struct vlan_hdr *vhdr, _vhdr;
2584 
2585 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2586 		if (!vhdr)
2587 			return -EINVAL;
2588 
2589 		protocol = vhdr->h_vlan_encapsulated_proto;
2590 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2591 		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2592 	}
2593 
2594 	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2595 		goto out;
2596 
2597 	/* Insert 802.1p priority into VLAN header */
2598 	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2599 	    (skb->priority != TC_PRIO_CONTROL)) {
2600 		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2601 		tx_flags |= (skb->priority & 0x7) <<
2602 				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2603 		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2604 			struct vlan_ethhdr *vhdr;
2605 			int rc;
2606 
2607 			rc = skb_cow_head(skb, 0);
2608 			if (rc < 0)
2609 				return rc;
2610 			vhdr = (struct vlan_ethhdr *)skb->data;
2611 			vhdr->h_vlan_TCI = htons(tx_flags >>
2612 						 I40E_TX_FLAGS_VLAN_SHIFT);
2613 		} else {
2614 			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2615 		}
2616 	}
2617 
2618 out:
2619 	*flags = tx_flags;
2620 	return 0;
2621 }
2622 
2623 /**
2624  * i40e_tso - set up the tso context descriptor
2625  * @first:    pointer to first Tx buffer for xmit
2626  * @hdr_len:  ptr to the size of the packet header
2627  * @cd_type_cmd_tso_mss: Quad Word 1
2628  *
2629  * Returns 0 if no TSO can happen, 1 if tso is going, or error
2630  **/
2631 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2632 		    u64 *cd_type_cmd_tso_mss)
2633 {
2634 	struct sk_buff *skb = first->skb;
2635 	u64 cd_cmd, cd_tso_len, cd_mss;
2636 	union {
2637 		struct iphdr *v4;
2638 		struct ipv6hdr *v6;
2639 		unsigned char *hdr;
2640 	} ip;
2641 	union {
2642 		struct tcphdr *tcp;
2643 		struct udphdr *udp;
2644 		unsigned char *hdr;
2645 	} l4;
2646 	u32 paylen, l4_offset;
2647 	u16 gso_segs, gso_size;
2648 	int err;
2649 
2650 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2651 		return 0;
2652 
2653 	if (!skb_is_gso(skb))
2654 		return 0;
2655 
2656 	err = skb_cow_head(skb, 0);
2657 	if (err < 0)
2658 		return err;
2659 
2660 	ip.hdr = skb_network_header(skb);
2661 	l4.hdr = skb_transport_header(skb);
2662 
2663 	/* initialize outer IP header fields */
2664 	if (ip.v4->version == 4) {
2665 		ip.v4->tot_len = 0;
2666 		ip.v4->check = 0;
2667 	} else {
2668 		ip.v6->payload_len = 0;
2669 	}
2670 
2671 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2672 					 SKB_GSO_GRE_CSUM |
2673 					 SKB_GSO_IPXIP4 |
2674 					 SKB_GSO_IPXIP6 |
2675 					 SKB_GSO_UDP_TUNNEL |
2676 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
2677 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2678 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2679 			l4.udp->len = 0;
2680 
2681 			/* determine offset of outer transport header */
2682 			l4_offset = l4.hdr - skb->data;
2683 
2684 			/* remove payload length from outer checksum */
2685 			paylen = skb->len - l4_offset;
2686 			csum_replace_by_diff(&l4.udp->check,
2687 					     (__force __wsum)htonl(paylen));
2688 		}
2689 
2690 		/* reset pointers to inner headers */
2691 		ip.hdr = skb_inner_network_header(skb);
2692 		l4.hdr = skb_inner_transport_header(skb);
2693 
2694 		/* initialize inner IP header fields */
2695 		if (ip.v4->version == 4) {
2696 			ip.v4->tot_len = 0;
2697 			ip.v4->check = 0;
2698 		} else {
2699 			ip.v6->payload_len = 0;
2700 		}
2701 	}
2702 
2703 	/* determine offset of inner transport header */
2704 	l4_offset = l4.hdr - skb->data;
2705 
2706 	/* remove payload length from inner checksum */
2707 	paylen = skb->len - l4_offset;
2708 	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2709 
2710 	/* compute length of segmentation header */
2711 	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
2712 
2713 	/* pull values out of skb_shinfo */
2714 	gso_size = skb_shinfo(skb)->gso_size;
2715 	gso_segs = skb_shinfo(skb)->gso_segs;
2716 
2717 	/* update GSO size and bytecount with header size */
2718 	first->gso_segs = gso_segs;
2719 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
2720 
2721 	/* find the field values */
2722 	cd_cmd = I40E_TX_CTX_DESC_TSO;
2723 	cd_tso_len = skb->len - *hdr_len;
2724 	cd_mss = gso_size;
2725 	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2726 				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2727 				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2728 	return 1;
2729 }
2730 
2731 /**
2732  * i40e_tsyn - set up the tsyn context descriptor
2733  * @tx_ring:  ptr to the ring to send
2734  * @skb:      ptr to the skb we're sending
2735  * @tx_flags: the collected send information
2736  * @cd_type_cmd_tso_mss: Quad Word 1
2737  *
2738  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2739  **/
2740 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2741 		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2742 {
2743 	struct i40e_pf *pf;
2744 
2745 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2746 		return 0;
2747 
2748 	/* Tx timestamps cannot be sampled when doing TSO */
2749 	if (tx_flags & I40E_TX_FLAGS_TSO)
2750 		return 0;
2751 
2752 	/* only timestamp the outbound packet if the user has requested it and
2753 	 * we are not already transmitting a packet to be timestamped
2754 	 */
2755 	pf = i40e_netdev_to_pf(tx_ring->netdev);
2756 	if (!(pf->flags & I40E_FLAG_PTP))
2757 		return 0;
2758 
2759 	if (pf->ptp_tx &&
2760 	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
2761 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2762 		pf->ptp_tx_start = jiffies;
2763 		pf->ptp_tx_skb = skb_get(skb);
2764 	} else {
2765 		pf->tx_hwtstamp_skipped++;
2766 		return 0;
2767 	}
2768 
2769 	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2770 				I40E_TXD_CTX_QW1_CMD_SHIFT;
2771 
2772 	return 1;
2773 }
2774 
2775 /**
2776  * i40e_tx_enable_csum - Enable Tx checksum offloads
2777  * @skb: send buffer
2778  * @tx_flags: pointer to Tx flags currently set
2779  * @td_cmd: Tx descriptor command bits to set
2780  * @td_offset: Tx descriptor header offsets to set
2781  * @tx_ring: Tx descriptor ring
2782  * @cd_tunneling: ptr to context desc bits
2783  **/
2784 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2785 			       u32 *td_cmd, u32 *td_offset,
2786 			       struct i40e_ring *tx_ring,
2787 			       u32 *cd_tunneling)
2788 {
2789 	union {
2790 		struct iphdr *v4;
2791 		struct ipv6hdr *v6;
2792 		unsigned char *hdr;
2793 	} ip;
2794 	union {
2795 		struct tcphdr *tcp;
2796 		struct udphdr *udp;
2797 		unsigned char *hdr;
2798 	} l4;
2799 	unsigned char *exthdr;
2800 	u32 offset, cmd = 0;
2801 	__be16 frag_off;
2802 	u8 l4_proto = 0;
2803 
2804 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2805 		return 0;
2806 
2807 	ip.hdr = skb_network_header(skb);
2808 	l4.hdr = skb_transport_header(skb);
2809 
2810 	/* compute outer L2 header size; the MACLEN field is in units of 2 bytes */
2811 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2812 
2813 	if (skb->encapsulation) {
2814 		u32 tunnel = 0;
2815 		/* define outer network header type */
2816 		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2817 			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2818 				  I40E_TX_CTX_EXT_IP_IPV4 :
2819 				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2820 
2821 			l4_proto = ip.v4->protocol;
2822 		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2823 			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
2824 
2825 			exthdr = ip.hdr + sizeof(*ip.v6);
2826 			l4_proto = ip.v6->nexthdr;
2827 			if (l4.hdr != exthdr)
2828 				ipv6_skip_exthdr(skb, exthdr - skb->data,
2829 						 &l4_proto, &frag_off);
2830 		}
2831 
2832 		/* define outer transport */
2833 		switch (l4_proto) {
2834 		case IPPROTO_UDP:
2835 			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
2836 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2837 			break;
2838 		case IPPROTO_GRE:
2839 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
2840 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2841 			break;
2842 		case IPPROTO_IPIP:
2843 		case IPPROTO_IPV6:
2844 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2845 			l4.hdr = skb_inner_network_header(skb);
2846 			break;
2847 		default:
2848 			if (*tx_flags & I40E_TX_FLAGS_TSO)
2849 				return -1;
2850 
2851 			skb_checksum_help(skb);
2852 			return 0;
2853 		}
2854 
2855 		/* compute outer L3 header size */
2856 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2857 			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2858 
2859 		/* switch IP header pointer from outer to inner header */
2860 		ip.hdr = skb_inner_network_header(skb);
2861 
2862 		/* compute tunnel header size */
2863 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2864 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2865 
2866 		/* indicate if we need to offload outer UDP header */
2867 		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2868 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2869 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2870 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2871 
2872 		/* record tunnel offload values */
2873 		*cd_tunneling |= tunnel;
2874 
2875 		/* switch L4 header pointer from outer to inner */
2876 		l4.hdr = skb_inner_transport_header(skb);
2877 		l4_proto = 0;
2878 
2879 		/* reset type as we transition from outer to inner headers */
2880 		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2881 		if (ip.v4->version == 4)
2882 			*tx_flags |= I40E_TX_FLAGS_IPV4;
2883 		if (ip.v6->version == 6)
2884 			*tx_flags |= I40E_TX_FLAGS_IPV6;
2885 	}
2886 
2887 	/* Enable IP checksum offloads */
2888 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2889 		l4_proto = ip.v4->protocol;
2890 		/* the stack computes the IP header already, the only time we
2891 		 * need the hardware to recompute it is in the case of TSO.
2892 		 */
2893 		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2894 		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2895 		       I40E_TX_DESC_CMD_IIPT_IPV4;
2896 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2897 		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2898 
2899 		exthdr = ip.hdr + sizeof(*ip.v6);
2900 		l4_proto = ip.v6->nexthdr;
2901 		if (l4.hdr != exthdr)
2902 			ipv6_skip_exthdr(skb, exthdr - skb->data,
2903 					 &l4_proto, &frag_off);
2904 	}
2905 
2906 	/* compute inner L3 header size */
2907 	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2908 
2909 	/* Enable L4 checksum offloads */
2910 	switch (l4_proto) {
2911 	case IPPROTO_TCP:
2912 		/* enable checksum offloads */
2913 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2914 		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2915 		break;
2916 	case IPPROTO_SCTP:
2917 		/* enable SCTP checksum offload */
2918 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2919 		offset |= (sizeof(struct sctphdr) >> 2) <<
2920 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2921 		break;
2922 	case IPPROTO_UDP:
2923 		/* enable UDP checksum offload */
2924 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2925 		offset |= (sizeof(struct udphdr) >> 2) <<
2926 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2927 		break;
2928 	default:
2929 		if (*tx_flags & I40E_TX_FLAGS_TSO)
2930 			return -1;
2931 		skb_checksum_help(skb);
2932 		return 0;
2933 	}
2934 
2935 	*td_cmd |= cmd;
2936 	*td_offset |= offset;
2937 
2938 	return 1;
2939 }
2940 
2941 /**
2942  * i40e_create_tx_ctx - Build the Tx context descriptor
2943  * @tx_ring:  ring to create the descriptor on
2944  * @cd_type_cmd_tso_mss: Quad Word 1
2945  * @cd_tunneling: Quad Word 0 - bits 0-31
2946  * @cd_l2tag2: Quad Word 0 - bits 32-63
2947  **/
2948 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2949 			       const u64 cd_type_cmd_tso_mss,
2950 			       const u32 cd_tunneling, const u32 cd_l2tag2)
2951 {
2952 	struct i40e_tx_context_desc *context_desc;
2953 	int i = tx_ring->next_to_use;
2954 
2955 	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2956 	    !cd_tunneling && !cd_l2tag2)
2957 		return;
2958 
2959 	/* grab the next descriptor */
2960 	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2961 
2962 	i++;
2963 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2964 
2965 	/* cpu_to_le32 and assign to struct fields */
2966 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2967 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2968 	context_desc->rsvd = cpu_to_le16(0);
2969 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2970 }
2971 
2972 /**
2973  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2974  * @tx_ring: the ring to be checked
2975  * @size:    the size buffer we want to assure is available
2976  *
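 * The queue is stopped before the free count is re-checked so that a
 * completion racing with this path is not lost: if the cleanup code has
 * freed descriptors in the meantime, the queue is simply restarted.
 *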
2977  * Returns -EBUSY if a stop is needed, else 0
2978  **/
2979 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2980 {
2981 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2982 	/* Memory barrier before checking head and tail */
2983 	smp_mb();
2984 
2985 	/* Check again in a case another CPU has just made room available. */
2986 	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2987 		return -EBUSY;
2988 
2989 	/* A reprieve! - use start_queue because it doesn't call schedule */
2990 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2991 	++tx_ring->tx_stats.restart_queue;
2992 	return 0;
2993 }
2994 
2995 /**
2996  * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2997  * @skb:      send buffer
2998  *
2999  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3000  * and so we need to figure out the cases where we need to linearize the skb.
3001  *
3002  * For TSO we need to count the TSO header and segment payload separately.
3003  * As such we need to check cases where we have 7 fragments or more as we
3004  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3005  * the segment payload in the first descriptor, and another 7 for the
3006  * fragments.
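 *
 * For example, with a gso_size of 1500 a chain of 256 byte fragments is
 * fine (any six consecutive fragments cover 1536 bytes), whereas a chain
 * of 128 byte fragments (768 bytes per six fragment window) would force
 * the skb to be linearized.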
3007  **/
3008 bool __i40e_chk_linearize(struct sk_buff *skb)
3009 {
3010 	const struct skb_frag_struct *frag, *stale;
3011 	int nr_frags, sum;
3012 
3013 	/* no need to check if number of frags is less than 7 */
3014 	nr_frags = skb_shinfo(skb)->nr_frags;
3015 	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3016 		return false;
3017 
3018 	/* We need to walk through the list and validate that each group
3019 	 * of 6 fragments totals at least gso_size.
3020 	 */
3021 	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3022 	frag = &skb_shinfo(skb)->frags[0];
3023 
3024 	/* Initialize size to the negative value of gso_size minus 1.  We
3025 	 * use this as the worst case scenario in which the frag ahead
3026 	 * of us only provides one byte which is why we are limited to 6
3027 	 * descriptors for a single transmit as the header and previous
3028 	 * fragment are already consuming 2 descriptors.
3029 	 */
3030 	sum = 1 - skb_shinfo(skb)->gso_size;
3031 
3032 	/* Add size of frags 0 through 4 to create our initial sum */
3033 	sum += skb_frag_size(frag++);
3034 	sum += skb_frag_size(frag++);
3035 	sum += skb_frag_size(frag++);
3036 	sum += skb_frag_size(frag++);
3037 	sum += skb_frag_size(frag++);
3038 
3039 	/* Walk through fragments adding latest fragment, testing it, and
3040 	 * then removing stale fragments from the sum.
3041 	 */
3042 	stale = &skb_shinfo(skb)->frags[0];
3043 	for (;;) {
3044 		sum += skb_frag_size(frag++);
3045 
3046 		/* if sum is negative we failed to make sufficient progress */
3047 		if (sum < 0)
3048 			return true;
3049 
3050 		if (!nr_frags--)
3051 			break;
3052 
3053 		sum -= skb_frag_size(stale++);
3054 	}
3055 
3056 	return false;
3057 }
3058 
3059 /**
3060  * i40e_tx_map - Build the Tx descriptor
3061  * @tx_ring:  ring to send buffer on
3062  * @skb:      send buffer
3063  * @first:    first buffer info buffer to use
3064  * @tx_flags: collected send information
3065  * @hdr_len:  size of the packet header
3066  * @td_cmd:   the command field in the descriptor
3067  * @td_offset: offset for checksum or crc
3068  *
3069  * Returns 0 on success, -1 on failure to DMA
3070  **/
3071 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3072 			      struct i40e_tx_buffer *first, u32 tx_flags,
3073 			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
3074 {
3075 	unsigned int data_len = skb->data_len;
3076 	unsigned int size = skb_headlen(skb);
3077 	struct skb_frag_struct *frag;
3078 	struct i40e_tx_buffer *tx_bi;
3079 	struct i40e_tx_desc *tx_desc;
3080 	u16 i = tx_ring->next_to_use;
3081 	u32 td_tag = 0;
3082 	dma_addr_t dma;
3083 	u16 desc_count = 1;
3084 
3085 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3086 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3087 		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3088 			 I40E_TX_FLAGS_VLAN_SHIFT;
3089 	}
3090 
3091 	first->tx_flags = tx_flags;
3092 
3093 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3094 
3095 	tx_desc = I40E_TX_DESC(tx_ring, i);
3096 	tx_bi = first;
3097 
3098 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3099 		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3100 
3101 		if (dma_mapping_error(tx_ring->dev, dma))
3102 			goto dma_error;
3103 
3104 		/* record length, and DMA address */
3105 		dma_unmap_len_set(tx_bi, len, size);
3106 		dma_unmap_addr_set(tx_bi, dma, dma);
3107 
3108 		/* align size to end of page */
3109 		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3110 		tx_desc->buffer_addr = cpu_to_le64(dma);
3111 
3112 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3113 			tx_desc->cmd_type_offset_bsz =
3114 				build_ctob(td_cmd, td_offset,
3115 					   max_data, td_tag);
3116 
3117 			tx_desc++;
3118 			i++;
3119 			desc_count++;
3120 
3121 			if (i == tx_ring->count) {
3122 				tx_desc = I40E_TX_DESC(tx_ring, 0);
3123 				i = 0;
3124 			}
3125 
3126 			dma += max_data;
3127 			size -= max_data;
3128 
3129 			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3130 			tx_desc->buffer_addr = cpu_to_le64(dma);
3131 		}
3132 
3133 		if (likely(!data_len))
3134 			break;
3135 
3136 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3137 							  size, td_tag);
3138 
3139 		tx_desc++;
3140 		i++;
3141 		desc_count++;
3142 
3143 		if (i == tx_ring->count) {
3144 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3145 			i = 0;
3146 		}
3147 
3148 		size = skb_frag_size(frag);
3149 		data_len -= size;
3150 
3151 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3152 				       DMA_TO_DEVICE);
3153 
3154 		tx_bi = &tx_ring->tx_bi[i];
3155 	}
3156 
3157 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3158 
3159 	i++;
3160 	if (i == tx_ring->count)
3161 		i = 0;
3162 
3163 	tx_ring->next_to_use = i;
3164 
3165 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3166 
3167 	/* write last descriptor with EOP bit */
3168 	td_cmd |= I40E_TX_DESC_CMD_EOP;
3169 
3170 	/* We can OR these values together as they both are checked against
3171 	 * 4 below and at this point desc_count will be used as a boolean value
3172 	 * after this if/else block.
3173 	 */
3174 	desc_count |= ++tx_ring->packet_stride;
3175 
3176 	/* Algorithm to optimize tail and RS bit setting:
3177 	 * if queue is stopped
3178 	 *	mark RS bit
3179 	 *	reset packet counter
3180 	 * else if xmit_more is supported and is true
3181 	 *	advance packet counter to 4
3182 	 *	reset desc_count to 0
3183 	 *
3184 	 * if desc_count >= 4
3185 	 *	mark RS bit
3186 	 *	reset packet counter
3187 	 * if desc_count > 0
3188 	 *	update tail
3189 	 *
3190 	 * Note: If there are fewer than 4 descriptors
3191 	 * pending and interrupts were disabled the service task will
3192 	 * trigger a force WB.
3193 	 */
3194 	if (netif_xmit_stopped(txring_txq(tx_ring))) {
3195 		goto do_rs;
3196 	} else if (skb->xmit_more) {
3197 		/* set stride to arm on next packet and reset desc_count */
3198 		tx_ring->packet_stride = WB_STRIDE;
3199 		desc_count = 0;
3200 	} else if (desc_count >= WB_STRIDE) {
3201 do_rs:
3202 		/* write last descriptor with RS bit set */
3203 		td_cmd |= I40E_TX_DESC_CMD_RS;
3204 		tx_ring->packet_stride = 0;
3205 	}
3206 
3207 	tx_desc->cmd_type_offset_bsz =
3208 			build_ctob(td_cmd, td_offset, size, td_tag);
3209 
3210 	/* Force memory writes to complete before letting h/w know there
3211 	 * are new descriptors to fetch.
3212 	 *
3213 	 * We also use this memory barrier to make certain all of the
3214 	 * status bits have been updated before next_to_watch is written.
3215 	 */
3216 	wmb();
3217 
3218 	/* set next_to_watch value indicating a packet is present */
3219 	first->next_to_watch = tx_desc;
3220 
3221 	/* notify HW of packet */
3222 	if (desc_count) {
3223 		writel(i, tx_ring->tail);
3224 
3225 		/* we need this if more than one processor can write to our tail
3226 		 * at a time; it synchronizes IO on IA64/Altix systems
3227 		 */
3228 		mmiowb();
3229 	}
3230 
3231 	return 0;
3232 
3233 dma_error:
3234 	dev_info(tx_ring->dev, "TX DMA map failed\n");
3235 
3236 	/* clear dma mappings for failed tx_bi map */
3237 	for (;;) {
3238 		tx_bi = &tx_ring->tx_bi[i];
3239 		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3240 		if (tx_bi == first)
3241 			break;
3242 		if (i == 0)
3243 			i = tx_ring->count;
3244 		i--;
3245 	}
3246 
3247 	tx_ring->next_to_use = i;
3248 
3249 	return -1;
3250 }
3251 
3252 /**
3253  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3254  * @xdp: data to transmit
3255  * @xdp_ring: XDP Tx ring
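 *
 * The descriptor is written here, but the tail bump is deferred to the Rx
 * clean-up routine so that a single doorbell write covers an entire NAPI
 * poll.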
3256  **/
3257 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
3258 			      struct i40e_ring *xdp_ring)
3259 {
3260 	u32 size = xdp->data_end - xdp->data;
3261 	u16 i = xdp_ring->next_to_use;
3262 	struct i40e_tx_buffer *tx_bi;
3263 	struct i40e_tx_desc *tx_desc;
3264 	dma_addr_t dma;
3265 
3266 	if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3267 		xdp_ring->tx_stats.tx_busy++;
3268 		return I40E_XDP_CONSUMED;
3269 	}
3270 
3271 	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
3272 	if (dma_mapping_error(xdp_ring->dev, dma))
3273 		return I40E_XDP_CONSUMED;
3274 
3275 	tx_bi = &xdp_ring->tx_bi[i];
3276 	tx_bi->bytecount = size;
3277 	tx_bi->gso_segs = 1;
3278 	tx_bi->raw_buf = xdp->data;
3279 
3280 	/* record length, and DMA address */
3281 	dma_unmap_len_set(tx_bi, len, size);
3282 	dma_unmap_addr_set(tx_bi, dma, dma);
3283 
3284 	tx_desc = I40E_TX_DESC(xdp_ring, i);
3285 	tx_desc->buffer_addr = cpu_to_le64(dma);
3286 	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3287 						  | I40E_TXD_CMD,
3288 						  0, size, 0);
3289 
3290 	/* Make certain all of the status bits have been updated
3291 	 * before next_to_watch is written.
3292 	 */
3293 	smp_wmb();
3294 
3295 	i++;
3296 	if (i == xdp_ring->count)
3297 		i = 0;
3298 
3299 	tx_bi->next_to_watch = tx_desc;
3300 	xdp_ring->next_to_use = i;
3301 
3302 	return I40E_XDP_TX;
3303 }
3304 
3305 /**
3306  * i40e_xmit_frame_ring - Sends buffer on Tx ring
3307  * @skb:     send buffer
3308  * @tx_ring: ring to send buffer on
3309  *
3310  * Returns NETDEV_TX_OK if sent, else an error code
3311  **/
3312 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3313 					struct i40e_ring *tx_ring)
3314 {
3315 	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3316 	u32 cd_tunneling = 0, cd_l2tag2 = 0;
3317 	struct i40e_tx_buffer *first;
3318 	u32 td_offset = 0;
3319 	u32 tx_flags = 0;
3320 	__be16 protocol;
3321 	u32 td_cmd = 0;
3322 	u8 hdr_len = 0;
3323 	int tso, count;
3324 	int tsyn;
3325 
3326 	/* prefetch the data, we'll need it later */
3327 	prefetch(skb->data);
3328 
3329 	i40e_trace(xmit_frame_ring, skb, tx_ring);
3330 
3331 	count = i40e_xmit_descriptor_count(skb);
3332 	if (i40e_chk_linearize(skb, count)) {
3333 		if (__skb_linearize(skb)) {
3334 			dev_kfree_skb_any(skb);
3335 			return NETDEV_TX_OK;
3336 		}
3337 		count = i40e_txd_use_count(skb->len);
3338 		tx_ring->tx_stats.tx_linearize++;
3339 	}
3340 
3341 	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3342 	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3343 	 *       + 4 desc gap to avoid the cache line where head is,
3344 	 *       + 1 desc for context descriptor,
3345 	 * otherwise try next time
3346 	 */
3347 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3348 		tx_ring->tx_stats.tx_busy++;
3349 		return NETDEV_TX_BUSY;
3350 	}
3351 
3352 	/* record the location of the first descriptor for this packet */
3353 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
3354 	first->skb = skb;
3355 	first->bytecount = skb->len;
3356 	first->gso_segs = 1;
3357 
3358 	/* prepare the xmit flags */
3359 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3360 		goto out_drop;
3361 
3362 	/* obtain protocol of skb */
3363 	protocol = vlan_get_protocol(skb);
3364 
3365 	/* setup IPv4/IPv6 offloads */
3366 	if (protocol == htons(ETH_P_IP))
3367 		tx_flags |= I40E_TX_FLAGS_IPV4;
3368 	else if (protocol == htons(ETH_P_IPV6))
3369 		tx_flags |= I40E_TX_FLAGS_IPV6;
3370 
3371 	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3372 
3373 	if (tso < 0)
3374 		goto out_drop;
3375 	else if (tso)
3376 		tx_flags |= I40E_TX_FLAGS_TSO;
3377 
3378 	/* Always offload the checksum, since it's in the data descriptor */
3379 	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3380 				  tx_ring, &cd_tunneling);
3381 	if (tso < 0)
3382 		goto out_drop;
3383 
3384 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3385 
3386 	if (tsyn)
3387 		tx_flags |= I40E_TX_FLAGS_TSYN;
3388 
3389 	skb_tx_timestamp(skb);
3390 
3391 	/* always enable CRC insertion offload */
3392 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
3393 
3394 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3395 			   cd_tunneling, cd_l2tag2);
3396 
3397 	/* Add Flow Director ATR if it's enabled.
3398 	 *
3399 	 * NOTE: this must always be directly before the data descriptor.
3400 	 */
3401 	i40e_atr(tx_ring, skb, tx_flags);
3402 
3403 	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3404 			td_cmd, td_offset))
3405 		goto cleanup_tx_tstamp;
3406 
3407 	return NETDEV_TX_OK;
3408 
3409 out_drop:
3410 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3411 	dev_kfree_skb_any(first->skb);
3412 	first->skb = NULL;
3413 cleanup_tx_tstamp:
3414 	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3415 		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3416 
3417 		dev_kfree_skb_any(pf->ptp_tx_skb);
3418 		pf->ptp_tx_skb = NULL;
3419 		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3420 	}
3421 
3422 	return NETDEV_TX_OK;
3423 }
3424 
3425 /**
3426  * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3427  * @skb:    send buffer
3428  * @netdev: network interface device structure
3429  *
3430  * Returns NETDEV_TX_OK if sent, else an error code
3431  **/
3432 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3433 {
3434 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3435 	struct i40e_vsi *vsi = np->vsi;
3436 	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3437 
3438 	/* hardware can't handle really short frames, hardware padding works
3439 	 * beyond this point
3440 	 */
3441 	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3442 		return NETDEV_TX_OK;
3443 
3444 	return i40e_xmit_frame_ring(skb, tx_ring);
3445 }
3446