1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/prefetch.h>
5 #include <linux/bpf_trace.h>
6 #include <net/xdp.h>
7 #include "i40e.h"
8 #include "i40e_trace.h"
9 #include "i40e_prototype.h"
10 #include "i40e_txrx_common.h"
11 #include "i40e_xsk.h"
12 
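/* Tx command bits set on the last descriptor of a send: end of packet plus
 * report status, so the hardware writes back a completion for it.
 */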
13 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
14 /**
15  * i40e_fdir - Generate a Flow Director descriptor based on fdata
16  * @tx_ring: Tx ring to send buffer on
17  * @fdata: Flow director filter data
18  * @add: Indicate if we are adding a rule or deleting one
19  *
20  **/
21 static void i40e_fdir(struct i40e_ring *tx_ring,
22 		      struct i40e_fdir_filter *fdata, bool add)
23 {
24 	struct i40e_filter_program_desc *fdir_desc;
25 	struct i40e_pf *pf = tx_ring->vsi->back;
26 	u32 flex_ptype, dtype_cmd;
27 	u16 i;
28 
29 	/* grab the next descriptor */
30 	i = tx_ring->next_to_use;
31 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
32 
33 	i++;
34 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
35 
36 	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
37 		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
38 
39 	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
40 		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
41 
42 	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
43 		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
44 
45 	/* Use LAN VSI Id if not programmed by user */
46 	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
47 		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
48 		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
49 
50 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
51 
52 	dtype_cmd |= add ?
53 		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
54 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
55 		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
56 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;
57 
58 	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
59 		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
60 
61 	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
62 		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
63 
64 	if (fdata->cnt_index) {
65 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
66 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
67 			     ((u32)fdata->cnt_index <<
68 			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
69 	}
70 
71 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
72 	fdir_desc->rsvd = cpu_to_le32(0);
73 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
74 	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
75 }
76 
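/* number of 1 ms waits for two free descriptors before giving up on
 * programming a Flow Director filter
 */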
77 #define I40E_FD_CLEAN_DELAY 10
78 /**
79  * i40e_program_fdir_filter - Program a Flow Director filter
80  * @fdir_data: Packet data that will be filter parameters
81  * @raw_packet: the pre-allocated packet buffer for FDir
82  * @pf: The PF pointer
83  * @add: True for add/update, False for remove
84  **/
85 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
86 				    u8 *raw_packet, struct i40e_pf *pf,
87 				    bool add)
88 {
89 	struct i40e_tx_buffer *tx_buf, *first;
90 	struct i40e_tx_desc *tx_desc;
91 	struct i40e_ring *tx_ring;
92 	struct i40e_vsi *vsi;
93 	struct device *dev;
94 	dma_addr_t dma;
95 	u32 td_cmd = 0;
96 	u16 i;
97 
98 	/* find existing FDIR VSI */
99 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
100 	if (!vsi)
101 		return -ENOENT;
102 
103 	tx_ring = vsi->tx_rings[0];
104 	dev = tx_ring->dev;
105 
106 	/* we need two descriptors to add/del a filter and we can wait */
107 	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
108 		if (!i)
109 			return -EAGAIN;
110 		msleep_interruptible(1);
111 	}
112 
113 	dma = dma_map_single(dev, raw_packet,
114 			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
115 	if (dma_mapping_error(dev, dma))
116 		goto dma_fail;
117 
118 	/* grab the next descriptor */
119 	i = tx_ring->next_to_use;
120 	first = &tx_ring->tx_bi[i];
121 	i40e_fdir(tx_ring, fdir_data, add);
122 
123 	/* Now program a dummy descriptor */
124 	i = tx_ring->next_to_use;
125 	tx_desc = I40E_TX_DESC(tx_ring, i);
126 	tx_buf = &tx_ring->tx_bi[i];
127 
128 	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
129 
130 	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
131 
132 	/* record length, and DMA address */
133 	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
134 	dma_unmap_addr_set(tx_buf, dma, dma);
135 
136 	tx_desc->buffer_addr = cpu_to_le64(dma);
137 	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
138 
139 	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
140 	tx_buf->raw_buf = (void *)raw_packet;
141 
142 	tx_desc->cmd_type_offset_bsz =
143 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
144 
145 	/* Force memory writes to complete before letting h/w
146 	 * know there are new descriptors to fetch.
147 	 */
148 	wmb();
149 
150 	/* Mark the data descriptor to be watched */
151 	first->next_to_watch = tx_desc;
152 
153 	writel(tx_ring->next_to_use, tx_ring->tail);
154 	return 0;
155 
156 dma_fail:
157 	return -1;
158 }
159 
160 /**
161  * i40e_create_dummy_packet - Constructs dummy packet for HW
162  * @dummy_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
164  * @l4proto: next level protocol used in data portion of l3
165  * @data: filter data
166  *
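 * The headers are laid out back to back: Ethernet, an optional VLAN tag,
 * then the IPv4 or IPv6 header; for an IPv4 frame without a VLAN tag the
 * layer 4 header therefore starts at byte 34 (14 + 20).
 *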
167  * Returns address of layer 4 protocol dummy packet.
168  **/
169 static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
170 				      struct i40e_fdir_filter *data)
171 {
172 	bool is_vlan = !!data->vlan_tag;
173 	struct vlan_hdr vlan;
174 	struct ipv6hdr ipv6;
175 	struct ethhdr eth;
176 	struct iphdr ip;
177 	u8 *tmp;
178 
179 	if (ipv4) {
180 		eth.h_proto = cpu_to_be16(ETH_P_IP);
181 		ip.protocol = l4proto;
182 		ip.version = 0x4;
183 		ip.ihl = 0x5;
184 
185 		ip.daddr = data->dst_ip;
186 		ip.saddr = data->src_ip;
187 	} else {
188 		eth.h_proto = cpu_to_be16(ETH_P_IPV6);
189 		ipv6.nexthdr = l4proto;
190 		ipv6.version = 0x6;
191 
192 		memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
193 		       sizeof(__be32) * 4);
194 		memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
195 		       sizeof(__be32) * 4);
196 	}
197 
198 	if (is_vlan) {
199 		vlan.h_vlan_TCI = data->vlan_tag;
200 		vlan.h_vlan_encapsulated_proto = eth.h_proto;
201 		eth.h_proto = data->vlan_etype;
202 	}
203 
204 	tmp = dummy_packet;
205 	memcpy(tmp, &eth, sizeof(eth));
206 	tmp += sizeof(eth);
207 
208 	if (is_vlan) {
209 		memcpy(tmp, &vlan, sizeof(vlan));
210 		tmp += sizeof(vlan);
211 	}
212 
213 	if (ipv4) {
214 		memcpy(tmp, &ip, sizeof(ip));
215 		tmp += sizeof(ip);
216 	} else {
217 		memcpy(tmp, &ipv6, sizeof(ipv6));
218 		tmp += sizeof(ipv6);
219 	}
220 
221 	return tmp;
222 }
223 
224 /**
225  * i40e_create_dummy_udp_packet - helper function to create UDP packet
226  * @raw_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
228  * @l4proto: next level protocol used in data portion of l3
229  * @data: filter data
230  *
231  * Helper function to populate udp fields.
232  **/
233 static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
234 					 struct i40e_fdir_filter *data)
235 {
236 	struct udphdr *udp;
237 	u8 *tmp;
238 
239 	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
240 	udp = (struct udphdr *)(tmp);
241 	udp->dest = data->dst_port;
242 	udp->source = data->src_port;
243 }
244 
245 /**
246  * i40e_create_dummy_tcp_packet - helper function to create TCP packet
247  * @raw_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
249  * @l4proto: next level protocol used in data portion of l3
250  * @data: filter data
251  *
252  * Helper function to populate tcp fields.
253  **/
254 static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
255 					 struct i40e_fdir_filter *data)
256 {
257 	struct tcphdr *tcp;
258 	u8 *tmp;
	/* Dummy tcp packet: bytes 12-13 (0x50, 0x11) encode a data offset of
	 * five 32-bit words (20-byte header, no options) plus the FIN/ACK
	 * flag bits, and bytes 14-15 a small receive window.
	 */
260 	static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
261 		0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
262 
263 	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
264 
265 	tcp = (struct tcphdr *)tmp;
266 	memcpy(tcp, tcp_packet, sizeof(tcp_packet));
267 	tcp->dest = data->dst_port;
268 	tcp->source = data->src_port;
269 }
270 
271 /**
272  * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
273  * @raw_packet: preallocated space for dummy packet
 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
275  * @l4proto: next level protocol used in data portion of l3
276  * @data: filter data
277  *
278  * Helper function to populate sctp fields.
279  **/
280 static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
281 					  u8 l4proto,
282 					  struct i40e_fdir_filter *data)
283 {
284 	struct sctphdr *sctp;
285 	u8 *tmp;
286 
287 	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
288 
289 	sctp = (struct sctphdr *)tmp;
290 	sctp->dest = data->dst_port;
291 	sctp->source = data->src_port;
292 }
293 
294 /**
295  * i40e_prepare_fdir_filter - Prepare and program fdir filter
296  * @pf: physical function to attach filter to
297  * @fd_data: filter data
298  * @add: add or delete filter
299  * @packet_addr: address of dummy packet, used in filtering
300  * @payload_offset: offset from dummy packet address to user defined data
301  * @pctype: Packet type for which filter is used
302  *
303  * Helper function to offset data of dummy packet, program it and
304  * handle errors.
305  **/
306 static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
307 				    struct i40e_fdir_filter *fd_data,
308 				    bool add, char *packet_addr,
309 				    int payload_offset, u8 pctype)
310 {
311 	int ret;
312 
313 	if (fd_data->flex_filter) {
314 		u8 *payload;
315 		__be16 pattern = fd_data->flex_word;
316 		u16 off = fd_data->flex_offset;
317 
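		/* write the user's flex word at its configured offset within
		 * the payload portion of the dummy frame
		 */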
318 		payload = packet_addr + payload_offset;
319 
320 		/* If user provided vlan, offset payload by vlan header length */
321 		if (!!fd_data->vlan_tag)
322 			payload += VLAN_HLEN;
323 
324 		*((__force __be16 *)(payload + off)) = pattern;
325 	}
326 
327 	fd_data->pctype = pctype;
328 	ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
329 	if (ret) {
330 		dev_info(&pf->pdev->dev,
331 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
332 			 fd_data->pctype, fd_data->fd_id, ret);
333 		/* Free the packet buffer since it wasn't added to the ring */
334 		return -EOPNOTSUPP;
335 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
336 		if (add)
337 			dev_info(&pf->pdev->dev,
338 				 "Filter OK for PCTYPE %d loc = %d\n",
339 				 fd_data->pctype, fd_data->fd_id);
340 		else
341 			dev_info(&pf->pdev->dev,
342 				 "Filter deleted for PCTYPE %d loc = %d\n",
343 				 fd_data->pctype, fd_data->fd_id);
344 	}
345 
346 	return ret;
347 }
348 
349 /**
 * i40e_change_filter_num - Update the Flow Director filter counters
 * @ipv4: true if the filter is IPv4, false if IPv6
 * @add: true if a filter was added, false if one was deleted
 * @ipv4_filter_num: pointer to the PF's IPv4 filter counter
 * @ipv6_filter_num: pointer to the PF's IPv6 filter counter
 *
 * Increment or decrement the relevant per-protocol filter count for the PF.
357  **/
358 static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
359 				   u16 *ipv6_filter_num)
360 {
361 	if (add) {
362 		if (ipv4)
363 			(*ipv4_filter_num)++;
364 		else
365 			(*ipv6_filter_num)++;
366 	} else {
367 		if (ipv4)
368 			(*ipv4_filter_num)--;
369 		else
370 			(*ipv6_filter_num)--;
371 	}
372 }
373 
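/* The *_DUMMY_PACKET_LEN values below are the byte offsets of the payload in
 * each dummy frame (the Ethernet, IP/IPv6 and, where present, L4 headers);
 * an optional VLAN tag is accounted for separately when the flex payload is
 * written.
 */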
374 #define IP_HEADER_OFFSET		14
375 #define I40E_UDPIP_DUMMY_PACKET_LEN	42
376 #define I40E_UDPIP6_DUMMY_PACKET_LEN	62
377 /**
378  * i40e_add_del_fdir_udp - Add/Remove UDP filters
379  * @vsi: pointer to the targeted VSI
380  * @fd_data: the flow director data required for the FDir descriptor
381  * @add: true adds a filter, false removes it
 * @ipv4: true for an IPv4 filter, false for IPv6
383  *
384  * Returns 0 if the filters were successfully added or removed
385  **/
386 static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
387 				 struct i40e_fdir_filter *fd_data,
388 				 bool add,
389 				 bool ipv4)
390 {
391 	struct i40e_pf *pf = vsi->back;
392 	u8 *raw_packet;
393 	int ret;
394 
395 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
396 	if (!raw_packet)
397 		return -ENOMEM;
398 
399 	i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
400 
401 	if (ipv4)
402 		ret = i40e_prepare_fdir_filter
403 			(pf, fd_data, add, raw_packet,
404 			 I40E_UDPIP_DUMMY_PACKET_LEN,
405 			 I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
406 	else
407 		ret = i40e_prepare_fdir_filter
408 			(pf, fd_data, add, raw_packet,
409 			 I40E_UDPIP6_DUMMY_PACKET_LEN,
410 			 I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
411 
412 	if (ret) {
413 		kfree(raw_packet);
414 		return ret;
415 	}
416 
417 	i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
418 			       &pf->fd_udp6_filter_cnt);
419 
420 	return 0;
421 }
422 
423 #define I40E_TCPIP_DUMMY_PACKET_LEN	54
424 #define I40E_TCPIP6_DUMMY_PACKET_LEN	74
425 /**
 * i40e_add_del_fdir_tcp - Add/Remove TCP filters
427  * @vsi: pointer to the targeted VSI
428  * @fd_data: the flow director data required for the FDir descriptor
429  * @add: true adds a filter, false removes it
 * @ipv4: true for an IPv4 filter, false for IPv6
431  *
432  * Returns 0 if the filters were successfully added or removed
433  **/
434 static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
435 				 struct i40e_fdir_filter *fd_data,
436 				 bool add,
437 				 bool ipv4)
438 {
439 	struct i40e_pf *pf = vsi->back;
440 	u8 *raw_packet;
441 	int ret;
442 
443 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
444 	if (!raw_packet)
445 		return -ENOMEM;
446 
447 	i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
448 	if (ipv4)
449 		ret = i40e_prepare_fdir_filter
450 			(pf, fd_data, add, raw_packet,
451 			 I40E_TCPIP_DUMMY_PACKET_LEN,
452 			 I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
453 	else
454 		ret = i40e_prepare_fdir_filter
455 			(pf, fd_data, add, raw_packet,
456 			 I40E_TCPIP6_DUMMY_PACKET_LEN,
457 			 I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
458 
459 	if (ret) {
460 		kfree(raw_packet);
461 		return ret;
462 	}
463 
464 	i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
465 			       &pf->fd_tcp6_filter_cnt);
466 
467 	if (add) {
468 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
469 		    I40E_DEBUG_FD & pf->hw.debug_mask)
470 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
471 		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
472 	}
473 	return 0;
474 }
475 
476 #define I40E_SCTPIP_DUMMY_PACKET_LEN	46
477 #define I40E_SCTPIP6_DUMMY_PACKET_LEN	66
478 /**
 * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
480  * a specific flow spec
481  * @vsi: pointer to the targeted VSI
482  * @fd_data: the flow director data required for the FDir descriptor
483  * @add: true adds a filter, false removes it
 * @ipv4: true for an IPv4 filter, false for IPv6
485  *
486  * Returns 0 if the filters were successfully added or removed
487  **/
488 static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
489 				  struct i40e_fdir_filter *fd_data,
490 				  bool add,
491 				  bool ipv4)
492 {
493 	struct i40e_pf *pf = vsi->back;
494 	u8 *raw_packet;
495 	int ret;
496 
497 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
498 	if (!raw_packet)
499 		return -ENOMEM;
500 
501 	i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
502 
503 	if (ipv4)
504 		ret = i40e_prepare_fdir_filter
505 			(pf, fd_data, add, raw_packet,
506 			 I40E_SCTPIP_DUMMY_PACKET_LEN,
507 			 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
508 	else
509 		ret = i40e_prepare_fdir_filter
510 			(pf, fd_data, add, raw_packet,
511 			 I40E_SCTPIP6_DUMMY_PACKET_LEN,
512 			 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
513 
514 	if (ret) {
515 		kfree(raw_packet);
516 		return ret;
517 	}
518 
519 	i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
520 			       &pf->fd_sctp6_filter_cnt);
521 
522 	return 0;
523 }
524 
525 #define I40E_IP_DUMMY_PACKET_LEN	34
526 #define I40E_IP6_DUMMY_PACKET_LEN	54
527 /**
 * i40e_add_del_fdir_ip - Add/Remove IPv4/IPv6 Flow Director filters for
529  * a specific flow spec
530  * @vsi: pointer to the targeted VSI
531  * @fd_data: the flow director data required for the FDir descriptor
532  * @add: true adds a filter, false removes it
 * @ipv4: true for an IPv4 filter, false for IPv6
534  *
535  * Returns 0 if the filters were successfully added or removed
536  **/
537 static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
538 				struct i40e_fdir_filter *fd_data,
539 				bool add,
540 				bool ipv4)
541 {
542 	struct i40e_pf *pf = vsi->back;
543 	int payload_offset;
544 	u8 *raw_packet;
545 	int iter_start;
546 	int iter_end;
547 	int ret;
548 	int i;
549 
550 	if (ipv4) {
551 		iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
552 		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
553 	} else {
554 		iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
555 		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
556 	}
557 
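	/* program one filter per PCTYPE so that both the non-fragmented
	 * "other" and fragmented flavours of this IP version are matched
	 */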
558 	for (i = iter_start; i <= iter_end; i++) {
559 		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
560 		if (!raw_packet)
561 			return -ENOMEM;
562 
		/* IPv4 uses IPPROTO_IP, IPv6 uses IPPROTO_NONE (no next header) */
564 		(void)i40e_create_dummy_packet
565 			(raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
566 			 fd_data);
567 
568 		payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
569 			I40E_IP6_DUMMY_PACKET_LEN;
570 		ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
571 					       payload_offset, i);
572 		if (ret)
573 			goto err;
574 	}
575 
576 	i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
577 			       &pf->fd_ip6_filter_cnt);
578 
579 	return 0;
580 err:
581 	kfree(raw_packet);
582 	return ret;
583 }
584 
585 /**
586  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
587  * @vsi: pointer to the targeted VSI
588  * @input: filter to add or delete
589  * @add: true adds a filter, false removes it
590  *
591  **/
592 int i40e_add_del_fdir(struct i40e_vsi *vsi,
593 		      struct i40e_fdir_filter *input, bool add)
594 {
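	/* readable aliases for the bool "ipv4" argument of the helpers below */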
595 	enum ip_ver { ipv6 = 0, ipv4 = 1 };
596 	struct i40e_pf *pf = vsi->back;
597 	int ret;
598 
599 	switch (input->flow_type & ~FLOW_EXT) {
600 	case TCP_V4_FLOW:
601 		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
602 		break;
603 	case UDP_V4_FLOW:
604 		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
605 		break;
606 	case SCTP_V4_FLOW:
607 		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
608 		break;
609 	case TCP_V6_FLOW:
610 		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
611 		break;
612 	case UDP_V6_FLOW:
613 		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
614 		break;
615 	case SCTP_V6_FLOW:
616 		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
617 		break;
618 	case IP_USER_FLOW:
619 		switch (input->ipl4_proto) {
620 		case IPPROTO_TCP:
621 			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
622 			break;
623 		case IPPROTO_UDP:
624 			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
625 			break;
626 		case IPPROTO_SCTP:
627 			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
628 			break;
629 		case IPPROTO_IP:
630 			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
631 			break;
632 		default:
633 			/* We cannot support masking based on protocol */
634 			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
635 				 input->ipl4_proto);
636 			return -EINVAL;
637 		}
638 		break;
639 	case IPV6_USER_FLOW:
640 		switch (input->ipl4_proto) {
641 		case IPPROTO_TCP:
642 			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
643 			break;
644 		case IPPROTO_UDP:
645 			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
646 			break;
647 		case IPPROTO_SCTP:
648 			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
649 			break;
650 		case IPPROTO_IP:
651 			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
652 			break;
653 		default:
654 			/* We cannot support masking based on protocol */
655 			dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
656 				 input->ipl4_proto);
657 			return -EINVAL;
658 		}
659 		break;
660 	default:
661 		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
662 			 input->flow_type);
663 		return -EINVAL;
664 	}
665 
	/* The buffer allocated here will normally be freed by
667 	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
668 	 * completion. In the event of an error adding the buffer to the FDIR
669 	 * ring, it will immediately be freed. It may also be freed by
670 	 * i40e_clean_tx_ring() when closing the VSI.
671 	 */
672 	return ret;
673 }
674 
675 /**
676  * i40e_fd_handle_status - check the Programming Status for FD
677  * @rx_ring: the Rx ring for this descriptor
678  * @qword0_raw: qword0
679  * @qword1: qword1 after le_to_cpu
680  * @prog_id: the id originally used for programming
681  *
682  * This is used to verify if the FD programming or invalidation
683  * requested by SW to the HW is successful or not and take actions accordingly.
684  **/
685 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
686 				  u64 qword1, u8 prog_id)
687 {
688 	struct i40e_pf *pf = rx_ring->vsi->back;
689 	struct pci_dev *pdev = pf->pdev;
690 	struct i40e_16b_rx_wb_qw0 *qw0;
691 	u32 fcnt_prog, fcnt_avail;
692 	u32 error;
693 
694 	qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
695 	error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
696 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
697 
698 	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
699 		pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
700 		if (qw0->hi_dword.fd_id != 0 ||
701 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
702 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
703 				 pf->fd_inv);
704 
705 		/* Check if the programming error is for ATR.
706 		 * If so, auto disable ATR and set a state for
707 		 * flush in progress. Next time we come here if flush is in
708 		 * progress do nothing, once flush is complete the state will
709 		 * be cleared.
710 		 */
711 		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
712 			return;
713 
714 		pf->fd_add_err++;
715 		/* store the current atr filter count */
716 		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
717 
718 		if (qw0->hi_dword.fd_id == 0 &&
719 		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
720 			/* These set_bit() calls aren't atomic with the
721 			 * test_bit() here, but worse case we potentially
722 			 * disable ATR and queue a flush right after SB
723 			 * support is re-enabled. That shouldn't cause an
724 			 * issue in practice
725 			 */
726 			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
727 			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
728 		}
729 
730 		/* filter programming failed most likely due to table full */
731 		fcnt_prog = i40e_get_global_fd_count(pf);
732 		fcnt_avail = pf->fdir_pf_filter_count;
733 		/* If ATR is running fcnt_prog can quickly change,
734 		 * if we are very close to full, it makes sense to disable
735 		 * FD ATR/SB and then re-enable it when there is room.
736 		 */
737 		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
738 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
739 			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
740 					      pf->state))
741 				if (I40E_DEBUG_FD & pf->hw.debug_mask)
742 					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
743 		}
744 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
745 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
746 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
747 				 qw0->hi_dword.fd_id);
748 	}
749 }
750 
751 /**
752  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
753  * @ring:      the ring that owns the buffer
754  * @tx_buffer: the buffer to free
755  **/
756 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
757 					    struct i40e_tx_buffer *tx_buffer)
758 {
759 	if (tx_buffer->skb) {
760 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
761 			kfree(tx_buffer->raw_buf);
762 		else if (ring_is_xdp(ring))
763 			xdp_return_frame(tx_buffer->xdpf);
764 		else
765 			dev_kfree_skb_any(tx_buffer->skb);
766 		if (dma_unmap_len(tx_buffer, len))
767 			dma_unmap_single(ring->dev,
768 					 dma_unmap_addr(tx_buffer, dma),
769 					 dma_unmap_len(tx_buffer, len),
770 					 DMA_TO_DEVICE);
771 	} else if (dma_unmap_len(tx_buffer, len)) {
772 		dma_unmap_page(ring->dev,
773 			       dma_unmap_addr(tx_buffer, dma),
774 			       dma_unmap_len(tx_buffer, len),
775 			       DMA_TO_DEVICE);
776 	}
777 
778 	tx_buffer->next_to_watch = NULL;
779 	tx_buffer->skb = NULL;
780 	dma_unmap_len_set(tx_buffer, len, 0);
781 	/* tx_buffer must be completely set up in the transmit path */
782 }
783 
784 /**
 * i40e_clean_tx_ring - Free any Tx buffers still on the ring
786  * @tx_ring: ring to be cleaned
787  **/
788 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
789 {
790 	unsigned long bi_size;
791 	u16 i;
792 
793 	if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
794 		i40e_xsk_clean_tx_ring(tx_ring);
795 	} else {
796 		/* ring already cleared, nothing to do */
797 		if (!tx_ring->tx_bi)
798 			return;
799 
800 		/* Free all the Tx ring sk_buffs */
801 		for (i = 0; i < tx_ring->count; i++)
802 			i40e_unmap_and_free_tx_resource(tx_ring,
803 							&tx_ring->tx_bi[i]);
804 	}
805 
806 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
807 	memset(tx_ring->tx_bi, 0, bi_size);
808 
809 	/* Zero out the descriptor ring */
810 	memset(tx_ring->desc, 0, tx_ring->size);
811 
812 	tx_ring->next_to_use = 0;
813 	tx_ring->next_to_clean = 0;
814 
815 	if (!tx_ring->netdev)
816 		return;
817 
818 	/* cleanup Tx queue statistics */
819 	netdev_tx_reset_queue(txring_txq(tx_ring));
820 }
821 
822 /**
823  * i40e_free_tx_resources - Free Tx resources per queue
824  * @tx_ring: Tx descriptor ring for a specific queue
825  *
826  * Free all transmit software resources
827  **/
828 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
829 {
830 	i40e_clean_tx_ring(tx_ring);
831 	kfree(tx_ring->tx_bi);
832 	tx_ring->tx_bi = NULL;
833 	kfree(tx_ring->xsk_descs);
834 	tx_ring->xsk_descs = NULL;
835 
836 	if (tx_ring->desc) {
837 		dma_free_coherent(tx_ring->dev, tx_ring->size,
838 				  tx_ring->desc, tx_ring->dma);
839 		tx_ring->desc = NULL;
840 	}
841 }
842 
843 /**
844  * i40e_get_tx_pending - how many tx descriptors not processed
845  * @ring: the ring of descriptors
846  * @in_sw: use SW variables
847  *
848  * Since there is no access to the ring head register
849  * in XL710, we need to use our local copies
850  **/
851 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
852 {
853 	u32 head, tail;
854 
855 	if (!in_sw) {
856 		head = i40e_get_head(ring);
857 		tail = readl(ring->tail);
858 	} else {
859 		head = ring->next_to_clean;
860 		tail = ring->next_to_use;
861 	}
862 
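	/* account for ring wrap when tail has wrapped around past head */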
863 	if (head != tail)
864 		return (head < tail) ?
865 			tail - head : (tail + ring->count - head);
866 
867 	return 0;
868 }
869 
870 /**
 * i40e_detect_recover_hung - detect and recover hung Tx queues
872  * @vsi:  pointer to vsi struct with tx queues
873  *
 * Check each of the netdev TX queues belonging to the VSI; if a queue appears
 * hung, trigger recovery by issuing a SW interrupt.
876  **/
877 void i40e_detect_recover_hung(struct i40e_vsi *vsi)
878 {
879 	struct i40e_ring *tx_ring = NULL;
880 	struct net_device *netdev;
881 	unsigned int i;
882 	int packets;
883 
884 	if (!vsi)
885 		return;
886 
887 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
888 		return;
889 
890 	netdev = vsi->netdev;
891 	if (!netdev)
892 		return;
893 
894 	if (!netif_carrier_ok(netdev))
895 		return;
896 
897 	for (i = 0; i < vsi->num_queue_pairs; i++) {
898 		tx_ring = vsi->tx_rings[i];
899 		if (tx_ring && tx_ring->desc) {
900 			/* If packet counter has not changed the queue is
901 			 * likely stalled, so force an interrupt for this
902 			 * queue.
903 			 *
904 			 * prev_pkt_ctr would be negative if there was no
905 			 * pending work.
906 			 */
907 			packets = tx_ring->stats.packets & INT_MAX;
908 			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
909 				i40e_force_wb(vsi, tx_ring->q_vector);
910 				continue;
911 			}
912 
913 			/* Memory barrier between read of packet count and call
914 			 * to i40e_get_tx_pending()
915 			 */
916 			smp_rmb();
917 			tx_ring->tx_stats.prev_pkt_ctr =
918 			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
919 		}
920 	}
921 }
922 
923 /**
924  * i40e_clean_tx_irq - Reclaim resources after transmit completes
925  * @vsi: the VSI we care about
926  * @tx_ring: Tx ring to clean
927  * @napi_budget: Used to determine if we are in netpoll
928  *
 * Returns true if there's any budget left (i.e. the clean is finished)
930  **/
931 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
932 			      struct i40e_ring *tx_ring, int napi_budget)
933 {
934 	int i = tx_ring->next_to_clean;
935 	struct i40e_tx_buffer *tx_buf;
936 	struct i40e_tx_desc *tx_head;
937 	struct i40e_tx_desc *tx_desc;
938 	unsigned int total_bytes = 0, total_packets = 0;
939 	unsigned int budget = vsi->work_limit;
940 
941 	tx_buf = &tx_ring->tx_bi[i];
942 	tx_desc = I40E_TX_DESC(tx_ring, i);
943 	i -= tx_ring->count;
944 
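	/* the hardware reports its ring head via the head writeback area
	 * (see i40e_get_head()); snapshot it once for this cleanup pass
	 */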
945 	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
946 
947 	do {
948 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
949 
950 		/* if next_to_watch is not set then there is no work pending */
951 		if (!eop_desc)
952 			break;
953 
954 		/* prevent any other reads prior to eop_desc */
955 		smp_rmb();
956 
957 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
958 		/* we have caught up to head, no work left to do */
959 		if (tx_head == tx_desc)
960 			break;
961 
962 		/* clear next_to_watch to prevent false hangs */
963 		tx_buf->next_to_watch = NULL;
964 
965 		/* update the statistics for this packet */
966 		total_bytes += tx_buf->bytecount;
967 		total_packets += tx_buf->gso_segs;
968 
969 		/* free the skb/XDP data */
970 		if (ring_is_xdp(tx_ring))
971 			xdp_return_frame(tx_buf->xdpf);
972 		else
973 			napi_consume_skb(tx_buf->skb, napi_budget);
974 
975 		/* unmap skb header data */
976 		dma_unmap_single(tx_ring->dev,
977 				 dma_unmap_addr(tx_buf, dma),
978 				 dma_unmap_len(tx_buf, len),
979 				 DMA_TO_DEVICE);
980 
981 		/* clear tx_buffer data */
982 		tx_buf->skb = NULL;
983 		dma_unmap_len_set(tx_buf, len, 0);
984 
985 		/* unmap remaining buffers */
986 		while (tx_desc != eop_desc) {
987 			i40e_trace(clean_tx_irq_unmap,
988 				   tx_ring, tx_desc, tx_buf);
989 
990 			tx_buf++;
991 			tx_desc++;
992 			i++;
993 			if (unlikely(!i)) {
994 				i -= tx_ring->count;
995 				tx_buf = tx_ring->tx_bi;
996 				tx_desc = I40E_TX_DESC(tx_ring, 0);
997 			}
998 
999 			/* unmap any remaining paged data */
1000 			if (dma_unmap_len(tx_buf, len)) {
1001 				dma_unmap_page(tx_ring->dev,
1002 					       dma_unmap_addr(tx_buf, dma),
1003 					       dma_unmap_len(tx_buf, len),
1004 					       DMA_TO_DEVICE);
1005 				dma_unmap_len_set(tx_buf, len, 0);
1006 			}
1007 		}
1008 
1009 		/* move us one more past the eop_desc for start of next pkt */
1010 		tx_buf++;
1011 		tx_desc++;
1012 		i++;
1013 		if (unlikely(!i)) {
1014 			i -= tx_ring->count;
1015 			tx_buf = tx_ring->tx_bi;
1016 			tx_desc = I40E_TX_DESC(tx_ring, 0);
1017 		}
1018 
1019 		prefetch(tx_desc);
1020 
1021 		/* update budget accounting */
1022 		budget--;
1023 	} while (likely(budget));
1024 
1025 	i += tx_ring->count;
1026 	tx_ring->next_to_clean = i;
1027 	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
1028 	i40e_arm_wb(tx_ring, vsi, budget);
1029 
1030 	if (ring_is_xdp(tx_ring))
1031 		return !!budget;
1032 
1033 	/* notify netdev of completed buffers */
1034 	netdev_tx_completed_queue(txring_txq(tx_ring),
1035 				  total_packets, total_bytes);
1036 
1037 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
1038 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1039 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
1040 		/* Make sure that anybody stopping the queue after this
1041 		 * sees the new next_to_clean.
1042 		 */
1043 		smp_mb();
1044 		if (__netif_subqueue_stopped(tx_ring->netdev,
1045 					     tx_ring->queue_index) &&
1046 		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
1047 			netif_wake_subqueue(tx_ring->netdev,
1048 					    tx_ring->queue_index);
1049 			++tx_ring->tx_stats.restart_queue;
1050 		}
1051 	}
1052 
1053 	return !!budget;
1054 }
1055 
1056 /**
1057  * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1058  * @vsi: the VSI we care about
1059  * @q_vector: the vector on which to enable writeback
1060  *
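 * With WB_ON_ITR the hardware writes back completed descriptors when the
 * ITR expires, without raising an interrupt.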
1061  **/
1062 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
1063 				  struct i40e_q_vector *q_vector)
1064 {
1065 	u16 flags = q_vector->tx.ring[0].flags;
1066 	u32 val;
1067 
1068 	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
1069 		return;
1070 
1071 	if (q_vector->arm_wb_state)
1072 		return;
1073 
1074 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1075 		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
1076 		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
1077 
1078 		wr32(&vsi->back->hw,
1079 		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
1080 		     val);
1081 	} else {
1082 		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
1083 		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
1084 
1085 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1086 	}
1087 	q_vector->arm_wb_state = true;
1088 }
1089 
1090 /**
1091  * i40e_force_wb - Issue SW Interrupt so HW does a wb
1092  * @vsi: the VSI we care about
1093  * @q_vector: the vector  on which to force writeback
1094  *
1095  **/
1096 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
1097 {
1098 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1099 		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1100 			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
1101 			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
1102 			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
1103 			  /* allow 00 to be written to the index */
1104 
1105 		wr32(&vsi->back->hw,
1106 		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
1107 	} else {
1108 		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1109 			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
1110 			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1111 			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1112 			/* allow 00 to be written to the index */
1113 
1114 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1115 	}
1116 }
1117 
1118 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1119 					struct i40e_ring_container *rc)
1120 {
1121 	return &q_vector->rx == rc;
1122 }
1123 
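/* Scale the per-byte ITR adjustment by link speed: faster links get a larger
 * divisor and therefore a smaller computed delay (a higher interrupt rate).
 */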
1124 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1125 {
1126 	unsigned int divisor;
1127 
1128 	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1129 	case I40E_LINK_SPEED_40GB:
1130 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1131 		break;
1132 	case I40E_LINK_SPEED_25GB:
1133 	case I40E_LINK_SPEED_20GB:
1134 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1135 		break;
1136 	default:
1137 	case I40E_LINK_SPEED_10GB:
1138 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1139 		break;
1140 	case I40E_LINK_SPEED_1GB:
1141 	case I40E_LINK_SPEED_100MB:
1142 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1143 		break;
1144 	}
1145 
1146 	return divisor;
1147 }
1148 
1149 /**
1150  * i40e_update_itr - update the dynamic ITR value based on statistics
1151  * @q_vector: structure containing interrupt and ring information
1152  * @rc: structure containing ring performance data
1153  *
1154  * Stores a new ITR value based on packets and byte
1155  * counts during the last interrupt.  The advantage of per interrupt
1156  * computation is faster updates and more accurate ITR for the current
1157  * traffic pattern.  Constants in this function were computed
1158  * based on theoretical maximum wire speed and thresholds were set based
1159  * on testing data as well as attempting to minimize response time
1160  * while increasing bulk throughput.
1161  **/
1162 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1163 			    struct i40e_ring_container *rc)
1164 {
1165 	unsigned int avg_wire_size, packets, bytes, itr;
1166 	unsigned long next_update = jiffies;
1167 
1168 	/* If we don't have any rings just leave ourselves set for maximum
1169 	 * possible latency so we take ourselves out of the equation.
1170 	 */
1171 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1172 		return;
1173 
1174 	/* For Rx we want to push the delay up and default to low latency.
1175 	 * for Tx we want to pull the delay down and default to high latency.
1176 	 */
1177 	itr = i40e_container_is_rx(q_vector, rc) ?
1178 	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1179 	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1180 
1181 	/* If we didn't update within up to 1 - 2 jiffies we can assume
1182 	 * that either packets are coming in so slow there hasn't been
1183 	 * any work, or that there is so much work that NAPI is dealing
1184 	 * with interrupt moderation and we don't need to do anything.
1185 	 */
1186 	if (time_after(next_update, rc->next_update))
1187 		goto clear_counts;
1188 
1189 	/* If itr_countdown is set it means we programmed an ITR within
1190 	 * the last 4 interrupt cycles. This has a side effect of us
1191 	 * potentially firing an early interrupt. In order to work around
1192 	 * this we need to throw out any data received for a few
1193 	 * interrupts following the update.
1194 	 */
1195 	if (q_vector->itr_countdown) {
1196 		itr = rc->target_itr;
1197 		goto clear_counts;
1198 	}
1199 
1200 	packets = rc->total_packets;
1201 	bytes = rc->total_bytes;
1202 
1203 	if (i40e_container_is_rx(q_vector, rc)) {
		/* On Rx, if there are 1 to 4 packets and fewer than 9000
		 * bytes, assume there is insufficient data to use the bulk
		 * rate limiting approach unless Tx is already doing so. We
		 * are likely latency driven.
1208 		 */
1209 		if (packets && packets < 4 && bytes < 9000 &&
1210 		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1211 			itr = I40E_ITR_ADAPTIVE_LATENCY;
1212 			goto adjust_by_size;
1213 		}
1214 	} else if (packets < 4) {
1215 		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
1216 		 * bulk mode and we are receiving 4 or fewer packets just
1217 		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1218 		 * that the Rx can relax.
1219 		 */
1220 		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1221 		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1222 		     I40E_ITR_ADAPTIVE_MAX_USECS)
1223 			goto clear_counts;
1224 	} else if (packets > 32) {
1225 		/* If we have processed over 32 packets in a single interrupt
1226 		 * for Tx assume we need to switch over to "bulk" mode.
1227 		 */
1228 		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1229 	}
1230 
1231 	/* We have no packets to actually measure against. This means
1232 	 * either one of the other queues on this vector is active or
1233 	 * we are a Tx queue doing TSO with too high of an interrupt rate.
1234 	 *
1235 	 * Between 4 and 56 we can assume that our current interrupt delay
1236 	 * is only slightly too low. As such we should increase it by a small
1237 	 * fixed amount.
1238 	 */
1239 	if (packets < 56) {
1240 		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1241 		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1242 			itr &= I40E_ITR_ADAPTIVE_LATENCY;
1243 			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1244 		}
1245 		goto clear_counts;
1246 	}
1247 
1248 	if (packets <= 256) {
1249 		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1250 		itr &= I40E_ITR_MASK;
1251 
1252 		/* Between 56 and 112 is our "goldilocks" zone where we are
1253 		 * working out "just right". Just report that our current
1254 		 * ITR is good for us.
1255 		 */
1256 		if (packets <= 112)
1257 			goto clear_counts;
1258 
1259 		/* If packet count is 128 or greater we are likely looking
1260 		 * at a slight overrun of the delay we want. Try halving
1261 		 * our delay to see if that will cut the number of packets
1262 		 * in half per interrupt.
1263 		 */
1264 		itr /= 2;
1265 		itr &= I40E_ITR_MASK;
1266 		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1267 			itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1268 
1269 		goto clear_counts;
1270 	}
1271 
1272 	/* The paths below assume we are dealing with a bulk ITR since
1273 	 * number of packets is greater than 256. We are just going to have
1274 	 * to compute a value and try to bring the count under control,
1275 	 * though for smaller packet sizes there isn't much we can do as
1276 	 * NAPI polling will likely be kicking in sooner rather than later.
1277 	 */
1278 	itr = I40E_ITR_ADAPTIVE_BULK;
1279 
1280 adjust_by_size:
1281 	/* If packet counts are 256 or greater we can assume we have a gross
1282 	 * overestimation of what the rate should be. Instead of trying to fine
1283 	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
1285 	 */
1286 	avg_wire_size = bytes / packets;
1287 
1288 	/* The following is a crude approximation of:
1289 	 *  wmem_default / (size + overhead) = desired_pkts_per_int
1290 	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1291 	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1292 	 *
1293 	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1294 	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1295 	 * formula down to
1296 	 *
1297 	 *  (170 * (size + 24)) / (size + 640) = ITR
1298 	 *
1299 	 * We first do some math on the packet size and then finally bitshift
1300 	 * by 8 after rounding up. We also have to account for PCIe link speed
1301 	 * difference as ITR scales based on this.
1302 	 */
1303 	if (avg_wire_size <= 60) {
1304 		/* Start at 250k ints/sec */
1305 		avg_wire_size = 4096;
1306 	} else if (avg_wire_size <= 380) {
1307 		/* 250K ints/sec to 60K ints/sec */
1308 		avg_wire_size *= 40;
1309 		avg_wire_size += 1696;
1310 	} else if (avg_wire_size <= 1084) {
1311 		/* 60K ints/sec to 36K ints/sec */
1312 		avg_wire_size *= 15;
1313 		avg_wire_size += 11452;
1314 	} else if (avg_wire_size <= 1980) {
1315 		/* 36K ints/sec to 30K ints/sec */
1316 		avg_wire_size *= 5;
1317 		avg_wire_size += 22420;
1318 	} else {
1319 		/* plateau at a limit of 30K ints/sec */
1320 		avg_wire_size = 32256;
1321 	}
1322 
1323 	/* If we are in low latency mode halve our delay which doubles the
1324 	 * rate to somewhere between 100K to 16K ints/sec
1325 	 */
1326 	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1327 		avg_wire_size /= 2;
1328 
1329 	/* Resultant value is 256 times larger than it needs to be. This
1330 	 * gives us room to adjust the value as needed to either increase
1331 	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1332 	 *
1333 	 * Use addition as we have already recorded the new latency flag
1334 	 * for the ITR value.
1335 	 */
1336 	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1337 	       I40E_ITR_ADAPTIVE_MIN_INC;
1338 
1339 	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1340 		itr &= I40E_ITR_ADAPTIVE_LATENCY;
1341 		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1342 	}
1343 
1344 clear_counts:
1345 	/* write back value */
1346 	rc->target_itr = itr;
1347 
1348 	/* next update should occur within next jiffy */
1349 	rc->next_update = next_update + 1;
1350 
1351 	rc->total_bytes = 0;
1352 	rc->total_packets = 0;
1353 }
1354 
1355 static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
1356 {
1357 	return &rx_ring->rx_bi[idx];
1358 }
1359 
1360 /**
1361  * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1362  * @rx_ring: rx descriptor ring to store buffers on
1363  * @old_buff: donor buffer to have page reused
1364  *
1365  * Synchronizes page for reuse by the adapter
1366  **/
1367 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1368 			       struct i40e_rx_buffer *old_buff)
1369 {
1370 	struct i40e_rx_buffer *new_buff;
1371 	u16 nta = rx_ring->next_to_alloc;
1372 
1373 	new_buff = i40e_rx_bi(rx_ring, nta);
1374 
1375 	/* update, and store next to alloc */
1376 	nta++;
1377 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1378 
1379 	/* transfer page from old buffer to new buffer */
1380 	new_buff->dma		= old_buff->dma;
1381 	new_buff->page		= old_buff->page;
1382 	new_buff->page_offset	= old_buff->page_offset;
1383 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1384 
1385 	rx_ring->rx_stats.page_reuse_count++;
1386 
1387 	/* clear contents of buffer_info */
1388 	old_buff->page = NULL;
1389 }
1390 
1391 /**
1392  * i40e_clean_programming_status - clean the programming status descriptor
1393  * @rx_ring: the rx ring that has this descriptor
1394  * @qword0_raw: qword0
1395  * @qword1: qword1 representing status_error_len in CPU ordering
1396  *
1397  * Flow director should handle FD_FILTER_STATUS to check its filter programming
1398  * status being successful or not and take actions accordingly. FCoE should
1399  * handle its context/filter programming/invalidation status and take actions.
1402  **/
1403 void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
1404 				   u64 qword1)
1405 {
1406 	u8 id;
1407 
1408 	id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1409 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1410 
1411 	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1412 		i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
1413 }
1414 
1415 /**
1416  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1417  * @tx_ring: the tx ring to set up
1418  *
1419  * Return 0 on success, negative on error
1420  **/
1421 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1422 {
1423 	struct device *dev = tx_ring->dev;
1424 	int bi_size;
1425 
1426 	if (!dev)
1427 		return -ENOMEM;
1428 
1429 	/* warn if we are about to overwrite the pointer */
1430 	WARN_ON(tx_ring->tx_bi);
1431 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1432 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1433 	if (!tx_ring->tx_bi)
1434 		goto err;
1435 
1436 	if (ring_is_xdp(tx_ring)) {
1437 		tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
1438 					     GFP_KERNEL);
1439 		if (!tx_ring->xsk_descs)
1440 			goto err;
1441 	}
1442 
1443 	u64_stats_init(&tx_ring->syncp);
1444 
1445 	/* round up to nearest 4K */
1446 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add a u32 for the head writeback; the 4K alignment below then
	 * guarantees this extra space is at least one cache line in size
	 */
1450 	tx_ring->size += sizeof(u32);
1451 	tx_ring->size = ALIGN(tx_ring->size, 4096);
1452 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1453 					   &tx_ring->dma, GFP_KERNEL);
1454 	if (!tx_ring->desc) {
1455 		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1456 			 tx_ring->size);
1457 		goto err;
1458 	}
1459 
1460 	tx_ring->next_to_use = 0;
1461 	tx_ring->next_to_clean = 0;
1462 	tx_ring->tx_stats.prev_pkt_ctr = -1;
1463 	return 0;
1464 
1465 err:
1466 	kfree(tx_ring->xsk_descs);
1467 	tx_ring->xsk_descs = NULL;
1468 	kfree(tx_ring->tx_bi);
1469 	tx_ring->tx_bi = NULL;
1470 	return -ENOMEM;
1471 }
1472 
1473 int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
1474 {
1475 	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
1476 
1477 	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
1478 	return rx_ring->rx_bi ? 0 : -ENOMEM;
1479 }
1480 
1481 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
1482 {
1483 	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
1484 }
1485 
1486 /**
1487  * i40e_clean_rx_ring - Free Rx buffers
1488  * @rx_ring: ring to be cleaned
1489  **/
1490 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1491 {
1492 	u16 i;
1493 
1494 	/* ring already cleared, nothing to do */
1495 	if (!rx_ring->rx_bi)
1496 		return;
1497 
1498 	if (rx_ring->skb) {
1499 		dev_kfree_skb(rx_ring->skb);
1500 		rx_ring->skb = NULL;
1501 	}
1502 
1503 	if (rx_ring->xsk_pool) {
1504 		i40e_xsk_clean_rx_ring(rx_ring);
1505 		goto skip_free;
1506 	}
1507 
1508 	/* Free all the Rx ring sk_buffs */
1509 	for (i = 0; i < rx_ring->count; i++) {
1510 		struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
1511 
1512 		if (!rx_bi->page)
1513 			continue;
1514 
1515 		/* Invalidate cache lines that may have been written to by
1516 		 * device so that we avoid corrupting memory.
1517 		 */
1518 		dma_sync_single_range_for_cpu(rx_ring->dev,
1519 					      rx_bi->dma,
1520 					      rx_bi->page_offset,
1521 					      rx_ring->rx_buf_len,
1522 					      DMA_FROM_DEVICE);
1523 
1524 		/* free resources associated with mapping */
1525 		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1526 				     i40e_rx_pg_size(rx_ring),
1527 				     DMA_FROM_DEVICE,
1528 				     I40E_RX_DMA_ATTR);
1529 
1530 		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1531 
1532 		rx_bi->page = NULL;
1533 		rx_bi->page_offset = 0;
1534 	}
1535 
1536 skip_free:
1537 	if (rx_ring->xsk_pool)
1538 		i40e_clear_rx_bi_zc(rx_ring);
1539 	else
1540 		i40e_clear_rx_bi(rx_ring);
1541 
1542 	/* Zero out the descriptor ring */
1543 	memset(rx_ring->desc, 0, rx_ring->size);
1544 
1545 	rx_ring->next_to_alloc = 0;
1546 	rx_ring->next_to_clean = 0;
1547 	rx_ring->next_to_use = 0;
1548 }
1549 
1550 /**
1551  * i40e_free_rx_resources - Free Rx resources
1552  * @rx_ring: ring to clean the resources from
1553  *
1554  * Free all receive software resources
1555  **/
1556 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1557 {
1558 	i40e_clean_rx_ring(rx_ring);
1559 	if (rx_ring->vsi->type == I40E_VSI_MAIN)
1560 		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1561 	rx_ring->xdp_prog = NULL;
1562 	kfree(rx_ring->rx_bi);
1563 	rx_ring->rx_bi = NULL;
1564 
1565 	if (rx_ring->desc) {
1566 		dma_free_coherent(rx_ring->dev, rx_ring->size,
1567 				  rx_ring->desc, rx_ring->dma);
1568 		rx_ring->desc = NULL;
1569 	}
1570 }
1571 
1572 /**
1573  * i40e_setup_rx_descriptors - Allocate Rx descriptors
1574  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1575  *
1576  * Returns 0 on success, negative on failure
1577  **/
1578 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1579 {
1580 	struct device *dev = rx_ring->dev;
1581 	int err;
1582 
1583 	u64_stats_init(&rx_ring->syncp);
1584 
1585 	/* Round up to nearest 4K */
1586 	rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
1587 	rx_ring->size = ALIGN(rx_ring->size, 4096);
1588 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1589 					   &rx_ring->dma, GFP_KERNEL);
1590 
1591 	if (!rx_ring->desc) {
1592 		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1593 			 rx_ring->size);
1594 		return -ENOMEM;
1595 	}
1596 
1597 	rx_ring->next_to_alloc = 0;
1598 	rx_ring->next_to_clean = 0;
1599 	rx_ring->next_to_use = 0;
1600 
1601 	/* XDP RX-queue info only needed for RX rings exposed to XDP */
1602 	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1603 		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1604 				       rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
1605 		if (err < 0)
1606 			return err;
1607 	}
1608 
1609 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1610 
1611 	return 0;
1612 }
1613 
1614 /**
 * i40e_release_rx_desc - Store the new tail value and bump the hardware tail
 * @rx_ring: ring to bump
 * @val: new tail index (the next descriptor the driver will fill)
1618  **/
1619 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1620 {
1621 	rx_ring->next_to_use = val;
1622 
1623 	/* update next to alloc since we have filled the ring */
1624 	rx_ring->next_to_alloc = val;
1625 
1626 	/* Force memory writes to complete before letting h/w
1627 	 * know there are new descriptors to fetch.  (Only
1628 	 * applicable for weak-ordered memory model archs,
1629 	 * such as IA-64).
1630 	 */
1631 	wmb();
1632 	writel(val, rx_ring->tail);
1633 }
1634 
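/* Report the truesize an Rx buffer of @size bytes contributes to an skb:
 * a fixed half page on small-page systems, otherwise the exact aligned size
 * including any headroom and the shared info area.
 */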
1635 static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
1636 					   unsigned int size)
1637 {
1638 	unsigned int truesize;
1639 
1640 #if (PAGE_SIZE < 8192)
1641 	truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
1642 #else
1643 	truesize = rx_ring->rx_offset ?
1644 		SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
1645 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1646 		SKB_DATA_ALIGN(size);
1647 #endif
1648 	return truesize;
1649 }
1650 
1651 /**
1652  * i40e_alloc_mapped_page - recycle or make a new page
1653  * @rx_ring: ring to use
1654  * @bi: rx_buffer struct to modify
1655  *
1656  * Returns true if the page was successfully allocated or
1657  * reused.
1658  **/
1659 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1660 				   struct i40e_rx_buffer *bi)
1661 {
1662 	struct page *page = bi->page;
1663 	dma_addr_t dma;
1664 
1665 	/* since we are recycling buffers we should seldom need to alloc */
1666 	if (likely(page)) {
1667 		rx_ring->rx_stats.page_reuse_count++;
1668 		return true;
1669 	}
1670 
1671 	/* alloc new page for storage */
1672 	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1673 	if (unlikely(!page)) {
1674 		rx_ring->rx_stats.alloc_page_failed++;
1675 		return false;
1676 	}
1677 
1678 	/* map page for use */
1679 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1680 				 i40e_rx_pg_size(rx_ring),
1681 				 DMA_FROM_DEVICE,
1682 				 I40E_RX_DMA_ATTR);
1683 
1684 	/* if mapping failed free memory back to system since
1685 	 * there isn't much point in holding memory we can't use
1686 	 */
1687 	if (dma_mapping_error(rx_ring->dev, dma)) {
1688 		__free_pages(page, i40e_rx_pg_order(rx_ring));
1689 		rx_ring->rx_stats.alloc_page_failed++;
1690 		return false;
1691 	}
1692 
1693 	bi->dma = dma;
1694 	bi->page = page;
1695 	bi->page_offset = rx_ring->rx_offset;
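	/* take a large batch of page references up front and track them with
	 * pagecnt_bias so per-packet buffer reuse avoids atomic refcount
	 * updates
	 */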
1696 	page_ref_add(page, USHRT_MAX - 1);
1697 	bi->pagecnt_bias = USHRT_MAX;
1698 
1699 	return true;
1700 }
1701 
1702 /**
1703  * i40e_alloc_rx_buffers - Replace used receive buffers
1704  * @rx_ring: ring to place buffers on
1705  * @cleaned_count: number of buffers to replace
1706  *
1707  * Returns false if all allocations were successful, true if any fail
1708  **/
1709 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1710 {
1711 	u16 ntu = rx_ring->next_to_use;
1712 	union i40e_rx_desc *rx_desc;
1713 	struct i40e_rx_buffer *bi;
1714 
1715 	/* do nothing if no valid netdev defined */
1716 	if (!rx_ring->netdev || !cleaned_count)
1717 		return false;
1718 
1719 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
1720 	bi = i40e_rx_bi(rx_ring, ntu);
1721 
1722 	do {
1723 		if (!i40e_alloc_mapped_page(rx_ring, bi))
1724 			goto no_buffers;
1725 
1726 		/* sync the buffer for use by the device */
1727 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1728 						 bi->page_offset,
1729 						 rx_ring->rx_buf_len,
1730 						 DMA_FROM_DEVICE);
1731 
1732 		/* Refresh the desc even if buffer_addrs didn't change
1733 		 * because each write-back erases this info.
1734 		 */
1735 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1736 
1737 		rx_desc++;
1738 		bi++;
1739 		ntu++;
1740 		if (unlikely(ntu == rx_ring->count)) {
1741 			rx_desc = I40E_RX_DESC(rx_ring, 0);
1742 			bi = i40e_rx_bi(rx_ring, 0);
1743 			ntu = 0;
1744 		}
1745 
1746 		/* clear the status bits for the next_to_use descriptor */
1747 		rx_desc->wb.qword1.status_error_len = 0;
1748 
1749 		cleaned_count--;
1750 	} while (cleaned_count);
1751 
1752 	if (rx_ring->next_to_use != ntu)
1753 		i40e_release_rx_desc(rx_ring, ntu);
1754 
1755 	return false;
1756 
1757 no_buffers:
1758 	if (rx_ring->next_to_use != ntu)
1759 		i40e_release_rx_desc(rx_ring, ntu);
1760 
1761 	/* make sure to come back via polling to try again after
1762 	 * allocation failure
1763 	 */
1764 	return true;
1765 }
1766 
1767 /**
1768  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1769  * @vsi: the VSI we care about
1770  * @skb: skb currently being received and modified
1771  * @rx_desc: the receive descriptor
1772  **/
1773 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1774 				    struct sk_buff *skb,
1775 				    union i40e_rx_desc *rx_desc)
1776 {
1777 	struct i40e_rx_ptype_decoded decoded;
1778 	u32 rx_error, rx_status;
1779 	bool ipv4, ipv6;
1780 	u8 ptype;
1781 	u64 qword;
1782 
1783 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1784 	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1785 	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1786 		   I40E_RXD_QW1_ERROR_SHIFT;
1787 	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1788 		    I40E_RXD_QW1_STATUS_SHIFT;
1789 	decoded = decode_rx_desc_ptype(ptype);
1790 
1791 	skb->ip_summed = CHECKSUM_NONE;
1792 
1793 	skb_checksum_none_assert(skb);
1794 
1795 	/* Rx csum enabled and ip headers found? */
1796 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1797 		return;
1798 
1799 	/* did the hardware decode the packet and checksum? */
1800 	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1801 		return;
1802 
1803 	/* both known and outer_ip must be set for the below code to work */
1804 	if (!(decoded.known && decoded.outer_ip))
1805 		return;
1806 
1807 	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1808 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1809 	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1810 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1811 
1812 	if (ipv4 &&
1813 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1814 			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1815 		goto checksum_fail;
1816 
1817 	/* likely incorrect csum if alternate IP extension headers found */
1818 	if (ipv6 &&
1819 	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1820 		/* don't increment checksum err here, non-fatal err */
1821 		return;
1822 
1823 	/* there was some L4 error, count error and punt packet to the stack */
1824 	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1825 		goto checksum_fail;
1826 
1827 	/* handle packets that were not able to be checksummed due
1828 	 * to arrival speed, in this case the stack can compute
1829 	 * the csum.
1830 	 */
1831 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1832 		return;
1833 
1834 	/* If there is an outer header present that might contain a checksum
1835 	 * we need to bump the checksum level by 1 to reflect the fact that
1836 	 * we are indicating we validated the inner checksum.
1837 	 */
1838 	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1839 		skb->csum_level = 1;
1840 
1841 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
1842 	switch (decoded.inner_prot) {
1843 	case I40E_RX_PTYPE_INNER_PROT_TCP:
1844 	case I40E_RX_PTYPE_INNER_PROT_UDP:
1845 	case I40E_RX_PTYPE_INNER_PROT_SCTP:
1846 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1847 		fallthrough;
1848 	default:
1849 		break;
1850 	}
1851 
1852 	return;
1853 
1854 checksum_fail:
1855 	vsi->back->hw_csum_rx_error++;
1856 }
1857 
1858 /**
1859  * i40e_ptype_to_htype - get a hash type
1860  * @ptype: the ptype value from the descriptor
1861  *
1862  * Returns a hash type to be used by skb_set_hash
1863  **/
1864 static inline int i40e_ptype_to_htype(u8 ptype)
1865 {
1866 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1867 
1868 	if (!decoded.known)
1869 		return PKT_HASH_TYPE_NONE;
1870 
1871 	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1872 	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1873 		return PKT_HASH_TYPE_L4;
1874 	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1875 		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1876 		return PKT_HASH_TYPE_L3;
1877 	else
1878 		return PKT_HASH_TYPE_L2;
1879 }
1880 
1881 /**
1882  * i40e_rx_hash - set the hash value in the skb
1883  * @ring: descriptor ring
1884  * @rx_desc: specific descriptor
1885  * @skb: skb currently being received and modified
1886  * @rx_ptype: Rx packet type
1887  **/
1888 static inline void i40e_rx_hash(struct i40e_ring *ring,
1889 				union i40e_rx_desc *rx_desc,
1890 				struct sk_buff *skb,
1891 				u8 rx_ptype)
1892 {
1893 	u32 hash;
1894 	const __le64 rss_mask =
1895 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1896 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1897 
1898 	if (!(ring->netdev->features & NETIF_F_RXHASH))
1899 		return;
1900 
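	/* the 2-bit FLTSTAT field must equal I40E_RX_DESC_FLTSTAT_RSS_HASH,
	 * which has both bits set, for qword0 to carry a valid RSS hash;
	 * that is why the masked value is compared against the mask itself
	 */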
1901 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1902 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1903 		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1904 	}
1905 }
1906 
1907 /**
1908  * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1909  * @rx_ring: rx descriptor ring packet is being transacted on
1910  * @rx_desc: pointer to the EOP Rx descriptor
1911  * @skb: pointer to current skb being populated
1912  *
1913  * This function checks the ring, descriptor, and packet information in
1914  * order to populate the hash, checksum, VLAN, protocol, and
1915  * other fields within the skb.
1916  **/
1917 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1918 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1919 {
1920 	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1921 	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1922 			I40E_RXD_QW1_STATUS_SHIFT;
1923 	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1924 	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1925 		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1926 	u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1927 		      I40E_RXD_QW1_PTYPE_SHIFT;
1928 
1929 	if (unlikely(tsynvalid))
1930 		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1931 
1932 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1933 
1934 	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1935 
1936 	skb_record_rx_queue(skb, rx_ring->queue_index);
1937 
1938 	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1939 		__le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1940 
1941 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1942 				       le16_to_cpu(vlan_tag));
1943 	}
1944 
1945 	/* modifies the skb - consumes the enet header */
1946 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1947 }
1948 
1949 /**
1950  * i40e_cleanup_headers - Correct empty headers
1951  * @rx_ring: rx descriptor ring packet is being transacted on
1952  * @skb: pointer to current skb being fixed
1953  * @rx_desc: pointer to the EOP Rx descriptor
1954  *
1955  * Checks the EOP descriptor for receive errors and pads the skb to at
1956  * least 60 bytes so that it qualifies as a valid Ethernet frame.
1957  *
1958  * Returns true if an error was encountered and skb was freed.
1959  **/
1960 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1961 				 union i40e_rx_desc *rx_desc)
1962 
1963 {
1964 	/* ERR_MASK will only have valid bits if EOP is set, and
1965 	 * what we are doing here is actually checking
1966 	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1967 	 * the error field
1968 	 */
1969 	if (unlikely(i40e_test_staterr(rx_desc,
1970 				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1971 		dev_kfree_skb_any(skb);
1972 		return true;
1973 	}
1974 
1975 	/* if eth_skb_pad returns an error the skb was freed */
1976 	if (eth_skb_pad(skb))
1977 		return true;
1978 
1979 	return false;
1980 }
1981 
1982 /**
1983  * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1984  * @rx_buffer: buffer containing the page
1985  * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
1986  *
1987  * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
1988  * which will assign the current buffer to the buffer that next_to_alloc is
1989  * pointing to; otherwise, the DMA mapping needs to be destroyed and
1990  * page freed
1991  */
1992 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
1993 				   int rx_buffer_pgcnt)
1994 {
1995 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1996 	struct page *page = rx_buffer->page;
1997 
1998 	/* Is any reuse possible? */
1999 	if (!dev_page_is_reusable(page))
2000 		return false;
2001 
2002 #if (PAGE_SIZE < 8192)
2003 	/* if we are the only owner of the page we can reuse it */
2004 	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
2005 		return false;
2006 #else
2007 #define I40E_LAST_OFFSET \
2008 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
2009 	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
2010 		return false;
2011 #endif
2012 
2013 	/* If we have drained the page fragment pool we need to update
2014 	 * the pagecnt_bias and page count so that we fully restock the
2015 	 * number of references the driver holds.
2016 	 */
2017 	if (unlikely(pagecnt_bias == 1)) {
2018 		page_ref_add(page, USHRT_MAX - 1);
2019 		rx_buffer->pagecnt_bias = USHRT_MAX;
2020 	}
2021 
2022 	return true;
2023 }
2024 
2025 /**
2026  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
2027  * @rx_ring: rx descriptor ring to transact packets on
2028  * @rx_buffer: buffer containing page to add
2029  * @skb: sk_buff to place the data into
2030  * @size: packet length from rx_desc
2031  *
2032  * This function will add the data contained in rx_buffer->page to the skb.
2033  * It will just attach the page as a frag to the skb.
2034  *
2035  * The function will then update the page offset.
2036  **/
2037 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
2038 			     struct i40e_rx_buffer *rx_buffer,
2039 			     struct sk_buff *skb,
2040 			     unsigned int size)
2041 {
2042 #if (PAGE_SIZE < 8192)
2043 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2044 #else
2045 	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
2046 #endif
2047 
2048 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2049 			rx_buffer->page_offset, size, truesize);
2050 
2051 	/* page is being used so we must update the page offset */
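	/* for half-page buffers the XOR below flips between the two halves
	 * of the page, while for larger pages the offset simply advances
	 * by truesize
	 */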
2052 #if (PAGE_SIZE < 8192)
2053 	rx_buffer->page_offset ^= truesize;
2054 #else
2055 	rx_buffer->page_offset += truesize;
2056 #endif
2057 }
2058 
2059 /**
2060  * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2061  * @rx_ring: rx descriptor ring to transact packets on
2062  * @size: size of buffer to add to skb
2063  * @rx_buffer_pgcnt: buffer page refcount
2064  *
2065  * This function will pull an Rx buffer from the ring and synchronize it
2066  * for use by the CPU.
2067  */
2068 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2069 						 const unsigned int size,
2070 						 int *rx_buffer_pgcnt)
2071 {
2072 	struct i40e_rx_buffer *rx_buffer;
2073 
2074 	rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2075 	*rx_buffer_pgcnt =
2076 #if (PAGE_SIZE < 8192)
2077 		page_count(rx_buffer->page);
2078 #else
2079 		0;
2080 #endif
2081 	prefetch_page_address(rx_buffer->page);
2082 
2083 	/* we are reusing so sync this buffer for CPU use */
2084 	dma_sync_single_range_for_cpu(rx_ring->dev,
2085 				      rx_buffer->dma,
2086 				      rx_buffer->page_offset,
2087 				      size,
2088 				      DMA_FROM_DEVICE);
2089 
2090 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
2091 	rx_buffer->pagecnt_bias--;
2092 
2093 	return rx_buffer;
2094 }
2095 
2096 /**
2097  * i40e_construct_skb - Allocate skb and populate it
2098  * @rx_ring: rx descriptor ring to transact packets on
2099  * @rx_buffer: rx buffer to pull data from
2100  * @xdp: xdp_buff pointing to the data
2101  *
2102  * This function allocates an skb.  It then populates it with the page
2103  * data from the current receive descriptor, taking care to set up the
2104  * skb correctly.
2105  */
2106 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2107 					  struct i40e_rx_buffer *rx_buffer,
2108 					  struct xdp_buff *xdp)
2109 {
2110 	unsigned int size = xdp->data_end - xdp->data;
2111 #if (PAGE_SIZE < 8192)
2112 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2113 #else
2114 	unsigned int truesize = SKB_DATA_ALIGN(size);
2115 #endif
2116 	unsigned int headlen;
2117 	struct sk_buff *skb;
2118 
2119 	/* prefetch first cache line of first page */
2120 	net_prefetch(xdp->data);
2121 
2122 	/* Note, we get here by enabling legacy-rx via:
2123 	 *
2124 	 *    ethtool --set-priv-flags <dev> legacy-rx on
2125 	 *
2126 	 * In this mode, we currently get 0 extra XDP headroom as
2127 	 * opposed to having legacy-rx off, where we process XDP
2128 	 * packets going to stack via i40e_build_skb(). The latter
2129 	 * provides us currently with 192 bytes of headroom.
2130 	 *
2131 	 * For i40e_construct_skb() mode it means that the
2132 	 * xdp->data_meta will always point to xdp->data, since
2133 	 * the helper cannot expand the head. Should this ever
2134 	 * change in future for legacy-rx mode on, then lets also
2135 	 * add xdp->data_meta handling here.
2136 	 */
2137 
2138 	/* allocate a skb to store the frags */
2139 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2140 			       I40E_RX_HDR_SIZE,
2141 			       GFP_ATOMIC | __GFP_NOWARN);
2142 	if (unlikely(!skb))
2143 		return NULL;
2144 
2145 	/* Determine how much of the header to copy into the linear area */
2146 	headlen = size;
2147 	if (headlen > I40E_RX_HDR_SIZE)
2148 		headlen = eth_get_headlen(skb->dev, xdp->data,
2149 					  I40E_RX_HDR_SIZE);
2150 
2151 	/* align pull length to size of long to optimize memcpy performance */
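	/* the copy may extend up to sizeof(long) - 1 bytes past headlen;
	 * this is safe because headlen is capped at I40E_RX_HDR_SIZE, the
	 * skb linear area is at least that large, and tail is only advanced
	 * by headlen
	 */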
2152 	memcpy(__skb_put(skb, headlen), xdp->data,
2153 	       ALIGN(headlen, sizeof(long)));
2154 
2155 	/* update all of the pointers */
2156 	size -= headlen;
2157 	if (size) {
2158 		skb_add_rx_frag(skb, 0, rx_buffer->page,
2159 				rx_buffer->page_offset + headlen,
2160 				size, truesize);
2161 
2162 		/* buffer is used by skb, update page_offset */
2163 #if (PAGE_SIZE < 8192)
2164 		rx_buffer->page_offset ^= truesize;
2165 #else
2166 		rx_buffer->page_offset += truesize;
2167 #endif
2168 	} else {
2169 		/* buffer is unused, reset bias back to rx_buffer */
2170 		rx_buffer->pagecnt_bias++;
2171 	}
2172 
2173 	return skb;
2174 }
2175 
2176 /**
2177  * i40e_build_skb - Build skb around an existing buffer
2178  * @rx_ring: Rx descriptor ring to transact packets on
2179  * @rx_buffer: Rx buffer to pull data from
2180  * @xdp: xdp_buff pointing to the data
2181  *
2182  * This function builds an skb around an existing Rx buffer, taking care
2183  * to set up the skb correctly and avoid any memcpy overhead.
2184  */
2185 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2186 				      struct i40e_rx_buffer *rx_buffer,
2187 				      struct xdp_buff *xdp)
2188 {
2189 	unsigned int metasize = xdp->data - xdp->data_meta;
2190 #if (PAGE_SIZE < 8192)
2191 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2192 #else
2193 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2194 				SKB_DATA_ALIGN(xdp->data_end -
2195 					       xdp->data_hard_start);
2196 #endif
2197 	struct sk_buff *skb;
2198 
2199 	/* Prefetch first cache line of first page. If xdp->data_meta
2200 	 * is unused, this points to the same location as xdp->data;
2201 	 * otherwise we likely have a consumer accessing the first few
2202 	 * bytes of metadata, followed by the actual data.
2203 	 */
2204 	net_prefetch(xdp->data_meta);
2205 
2206 	/* build an skb around the page buffer */
2207 	skb = build_skb(xdp->data_hard_start, truesize);
2208 	if (unlikely(!skb))
2209 		return NULL;
2210 
2211 	/* update pointers within the skb to store the data */
2212 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2213 	__skb_put(skb, xdp->data_end - xdp->data);
2214 	if (metasize)
2215 		skb_metadata_set(skb, metasize);
2216 
2217 	/* buffer is used by skb, update page_offset */
2218 #if (PAGE_SIZE < 8192)
2219 	rx_buffer->page_offset ^= truesize;
2220 #else
2221 	rx_buffer->page_offset += truesize;
2222 #endif
2223 
2224 	return skb;
2225 }
2226 
2227 /**
2228  * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2229  * @rx_ring: rx descriptor ring to transact packets on
2230  * @rx_buffer: rx buffer to pull data from
2231  * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
2232  *
2233  * This function will clean up the contents of the rx_buffer.  It will
2234  * either recycle the buffer or unmap it and free the associated resources.
2235  */
2236 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2237 			       struct i40e_rx_buffer *rx_buffer,
2238 			       int rx_buffer_pgcnt)
2239 {
2240 	if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2241 		/* hand second half of page back to the ring */
2242 		i40e_reuse_rx_page(rx_ring, rx_buffer);
2243 	} else {
2244 		/* we are not reusing the buffer so unmap it */
2245 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2246 				     i40e_rx_pg_size(rx_ring),
2247 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2248 		__page_frag_cache_drain(rx_buffer->page,
2249 					rx_buffer->pagecnt_bias);
2250 		/* clear contents of buffer_info */
2251 		rx_buffer->page = NULL;
2252 	}
2253 }
2254 
2255 /**
2256  * i40e_is_non_eop - process handling of non-EOP buffers
2257  * @rx_ring: Rx ring being processed
2258  * @rx_desc: Rx descriptor for current buffer
2259  *
2260  * If the buffer is an EOP buffer, this function exits returning false,
2261  * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
2262  */
2263 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2264 			    union i40e_rx_desc *rx_desc)
2265 {
2266 	/* if we are the last buffer then there is nothing else to do */
2267 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2268 	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2269 		return false;
2270 
2271 	rx_ring->rx_stats.non_eop_descs++;
2272 
2273 	return true;
2274 }
2275 
2276 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2277 			      struct i40e_ring *xdp_ring);
2278 
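/**
 * i40e_xmit_xdp_tx_ring - Transmit an XDP buffer on the XDP Tx ring
 * @xdp: XDP buffer to be transmitted
 * @xdp_ring: XDP Tx ring used for the transmission
 *
 * Converts the XDP buffer to an xdp_frame and hands it to the XDP Tx ring.
 * Returns I40E_XDP_CONSUMED if the conversion fails, otherwise the result
 * of i40e_xmit_xdp_ring().
 **/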
2279 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2280 {
2281 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2282 
2283 	if (unlikely(!xdpf))
2284 		return I40E_XDP_CONSUMED;
2285 
2286 	return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2287 }
2288 
2289 /**
2290  * i40e_run_xdp - run an XDP program
2291  * @rx_ring: Rx ring being processed
2292  * @xdp: XDP buffer containing the frame
2293  **/
2294 static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
2295 {
2296 	int err, result = I40E_XDP_PASS;
2297 	struct i40e_ring *xdp_ring;
2298 	struct bpf_prog *xdp_prog;
2299 	u32 act;
2300 
2301 	rcu_read_lock();
2302 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2303 
2304 	if (!xdp_prog)
2305 		goto xdp_out;
2306 
2307 	prefetchw(xdp->data_hard_start); /* xdp_frame write */
2308 
2309 	act = bpf_prog_run_xdp(xdp_prog, xdp);
2310 	switch (act) {
2311 	case XDP_PASS:
2312 		break;
2313 	case XDP_TX:
2314 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2315 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2316 		break;
2317 	case XDP_REDIRECT:
2318 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2319 		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2320 		break;
2321 	default:
2322 		bpf_warn_invalid_xdp_action(act);
2323 		fallthrough;
2324 	case XDP_ABORTED:
2325 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2326 		fallthrough; /* handle aborts by dropping packet */
2327 	case XDP_DROP:
2328 		result = I40E_XDP_CONSUMED;
2329 		break;
2330 	}
2331 xdp_out:
2332 	rcu_read_unlock();
2333 	return result;
2334 }
2335 
2336 /**
2337  * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2338  * @rx_ring: Rx ring
2339  * @rx_buffer: Rx buffer to adjust
2340  * @size: Size of adjustment
2341  **/
2342 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2343 				struct i40e_rx_buffer *rx_buffer,
2344 				unsigned int size)
2345 {
2346 	unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
2347 
2348 #if (PAGE_SIZE < 8192)
2349 	rx_buffer->page_offset ^= truesize;
2350 #else
2351 	rx_buffer->page_offset += truesize;
2352 #endif
2353 }
2354 
2355 /**
2356  * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2357  * @xdp_ring: XDP Tx ring
2358  *
2359  * This function updates the XDP Tx ring tail register.
2360  **/
2361 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2362 {
2363 	/* Force memory writes to complete before letting h/w
2364 	 * know there are new descriptors to fetch.
2365 	 */
2366 	wmb();
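	/* writel_relaxed() is sufficient here since the wmb() above already
	 * orders the descriptor writes ahead of the tail update
	 */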
2367 	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2368 }
2369 
2370 /**
2371  * i40e_update_rx_stats - Update Rx ring statistics
2372  * @rx_ring: rx descriptor ring
2373  * @total_rx_bytes: number of bytes received
2374  * @total_rx_packets: number of packets received
2375  *
2376  * This function updates the Rx ring statistics.
2377  **/
2378 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2379 			  unsigned int total_rx_bytes,
2380 			  unsigned int total_rx_packets)
2381 {
2382 	u64_stats_update_begin(&rx_ring->syncp);
2383 	rx_ring->stats.packets += total_rx_packets;
2384 	rx_ring->stats.bytes += total_rx_bytes;
2385 	u64_stats_update_end(&rx_ring->syncp);
2386 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
2387 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2388 }
2389 
2390 /**
2391  * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2392  * @rx_ring: Rx ring
2393  * @xdp_res: Result of the receive batch
2394  *
2395  * This function bumps XDP Tx tail and/or flush redirect map, and
2396  * should be called when a batch of packets has been processed in the
2397  * napi loop.
2398  **/
2399 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2400 {
2401 	if (xdp_res & I40E_XDP_REDIR)
2402 		xdp_do_flush_map();
2403 
2404 	if (xdp_res & I40E_XDP_TX) {
2405 		struct i40e_ring *xdp_ring =
2406 			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2407 
2408 		i40e_xdp_ring_update_tail(xdp_ring);
2409 	}
2410 }
2411 
2412 /**
2413  * i40e_inc_ntc - Advance the next_to_clean index
2414  * @rx_ring: Rx ring
2415  **/
2416 static void i40e_inc_ntc(struct i40e_ring *rx_ring)
2417 {
2418 	u32 ntc = rx_ring->next_to_clean + 1;
2419 
2420 	ntc = (ntc < rx_ring->count) ? ntc : 0;
2421 	rx_ring->next_to_clean = ntc;
2422 	prefetch(I40E_RX_DESC(rx_ring, ntc));
2423 }
2424 
2425 /**
2426  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2427  * @rx_ring: rx descriptor ring to transact packets on
2428  * @budget: Total limit on number of packets to process
2429  *
2430  * This function provides a "bounce buffer" approach to Rx interrupt
2431  * processing.  The advantage to this is that on systems that have
2432  * expensive overhead for IOMMU access this provides a means of avoiding
2433  * it by maintaining the mapping of the page to the system.
2434  *
2435  * Returns amount of work completed
2436  **/
2437 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2438 {
2439 	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
2440 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2441 	unsigned int offset = rx_ring->rx_offset;
2442 	struct sk_buff *skb = rx_ring->skb;
2443 	unsigned int xdp_xmit = 0;
2444 	bool failure = false;
2445 	struct xdp_buff xdp;
2446 	int xdp_res = 0;
2447 
2448 #if (PAGE_SIZE < 8192)
2449 	frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
2450 #endif
2451 	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2452 
2453 	while (likely(total_rx_packets < (unsigned int)budget)) {
2454 		struct i40e_rx_buffer *rx_buffer;
2455 		union i40e_rx_desc *rx_desc;
2456 		int rx_buffer_pgcnt;
2457 		unsigned int size;
2458 		u64 qword;
2459 
2460 		/* return some buffers to hardware, one at a time is too slow */
2461 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2462 			failure = failure ||
2463 				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2464 			cleaned_count = 0;
2465 		}
2466 
2467 		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2468 
2469 		/* status_error_len will always be zero for unused descriptors
2470 		 * because it's cleared in cleanup and overlaps with hdr_addr,
2471 		 * which is always zero because packet split isn't used. If the
2472 		 * hardware wrote DD then the length will be non-zero.
2473 		 */
2474 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2475 
2476 		/* This memory barrier is needed to keep us from reading
2477 		 * any other fields out of the rx_desc until we have
2478 		 * verified the descriptor has been written back.
2479 		 */
2480 		dma_rmb();
2481 
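		/* a programming status descriptor reports the result of a
		 * filter programming request (e.g. Flow Director) rather
		 * than a received frame, so recycle the buffer and move on
		 */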
2482 		if (i40e_rx_is_programming_status(qword)) {
2483 			i40e_clean_programming_status(rx_ring,
2484 						      rx_desc->raw.qword[0],
2485 						      qword);
2486 			rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2487 			i40e_inc_ntc(rx_ring);
2488 			i40e_reuse_rx_page(rx_ring, rx_buffer);
2489 			cleaned_count++;
2490 			continue;
2491 		}
2492 
2493 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2494 		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2495 		if (!size)
2496 			break;
2497 
2498 		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2499 		rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2500 
2501 		/* retrieve a buffer from the ring */
2502 		if (!skb) {
2503 			unsigned char *hard_start;
2504 
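			/* hard_start is the beginning of the buffer including
			 * the headroom an XDP program may use to grow
			 * headers; xdp->data will point offset bytes past it
			 */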
2505 			hard_start = page_address(rx_buffer->page) +
2506 				     rx_buffer->page_offset - offset;
2507 			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
2508 #if (PAGE_SIZE > 4096)
2509 			/* At larger PAGE_SIZE, frame_sz depends on the packet length */
2510 			xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2511 #endif
2512 			xdp_res = i40e_run_xdp(rx_ring, &xdp);
2513 		}
2514 
2515 		if (xdp_res) {
2516 			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2517 				xdp_xmit |= xdp_res;
2518 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2519 			} else {
2520 				rx_buffer->pagecnt_bias++;
2521 			}
2522 			total_rx_bytes += size;
2523 			total_rx_packets++;
2524 		} else if (skb) {
2525 			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2526 		} else if (ring_uses_build_skb(rx_ring)) {
2527 			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2528 		} else {
2529 			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2530 		}
2531 
2532 		/* exit if we failed to retrieve a buffer */
2533 		if (!xdp_res && !skb) {
2534 			rx_ring->rx_stats.alloc_buff_failed++;
2535 			rx_buffer->pagecnt_bias++;
2536 			break;
2537 		}
2538 
2539 		i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2540 		cleaned_count++;
2541 
2542 		i40e_inc_ntc(rx_ring);
2543 		if (i40e_is_non_eop(rx_ring, rx_desc))
2544 			continue;
2545 
2546 		if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2547 			skb = NULL;
2548 			continue;
2549 		}
2550 
2551 		/* probably a little skewed due to removing CRC */
2552 		total_rx_bytes += skb->len;
2553 
2554 		/* populate checksum, VLAN, and protocol */
2555 		i40e_process_skb_fields(rx_ring, rx_desc, skb);
2556 
2557 		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2558 		napi_gro_receive(&rx_ring->q_vector->napi, skb);
2559 		skb = NULL;
2560 
2561 		/* update budget accounting */
2562 		total_rx_packets++;
2563 	}
2564 
2565 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2566 	rx_ring->skb = skb;
2567 
2568 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2569 
2570 	/* guarantee a trip back through this routine if there was a failure */
2571 	return failure ? budget : (int)total_rx_packets;
2572 }
2573 
2574 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2575 {
2576 	u32 val;
2577 
2578 	/* We don't bother with setting the CLEARPBA bit as the data sheet
2579 	 * points out doing so is "meaningless since it was already
2580 	 * auto-cleared". The auto-clearing happens when the interrupt is
2581 	 * asserted.
2582 	 *
2583 	 * Hardware errata 28 also indicates that writing to an
2584 	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2585 	 * an event in the PBA anyway so we need to rely on the automask
2586 	 * to hold pending events for us until the interrupt is re-enabled
2587 	 *
2588 	 * The itr value is reported in microseconds, and the register
2589 	 * value is recorded in 2 microsecond units. For this reason we
2590 	 * only need to shift by the interval shift - 1 instead of the
2591 	 * full value.
2592 	 */
2593 	itr &= I40E_ITR_MASK;
2594 
2595 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2596 	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2597 	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2598 
2599 	return val;
2600 }
2601 
2602 /* a small macro to shorten up some long lines */
2603 #define INTREG I40E_PFINT_DYN_CTLN
2604 
2605 /* The act of updating the ITR will cause it to immediately trigger. In order
2606  * to prevent this from throwing off adaptive update statistics we defer the
2607  * update so that it can only happen so often. So after either Tx or Rx are
2608  * updated we make the adaptive scheme wait until either the ITR completely
2609  * expires via the next_update expiration or we have been through at least
2610  * 3 interrupts.
2611  */
2612 #define ITR_COUNTDOWN_START 3
2613 
2614 /**
2615  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2616  * @vsi: the VSI we care about
2617  * @q_vector: q_vector for which itr is being updated and interrupt enabled
2618  *
2619  **/
2620 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2621 					  struct i40e_q_vector *q_vector)
2622 {
2623 	struct i40e_hw *hw = &vsi->back->hw;
2624 	u32 intval;
2625 
2626 	/* If we don't have MSIX, then we only need to re-enable icr0 */
2627 	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2628 		i40e_irq_dynamic_enable_icr0(vsi->back);
2629 		return;
2630 	}
2631 
2632 	/* These will do nothing if dynamic updates are not enabled */
2633 	i40e_update_itr(q_vector, &q_vector->tx);
2634 	i40e_update_itr(q_vector, &q_vector->rx);
2635 
2636 	/* This block of logic allows us to get away with only updating
2637 	 * one ITR value with each interrupt. The idea is to perform a
2638 	 * pseudo-lazy update with the following criteria.
2639 	 *
2640 	 * 1. Rx is given higher priority than Tx if both are in same state
2641 	 * 2. If we must reduce an ITR, that is given highest priority.
2642 	 * 3. We then give priority to increasing ITR based on amount.
2643 	 */
2644 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2645 		/* Rx ITR needs to be reduced, this is highest priority */
2646 		intval = i40e_buildreg_itr(I40E_RX_ITR,
2647 					   q_vector->rx.target_itr);
2648 		q_vector->rx.current_itr = q_vector->rx.target_itr;
2649 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2650 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2651 		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2652 		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2653 		/* Tx ITR needs to be reduced, this is second priority
2654 		 * Tx ITR needs to be increased more than Rx, fourth priority
2655 		 */
2656 		intval = i40e_buildreg_itr(I40E_TX_ITR,
2657 					   q_vector->tx.target_itr);
2658 		q_vector->tx.current_itr = q_vector->tx.target_itr;
2659 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2660 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2661 		/* Rx ITR needs to be increased, third priority */
2662 		intval = i40e_buildreg_itr(I40E_RX_ITR,
2663 					   q_vector->rx.target_itr);
2664 		q_vector->rx.current_itr = q_vector->rx.target_itr;
2665 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2666 	} else {
2667 		/* No ITR update, lowest priority */
2668 		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2669 		if (q_vector->itr_countdown)
2670 			q_vector->itr_countdown--;
2671 	}
2672 
2673 	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2674 		wr32(hw, INTREG(q_vector->reg_idx), intval);
2675 }
2676 
2677 /**
2678  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2679  * @napi: napi struct with our devices info in it
2680  * @budget: amount of work driver is allowed to do this pass, in packets
2681  *
2682  * This function will clean all queues associated with a q_vector.
2683  *
2684  * Returns the amount of work done
2685  **/
2686 int i40e_napi_poll(struct napi_struct *napi, int budget)
2687 {
2688 	struct i40e_q_vector *q_vector =
2689 			       container_of(napi, struct i40e_q_vector, napi);
2690 	struct i40e_vsi *vsi = q_vector->vsi;
2691 	struct i40e_ring *ring;
2692 	bool clean_complete = true;
2693 	bool arm_wb = false;
2694 	int budget_per_ring;
2695 	int work_done = 0;
2696 
2697 	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2698 		napi_complete(napi);
2699 		return 0;
2700 	}
2701 
2702 	/* Since the actual Tx work is minimal, we can give the Tx a larger
2703 	 * budget and be more aggressive about cleaning up the Tx descriptors.
2704 	 */
2705 	i40e_for_each_ring(ring, q_vector->tx) {
2706 		bool wd = ring->xsk_pool ?
2707 			  i40e_clean_xdp_tx_irq(vsi, ring) :
2708 			  i40e_clean_tx_irq(vsi, ring, budget);
2709 
2710 		if (!wd) {
2711 			clean_complete = false;
2712 			continue;
2713 		}
2714 		arm_wb |= ring->arm_wb;
2715 		ring->arm_wb = false;
2716 	}
2717 
2718 	/* Handle case where we are called by netpoll with a budget of 0 */
2719 	if (budget <= 0)
2720 		goto tx_only;
2721 
2722 	/* normally we have 1 Rx ring per q_vector */
2723 	if (unlikely(q_vector->num_ringpairs > 1))
2724 		/* We attempt to distribute budget to each Rx queue fairly, but
2725 		 * don't allow the budget to go below 1 because that would exit
2726 		 * polling early.
2727 		 */
2728 		budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2729 	else
2730 		/* Max of 1 Rx ring in this q_vector so give it the budget */
2731 		budget_per_ring = budget;
2732 
2733 	i40e_for_each_ring(ring, q_vector->rx) {
2734 		int cleaned = ring->xsk_pool ?
2735 			      i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2736 			      i40e_clean_rx_irq(ring, budget_per_ring);
2737 
2738 		work_done += cleaned;
2739 		/* if we clean as many as budgeted, we must not be done */
2740 		if (cleaned >= budget_per_ring)
2741 			clean_complete = false;
2742 	}
2743 
2744 	/* If work not completed, return budget and polling will return */
2745 	if (!clean_complete) {
2746 		int cpu_id = smp_processor_id();
2747 
2748 		/* It is possible that the interrupt affinity has changed but,
2749 		 * if the cpu is pegged at 100%, polling will never exit while
2750 		 * traffic continues and the interrupt will be stuck on this
2751 		 * cpu.  We check to make sure affinity is correct before we
2752 		 * continue to poll, otherwise we must stop polling so the
2753 		 * interrupt can move to the correct cpu.
2754 		 */
2755 		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2756 			/* Tell napi that we are done polling */
2757 			napi_complete_done(napi, work_done);
2758 
2759 			/* Force an interrupt */
2760 			i40e_force_wb(vsi, q_vector);
2761 
2762 			/* Return budget-1 so that polling stops */
2763 			return budget - 1;
2764 		}
2765 tx_only:
2766 		if (arm_wb) {
2767 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2768 			i40e_enable_wb_on_itr(vsi, q_vector);
2769 		}
2770 		return budget;
2771 	}
2772 
2773 	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2774 		q_vector->arm_wb_state = false;
2775 
2776 	/* Exit the polling mode, but don't re-enable interrupts if stack might
2777 	 * poll us due to busy-polling
2778 	 */
2779 	if (likely(napi_complete_done(napi, work_done)))
2780 		i40e_update_enable_itr(vsi, q_vector);
2781 
2782 	return min(work_done, budget - 1);
2783 }
2784 
2785 /**
2786  * i40e_atr - Add a Flow Director ATR filter
2787  * @tx_ring:  ring to add programming descriptor to
2788  * @skb:      send buffer
2789  * @tx_flags: send tx flags
2790  **/
2791 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2792 		     u32 tx_flags)
2793 {
2794 	struct i40e_filter_program_desc *fdir_desc;
2795 	struct i40e_pf *pf = tx_ring->vsi->back;
2796 	union {
2797 		unsigned char *network;
2798 		struct iphdr *ipv4;
2799 		struct ipv6hdr *ipv6;
2800 	} hdr;
2801 	struct tcphdr *th;
2802 	unsigned int hlen;
2803 	u32 flex_ptype, dtype_cmd;
2804 	int l4_proto;
2805 	u16 i;
2806 
2807 	/* make sure ATR is enabled */
2808 	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2809 		return;
2810 
2811 	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2812 		return;
2813 
2814 	/* if sampling is disabled do nothing */
2815 	if (!tx_ring->atr_sample_rate)
2816 		return;
2817 
2818 	/* Currently only IPv4/IPv6 with TCP is supported */
2819 	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2820 		return;
2821 
2822 	/* snag network header to get L4 type and address */
2823 	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2824 		      skb_inner_network_header(skb) : skb_network_header(skb);
2825 
2826 	/* Note: tx_flags gets modified to reflect inner protocols in
2827 	 * tx_enable_csum function if encap is enabled.
2828 	 */
2829 	if (tx_flags & I40E_TX_FLAGS_IPV4) {
2830 		/* access ihl as u8 to avoid unaligned access on ia64 */
2831 		hlen = (hdr.network[0] & 0x0F) << 2;
2832 		l4_proto = hdr.ipv4->protocol;
2833 	} else {
2834 		/* find the start of the innermost ipv6 header */
2835 		unsigned int inner_hlen = hdr.network - skb->data;
2836 		unsigned int h_offset = inner_hlen;
2837 
2838 		/* this function updates h_offset to the end of the header */
2839 		l4_proto =
2840 		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2841 		/* hlen will contain our best estimate of the TCP header offset */
2842 		hlen = h_offset - inner_hlen;
2843 	}
2844 
2845 	if (l4_proto != IPPROTO_TCP)
2846 		return;
2847 
2848 	th = (struct tcphdr *)(hdr.network + hlen);
2849 
2850 	/* Due to lack of space, no more new filters can be programmed */
2851 	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2852 		return;
2853 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2854 		/* HW ATR eviction will take care of removing filters on FIN
2855 		 * and RST packets.
2856 		 */
2857 		if (th->fin || th->rst)
2858 			return;
2859 	}
2860 
2861 	tx_ring->atr_count++;
2862 
2863 	/* sample on all syn/fin/rst packets or once every atr sample rate */
2864 	if (!th->fin &&
2865 	    !th->syn &&
2866 	    !th->rst &&
2867 	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
2868 		return;
2869 
2870 	tx_ring->atr_count = 0;
2871 
2872 	/* grab the next descriptor */
2873 	i = tx_ring->next_to_use;
2874 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2875 
2876 	i++;
2877 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2878 
2879 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2880 		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
2881 	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2882 		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2883 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2884 		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2885 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2886 
2887 	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2888 
2889 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2890 
2891 	dtype_cmd |= (th->fin || th->rst) ?
2892 		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2893 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2894 		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2895 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2896 
2897 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2898 		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
2899 
2900 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2901 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2902 
2903 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2904 	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2905 		dtype_cmd |=
2906 			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2907 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2908 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2909 	else
2910 		dtype_cmd |=
2911 			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2912 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2913 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2914 
2915 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2916 		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2917 
2918 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2919 	fdir_desc->rsvd = cpu_to_le32(0);
2920 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2921 	fdir_desc->fd_id = cpu_to_le32(0);
2922 }
2923 
2924 /**
2925  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2926  * @skb:     send buffer
2927  * @tx_ring: ring to send buffer on
2928  * @flags:   the tx flags to be set
2929  *
2930  * Checks the skb and sets up the corresponding generic transmit flags
2931  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2932  *
2933  * Returns an error code to indicate the frame should be dropped upon
2934  * error, otherwise returns 0 to indicate the flags have been set properly.
2935  **/
2936 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2937 					     struct i40e_ring *tx_ring,
2938 					     u32 *flags)
2939 {
2940 	__be16 protocol = skb->protocol;
2941 	u32  tx_flags = 0;
2942 
2943 	if (protocol == htons(ETH_P_8021Q) &&
2944 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2945 		/* When HW VLAN acceleration is turned off by the user the
2946 		 * stack sets the protocol to 8021q so that the driver
2947 		 * can take any steps required to support the SW only
2948 		 * VLAN handling.  In our case the driver doesn't need
2949 		 * to take any further steps so just set the protocol
2950 		 * to the encapsulated ethertype.
2951 		 */
2952 		skb->protocol = vlan_get_protocol(skb);
2953 		goto out;
2954 	}
2955 
2956 	/* if we have a HW VLAN tag being added, default to the HW one */
2957 	if (skb_vlan_tag_present(skb)) {
2958 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2959 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2960 	/* else if it is a SW VLAN, check the next protocol and store the tag */
2961 	} else if (protocol == htons(ETH_P_8021Q)) {
2962 		struct vlan_hdr *vhdr, _vhdr;
2963 
2964 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2965 		if (!vhdr)
2966 			return -EINVAL;
2967 
2968 		protocol = vhdr->h_vlan_encapsulated_proto;
2969 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2970 		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2971 	}
2972 
2973 	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2974 		goto out;
2975 
2976 	/* Insert 802.1p priority into VLAN header */
2977 	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2978 	    (skb->priority != TC_PRIO_CONTROL)) {
2979 		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2980 		tx_flags |= (skb->priority & 0x7) <<
2981 				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2982 		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2983 			struct vlan_ethhdr *vhdr;
2984 			int rc;
2985 
2986 			rc = skb_cow_head(skb, 0);
2987 			if (rc < 0)
2988 				return rc;
2989 			vhdr = (struct vlan_ethhdr *)skb->data;
2990 			vhdr->h_vlan_TCI = htons(tx_flags >>
2991 						 I40E_TX_FLAGS_VLAN_SHIFT);
2992 		} else {
2993 			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2994 		}
2995 	}
2996 
2997 out:
2998 	*flags = tx_flags;
2999 	return 0;
3000 }
3001 
3002 /**
3003  * i40e_tso - set up the tso context descriptor
3004  * @first:    pointer to first Tx buffer for xmit
3005  * @hdr_len:  ptr to the size of the packet header
3006  * @cd_type_cmd_tso_mss: Quad Word 1
3007  *
3008  * Returns 0 if no TSO can happen, 1 if TSO is in use, or a negative error code
3009  **/
3010 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
3011 		    u64 *cd_type_cmd_tso_mss)
3012 {
3013 	struct sk_buff *skb = first->skb;
3014 	u64 cd_cmd, cd_tso_len, cd_mss;
3015 	union {
3016 		struct iphdr *v4;
3017 		struct ipv6hdr *v6;
3018 		unsigned char *hdr;
3019 	} ip;
3020 	union {
3021 		struct tcphdr *tcp;
3022 		struct udphdr *udp;
3023 		unsigned char *hdr;
3024 	} l4;
3025 	u32 paylen, l4_offset;
3026 	u16 gso_segs, gso_size;
3027 	int err;
3028 
3029 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3030 		return 0;
3031 
3032 	if (!skb_is_gso(skb))
3033 		return 0;
3034 
3035 	err = skb_cow_head(skb, 0);
3036 	if (err < 0)
3037 		return err;
3038 
3039 	ip.hdr = skb_network_header(skb);
3040 	l4.hdr = skb_transport_header(skb);
3041 
3042 	/* initialize outer IP header fields */
3043 	if (ip.v4->version == 4) {
3044 		ip.v4->tot_len = 0;
3045 		ip.v4->check = 0;
3046 	} else {
3047 		ip.v6->payload_len = 0;
3048 	}
3049 
3050 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
3051 					 SKB_GSO_GRE_CSUM |
3052 					 SKB_GSO_IPXIP4 |
3053 					 SKB_GSO_IPXIP6 |
3054 					 SKB_GSO_UDP_TUNNEL |
3055 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
3056 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3057 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
3058 			l4.udp->len = 0;
3059 
3060 			/* determine offset of outer transport header */
3061 			l4_offset = l4.hdr - skb->data;
3062 
3063 			/* remove payload length from outer checksum */
3064 			paylen = skb->len - l4_offset;
3065 			csum_replace_by_diff(&l4.udp->check,
3066 					     (__force __wsum)htonl(paylen));
3067 		}
3068 
3069 		/* reset pointers to inner headers */
3070 		ip.hdr = skb_inner_network_header(skb);
3071 		l4.hdr = skb_inner_transport_header(skb);
3072 
3073 		/* initialize inner IP header fields */
3074 		if (ip.v4->version == 4) {
3075 			ip.v4->tot_len = 0;
3076 			ip.v4->check = 0;
3077 		} else {
3078 			ip.v6->payload_len = 0;
3079 		}
3080 	}
3081 
3082 	/* determine offset of inner transport header */
3083 	l4_offset = l4.hdr - skb->data;
3084 
3085 	/* remove payload length from inner checksum */
3086 	paylen = skb->len - l4_offset;
3087 
3088 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3089 		csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
3090 		/* compute length of segmentation header */
3091 		*hdr_len = sizeof(*l4.udp) + l4_offset;
3092 	} else {
3093 		csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3094 		/* compute length of segmentation header */
3095 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
3096 	}
3097 
3098 	/* pull values out of skb_shinfo */
3099 	gso_size = skb_shinfo(skb)->gso_size;
3100 	gso_segs = skb_shinfo(skb)->gso_segs;
3101 
3102 	/* update gso_segs and bytecount with header size */
3103 	first->gso_segs = gso_segs;
3104 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
3105 
3106 	/* find the field values */
3107 	cd_cmd = I40E_TX_CTX_DESC_TSO;
3108 	cd_tso_len = skb->len - *hdr_len;
3109 	cd_mss = gso_size;
3110 	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
3111 				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
3112 				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3113 	return 1;
3114 }
3115 
3116 /**
3117  * i40e_tsyn - set up the tsyn context descriptor
3118  * @tx_ring:  ptr to the ring to send
3119  * @skb:      ptr to the skb we're sending
3120  * @tx_flags: the collected send information
3121  * @cd_type_cmd_tso_mss: Quad Word 1
3122  *
3123  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3124  **/
3125 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3126 		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3127 {
3128 	struct i40e_pf *pf;
3129 
3130 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3131 		return 0;
3132 
3133 	/* Tx timestamps cannot be sampled when doing TSO */
3134 	if (tx_flags & I40E_TX_FLAGS_TSO)
3135 		return 0;
3136 
3137 	/* only timestamp the outbound packet if the user has requested it and
3138 	 * we are not already transmitting a packet to be timestamped
3139 	 */
3140 	pf = i40e_netdev_to_pf(tx_ring->netdev);
3141 	if (!(pf->flags & I40E_FLAG_PTP))
3142 		return 0;
3143 
3144 	if (pf->ptp_tx &&
3145 	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3146 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3147 		pf->ptp_tx_start = jiffies;
3148 		pf->ptp_tx_skb = skb_get(skb);
3149 	} else {
3150 		pf->tx_hwtstamp_skipped++;
3151 		return 0;
3152 	}
3153 
3154 	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3155 				I40E_TXD_CTX_QW1_CMD_SHIFT;
3156 
3157 	return 1;
3158 }
3159 
3160 /**
3161  * i40e_tx_enable_csum - Enable Tx checksum offloads
3162  * @skb: send buffer
3163  * @tx_flags: pointer to Tx flags currently set
3164  * @td_cmd: Tx descriptor command bits to set
3165  * @td_offset: Tx descriptor header offsets to set
3166  * @tx_ring: Tx descriptor ring
3167  * @cd_tunneling: ptr to context desc bits
3168  **/
3169 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3170 			       u32 *td_cmd, u32 *td_offset,
3171 			       struct i40e_ring *tx_ring,
3172 			       u32 *cd_tunneling)
3173 {
3174 	union {
3175 		struct iphdr *v4;
3176 		struct ipv6hdr *v6;
3177 		unsigned char *hdr;
3178 	} ip;
3179 	union {
3180 		struct tcphdr *tcp;
3181 		struct udphdr *udp;
3182 		unsigned char *hdr;
3183 	} l4;
3184 	unsigned char *exthdr;
3185 	u32 offset, cmd = 0;
3186 	__be16 frag_off;
3187 	u8 l4_proto = 0;
3188 
3189 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3190 		return 0;
3191 
3192 	ip.hdr = skb_network_header(skb);
3193 	l4.hdr = skb_transport_header(skb);
3194 
3195 	/* compute outer L2 header size */
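	/* the MACLEN descriptor field counts 2-byte words, hence the
	 * division by 2; the IPLEN and L4LEN fields below are in 4-byte
	 * words
	 */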
3196 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3197 
3198 	if (skb->encapsulation) {
3199 		u32 tunnel = 0;
3200 		/* define outer network header type */
3201 		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3202 			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3203 				  I40E_TX_CTX_EXT_IP_IPV4 :
3204 				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3205 
3206 			l4_proto = ip.v4->protocol;
3207 		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3208 			int ret;
3209 
3210 			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3211 
3212 			exthdr = ip.hdr + sizeof(*ip.v6);
3213 			l4_proto = ip.v6->nexthdr;
3214 			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
3215 					       &l4_proto, &frag_off);
3216 			if (ret < 0)
3217 				return -1;
3218 		}
3219 
3220 		/* define outer transport */
3221 		switch (l4_proto) {
3222 		case IPPROTO_UDP:
3223 			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3224 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3225 			break;
3226 		case IPPROTO_GRE:
3227 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3228 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3229 			break;
3230 		case IPPROTO_IPIP:
3231 		case IPPROTO_IPV6:
3232 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3233 			l4.hdr = skb_inner_network_header(skb);
3234 			break;
3235 		default:
3236 			if (*tx_flags & I40E_TX_FLAGS_TSO)
3237 				return -1;
3238 
3239 			skb_checksum_help(skb);
3240 			return 0;
3241 		}
3242 
3243 		/* compute outer L3 header size */
3244 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3245 			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3246 
3247 		/* switch IP header pointer from outer to inner header */
3248 		ip.hdr = skb_inner_network_header(skb);
3249 
3250 		/* compute tunnel header size */
3251 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3252 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3253 
3254 		/* indicate if we need to offload outer UDP header */
3255 		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3256 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3257 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3258 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3259 
3260 		/* record tunnel offload values */
3261 		*cd_tunneling |= tunnel;
3262 
3263 		/* switch L4 header pointer from outer to inner */
3264 		l4.hdr = skb_inner_transport_header(skb);
3265 		l4_proto = 0;
3266 
3267 		/* reset type as we transition from outer to inner headers */
3268 		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3269 		if (ip.v4->version == 4)
3270 			*tx_flags |= I40E_TX_FLAGS_IPV4;
3271 		if (ip.v6->version == 6)
3272 			*tx_flags |= I40E_TX_FLAGS_IPV6;
3273 	}
3274 
3275 	/* Enable IP checksum offloads */
3276 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3277 		l4_proto = ip.v4->protocol;
3278 		/* the stack computes the IP header already, the only time we
3279 		 * need the hardware to recompute it is in the case of TSO.
3280 		 */
3281 		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3282 		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3283 		       I40E_TX_DESC_CMD_IIPT_IPV4;
3284 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3285 		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3286 
3287 		exthdr = ip.hdr + sizeof(*ip.v6);
3288 		l4_proto = ip.v6->nexthdr;
3289 		if (l4.hdr != exthdr)
3290 			ipv6_skip_exthdr(skb, exthdr - skb->data,
3291 					 &l4_proto, &frag_off);
3292 	}
3293 
3294 	/* compute inner L3 header size */
3295 	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3296 
3297 	/* Enable L4 checksum offloads */
3298 	switch (l4_proto) {
3299 	case IPPROTO_TCP:
3300 		/* enable checksum offloads */
3301 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3302 		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3303 		break;
3304 	case IPPROTO_SCTP:
3305 		/* enable SCTP checksum offload */
3306 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3307 		offset |= (sizeof(struct sctphdr) >> 2) <<
3308 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3309 		break;
3310 	case IPPROTO_UDP:
3311 		/* enable UDP checksum offload */
3312 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3313 		offset |= (sizeof(struct udphdr) >> 2) <<
3314 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3315 		break;
3316 	default:
3317 		if (*tx_flags & I40E_TX_FLAGS_TSO)
3318 			return -1;
3319 		skb_checksum_help(skb);
3320 		return 0;
3321 	}
3322 
3323 	*td_cmd |= cmd;
3324 	*td_offset |= offset;
3325 
3326 	return 1;
3327 }
3328 
3329 /**
3330  * i40e_create_tx_ctx - Build the Tx context descriptor
3331  * @tx_ring:  ring to create the descriptor on
3332  * @cd_type_cmd_tso_mss: Quad Word 1
3333  * @cd_tunneling: Quad Word 0 - bits 0-31
3334  * @cd_l2tag2: Quad Word 0 - bits 32-63
3335  **/
3336 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3337 			       const u64 cd_type_cmd_tso_mss,
3338 			       const u32 cd_tunneling, const u32 cd_l2tag2)
3339 {
3340 	struct i40e_tx_context_desc *context_desc;
3341 	int i = tx_ring->next_to_use;
3342 
3343 	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3344 	    !cd_tunneling && !cd_l2tag2)
3345 		return;
3346 
3347 	/* grab the next descriptor */
3348 	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3349 
3350 	i++;
3351 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3352 
3353 	/* cpu_to_le32 and assign to struct fields */
3354 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3355 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3356 	context_desc->rsvd = cpu_to_le16(0);
3357 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3358 }
3359 
3360 /**
3361  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3362  * @tx_ring: the ring to be checked
3363  * @size:    the number of descriptors we want to assure are available
3364  *
3365  * Returns -EBUSY if a stop is needed, else 0
3366  **/
3367 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3368 {
3369 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3370 	/* Memory barrier before checking head and tail */
3371 	smp_mb();
3372 
3373 	/* Check again in a case another CPU has just made room available. */
3374 	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3375 		return -EBUSY;
3376 
3377 	/* A reprieve! - use start_queue because it doesn't call schedule */
3378 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3379 	++tx_ring->tx_stats.restart_queue;
3380 	return 0;
3381 }
3382 
3383 /**
3384  * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3385  * @skb:      send buffer
3386  *
3387  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3388  * and so we need to figure out the cases where we need to linearize the skb.
3389  *
3390  * For TSO we need to count the TSO header and segment payload separately.
3391  * As such we need to check cases where we have 7 fragments or more as we
3392  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3393  * the segment payload in the first descriptor, and another 7 for the
3394  * fragments.
3395  **/
3396 bool __i40e_chk_linearize(struct sk_buff *skb)
3397 {
3398 	const skb_frag_t *frag, *stale;
3399 	int nr_frags, sum;
3400 
3401 	/* no need to check if number of frags is less than 7 */
3402 	nr_frags = skb_shinfo(skb)->nr_frags;
3403 	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3404 		return false;
3405 
3406 	/* We need to walk through the list and validate that each group
3407 	 * of 6 fragments totals at least gso_size.
3408 	 */
3409 	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3410 	frag = &skb_shinfo(skb)->frags[0];
3411 
3412 	/* Initialize size to the negative value of gso_size minus 1.  We
3413 	 * use this as the worst case scenario in which the frag ahead
3414 	 * of us only provides one byte which is why we are limited to 6
3415 	 * descriptors for a single transmit as the header and previous
3416 	 * fragment are already consuming 2 descriptors.
3417 	 */
3418 	sum = 1 - skb_shinfo(skb)->gso_size;
3419 
3420 	/* Add size of frags 0 through 4 to create our initial sum */
3421 	sum += skb_frag_size(frag++);
3422 	sum += skb_frag_size(frag++);
3423 	sum += skb_frag_size(frag++);
3424 	sum += skb_frag_size(frag++);
3425 	sum += skb_frag_size(frag++);
3426 
3427 	/* Walk through fragments adding latest fragment, testing it, and
3428 	 * then removing stale fragments from the sum.
3429 	 */
3430 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3431 		int stale_size = skb_frag_size(stale);
3432 
3433 		sum += skb_frag_size(frag++);
3434 
3435 		/* The stale fragment may present us with a smaller
3436 		 * descriptor than the actual fragment size. To account
3437 		 * for that we need to remove all the data on the front and
3438 		 * figure out what the remainder would be in the last
3439 		 * descriptor associated with the fragment.
3440 		 */
3441 		if (stale_size > I40E_MAX_DATA_PER_TXD) {
3442 			int align_pad = -(skb_frag_off(stale)) &
3443 					(I40E_MAX_READ_REQ_SIZE - 1);
3444 
3445 			sum -= align_pad;
3446 			stale_size -= align_pad;
3447 
3448 			do {
3449 				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3450 				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3451 			} while (stale_size > I40E_MAX_DATA_PER_TXD);
3452 		}
3453 
3454 		/* if sum is negative we failed to make sufficient progress */
3455 		if (sum < 0)
3456 			return true;
3457 
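		/* every fragment beyond the initial window has been added
		 * and tested, so the packet can be sent without linearizing
		 */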
3458 		if (!nr_frags--)
3459 			break;
3460 
3461 		sum -= stale_size;
3462 	}
3463 
3464 	return false;
3465 }
3466 
3467 /**
3468  * i40e_tx_map - Build the Tx descriptor
3469  * @tx_ring:  ring to send buffer on
3470  * @skb:      send buffer
 * @first:    first tx buffer info struct to use for this packet
3472  * @tx_flags: collected send information
3473  * @hdr_len:  size of the packet header
3474  * @td_cmd:   the command field in the descriptor
3475  * @td_offset: offset for checksum or crc
3476  *
3477  * Returns 0 on success, -1 on failure to DMA
3478  **/
3479 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3480 			      struct i40e_tx_buffer *first, u32 tx_flags,
3481 			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
3482 {
3483 	unsigned int data_len = skb->data_len;
3484 	unsigned int size = skb_headlen(skb);
3485 	skb_frag_t *frag;
3486 	struct i40e_tx_buffer *tx_bi;
3487 	struct i40e_tx_desc *tx_desc;
3488 	u16 i = tx_ring->next_to_use;
3489 	u32 td_tag = 0;
3490 	dma_addr_t dma;
3491 	u16 desc_count = 1;
3492 
3493 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3494 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3495 		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3496 			 I40E_TX_FLAGS_VLAN_SHIFT;
3497 	}
3498 
3499 	first->tx_flags = tx_flags;
3500 
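	/* map the linear skb data; the loop below consumes it first and
	 * then maps each page fragment in turn
	 */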
3501 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3502 
3503 	tx_desc = I40E_TX_DESC(tx_ring, i);
3504 	tx_bi = first;
3505 
3506 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3507 		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3508 
3509 		if (dma_mapping_error(tx_ring->dev, dma))
3510 			goto dma_error;
3511 
3512 		/* record length, and DMA address */
3513 		dma_unmap_len_set(tx_bi, len, size);
3514 		dma_unmap_addr_set(tx_bi, dma, dma);
3515 
		/* size the first chunk so subsequent chunks fall on
		 * I40E_MAX_READ_REQ_SIZE boundaries
		 */
3517 		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3518 		tx_desc->buffer_addr = cpu_to_le64(dma);
3519 
3520 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3521 			tx_desc->cmd_type_offset_bsz =
3522 				build_ctob(td_cmd, td_offset,
3523 					   max_data, td_tag);
3524 
3525 			tx_desc++;
3526 			i++;
3527 			desc_count++;
3528 
3529 			if (i == tx_ring->count) {
3530 				tx_desc = I40E_TX_DESC(tx_ring, 0);
3531 				i = 0;
3532 			}
3533 
3534 			dma += max_data;
3535 			size -= max_data;
3536 
3537 			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3538 			tx_desc->buffer_addr = cpu_to_le64(dma);
3539 		}
3540 
3541 		if (likely(!data_len))
3542 			break;
3543 
3544 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3545 							  size, td_tag);
3546 
3547 		tx_desc++;
3548 		i++;
3549 		desc_count++;
3550 
3551 		if (i == tx_ring->count) {
3552 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3553 			i = 0;
3554 		}
3555 
3556 		size = skb_frag_size(frag);
3557 		data_len -= size;
3558 
3559 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3560 				       DMA_TO_DEVICE);
3561 
3562 		tx_bi = &tx_ring->tx_bi[i];
3563 	}
3564 
3565 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3566 
3567 	i++;
3568 	if (i == tx_ring->count)
3569 		i = 0;
3570 
3571 	tx_ring->next_to_use = i;
3572 
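	/* stop the queue now if a worst-case packet would not fit next time;
	 * the Tx clean-up path restarts it once descriptors are reclaimed
	 */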
3573 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3574 
3575 	/* write last descriptor with EOP bit */
3576 	td_cmd |= I40E_TX_DESC_CMD_EOP;
3577 
3578 	/* We OR these values together to check both against 4 (WB_STRIDE)
3579 	 * below. This is safe since we don't re-use desc_count afterwards.
3580 	 */
3581 	desc_count |= ++tx_ring->packet_stride;
3582 
3583 	if (desc_count >= WB_STRIDE) {
3584 		/* write last descriptor with RS bit set */
3585 		td_cmd |= I40E_TX_DESC_CMD_RS;
3586 		tx_ring->packet_stride = 0;
3587 	}
3588 
3589 	tx_desc->cmd_type_offset_bsz =
3590 			build_ctob(td_cmd, td_offset, size, td_tag);
3591 
3592 	skb_tx_timestamp(skb);
3593 
3594 	/* Force memory writes to complete before letting h/w know there
3595 	 * are new descriptors to fetch.
3596 	 *
3597 	 * We also use this memory barrier to make certain all of the
3598 	 * status bits have been updated before next_to_watch is written.
3599 	 */
3600 	wmb();
3601 
3602 	/* set next_to_watch value indicating a packet is present */
3603 	first->next_to_watch = tx_desc;
3604 
3605 	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);
3609 
3610 	return 0;
3611 
3612 dma_error:
3613 	dev_info(tx_ring->dev, "TX DMA map failed\n");
3614 
3615 	/* clear dma mappings for failed tx_bi map */
3616 	for (;;) {
3617 		tx_bi = &tx_ring->tx_bi[i];
3618 		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3619 		if (tx_bi == first)
3620 			break;
3621 		if (i == 0)
3622 			i = tx_ring->count;
3623 		i--;
3624 	}
3625 
3626 	tx_ring->next_to_use = i;
3627 
3628 	return -1;
3629 }
3630 
3631 /**
3632  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3633  * @xdpf: data to transmit
3634  * @xdp_ring: XDP Tx ring
3635  **/
3636 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3637 			      struct i40e_ring *xdp_ring)
3638 {
3639 	u16 i = xdp_ring->next_to_use;
3640 	struct i40e_tx_buffer *tx_bi;
3641 	struct i40e_tx_desc *tx_desc;
3642 	void *data = xdpf->data;
3643 	u32 size = xdpf->len;
3644 	dma_addr_t dma;
3645 
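	/* there is no queue stop/restart for the XDP ring; when it runs out
	 * of descriptors the frame is reported as consumed and counted busy
	 */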
	if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3647 		xdp_ring->tx_stats.tx_busy++;
3648 		return I40E_XDP_CONSUMED;
3649 	}
3650 	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
3651 	if (dma_mapping_error(xdp_ring->dev, dma))
3652 		return I40E_XDP_CONSUMED;
3653 
3654 	tx_bi = &xdp_ring->tx_bi[i];
3655 	tx_bi->bytecount = size;
3656 	tx_bi->gso_segs = 1;
3657 	tx_bi->xdpf = xdpf;
3658 
3659 	/* record length, and DMA address */
3660 	dma_unmap_len_set(tx_bi, len, size);
3661 	dma_unmap_addr_set(tx_bi, dma, dma);
3662 
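	/* a single descriptor carries the whole frame: ICRC requests CRC
	 * insertion and I40E_TXD_CMD sets EOP and RS so the completion is
	 * written back
	 */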
3663 	tx_desc = I40E_TX_DESC(xdp_ring, i);
3664 	tx_desc->buffer_addr = cpu_to_le64(dma);
3665 	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3666 						  | I40E_TXD_CMD,
3667 						  0, size, 0);
3668 
3669 	/* Make certain all of the status bits have been updated
3670 	 * before next_to_watch is written.
3671 	 */
3672 	smp_wmb();
3673 
3674 	xdp_ring->xdp_tx_active++;
3675 	i++;
3676 	if (i == xdp_ring->count)
3677 		i = 0;
3678 
3679 	tx_bi->next_to_watch = tx_desc;
3680 	xdp_ring->next_to_use = i;
3681 
3682 	return I40E_XDP_TX;
3683 }
3684 
3685 /**
3686  * i40e_xmit_frame_ring - Sends buffer on Tx ring
3687  * @skb:     send buffer
3688  * @tx_ring: ring to send buffer on
3689  *
3690  * Returns NETDEV_TX_OK if sent, else an error code
3691  **/
3692 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3693 					struct i40e_ring *tx_ring)
3694 {
3695 	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3696 	u32 cd_tunneling = 0, cd_l2tag2 = 0;
3697 	struct i40e_tx_buffer *first;
3698 	u32 td_offset = 0;
3699 	u32 tx_flags = 0;
3700 	__be16 protocol;
3701 	u32 td_cmd = 0;
3702 	u8 hdr_len = 0;
3703 	int tso, count;
3704 	int tsyn;
3705 
3706 	/* prefetch the data, we'll need it later */
3707 	prefetch(skb->data);
3708 
3709 	i40e_trace(xmit_frame_ring, skb, tx_ring);
3710 
3711 	count = i40e_xmit_descriptor_count(skb);
3712 	if (i40e_chk_linearize(skb, count)) {
3713 		if (__skb_linearize(skb)) {
3714 			dev_kfree_skb_any(skb);
3715 			return NETDEV_TX_OK;
3716 		}
3717 		count = i40e_txd_use_count(skb->len);
3718 		tx_ring->tx_stats.tx_linearize++;
3719 	}
3720 
3721 	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3722 	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3723 	 *       + 4 desc gap to avoid the cache line where head is,
3724 	 *       + 1 desc for context descriptor,
3725 	 * otherwise try next time
3726 	 */
3727 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3728 		tx_ring->tx_stats.tx_busy++;
3729 		return NETDEV_TX_BUSY;
3730 	}
3731 
3732 	/* record the location of the first descriptor for this packet */
3733 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
3734 	first->skb = skb;
3735 	first->bytecount = skb->len;
3736 	first->gso_segs = 1;
3737 
3738 	/* prepare the xmit flags */
3739 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3740 		goto out_drop;
3741 
3742 	/* obtain protocol of skb */
3743 	protocol = vlan_get_protocol(skb);
3744 
3745 	/* setup IPv4/IPv6 offloads */
3746 	if (protocol == htons(ETH_P_IP))
3747 		tx_flags |= I40E_TX_FLAGS_IPV4;
3748 	else if (protocol == htons(ETH_P_IPV6))
3749 		tx_flags |= I40E_TX_FLAGS_IPV6;
3750 
3751 	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3752 
3753 	if (tso < 0)
3754 		goto out_drop;
3755 	else if (tso)
3756 		tx_flags |= I40E_TX_FLAGS_TSO;
3757 
3758 	/* Always offload the checksum, since it's in the data descriptor */
3759 	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3760 				  tx_ring, &cd_tunneling);
3761 	if (tso < 0)
3762 		goto out_drop;
3763 
3764 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3765 
3766 	if (tsyn)
3767 		tx_flags |= I40E_TX_FLAGS_TSYN;
3768 
3769 	/* always enable CRC insertion offload */
3770 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
3771 
3772 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3773 			   cd_tunneling, cd_l2tag2);
3774 
3775 	/* Add Flow Director ATR if it's enabled.
3776 	 *
3777 	 * NOTE: this must always be directly before the data descriptor.
3778 	 */
3779 	i40e_atr(tx_ring, skb, tx_flags);
3780 
3781 	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3782 			td_cmd, td_offset))
3783 		goto cleanup_tx_tstamp;
3784 
3785 	return NETDEV_TX_OK;
3786 
3787 out_drop:
3788 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3789 	dev_kfree_skb_any(first->skb);
3790 	first->skb = NULL;
3791 cleanup_tx_tstamp:
3792 	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3793 		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3794 
3795 		dev_kfree_skb_any(pf->ptp_tx_skb);
3796 		pf->ptp_tx_skb = NULL;
3797 		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3798 	}
3799 
3800 	return NETDEV_TX_OK;
3801 }
3802 
3803 /**
3804  * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3805  * @skb:    send buffer
3806  * @netdev: network interface device structure
3807  *
3808  * Returns NETDEV_TX_OK if sent, else an error code
3809  **/
3810 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3811 {
3812 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3813 	struct i40e_vsi *vsi = np->vsi;
3814 	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3815 
	/* hardware can't handle really short frames; hardware padding works
3817 	 * beyond this point
3818 	 */
3819 	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3820 		return NETDEV_TX_OK;
3821 
3822 	return i40e_xmit_frame_ring(skb, tx_ring);
3823 }
3824 
3825 /**
3826  * i40e_xdp_xmit - Implements ndo_xdp_xmit
3827  * @dev: netdev
3828  * @n: number of frames
3829  * @frames: array of XDP buffer pointers
3830  * @flags: XDP extra info
3831  *
 * Returns the number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (the caller must handle freeing the frames).
3837  **/
3838 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3839 		  u32 flags)
3840 {
3841 	struct i40e_netdev_priv *np = netdev_priv(dev);
3842 	unsigned int queue_index = smp_processor_id();
3843 	struct i40e_vsi *vsi = np->vsi;
3844 	struct i40e_pf *pf = vsi->back;
3845 	struct i40e_ring *xdp_ring;
3846 	int nxmit = 0;
3847 	int i;
3848 
3849 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
3850 		return -ENETDOWN;
3851 
3852 	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
3853 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
3854 		return -ENXIO;
3855 
3856 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3857 		return -EINVAL;
3858 
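	/* one XDP Tx ring exists per queue pair; the current CPU id, bounds
	 * checked above, selects the ring this CPU transmits on
	 */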
3859 	xdp_ring = vsi->xdp_rings[queue_index];
3860 
3861 	for (i = 0; i < n; i++) {
3862 		struct xdp_frame *xdpf = frames[i];
3863 		int err;
3864 
3865 		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3866 		if (err != I40E_XDP_TX)
3867 			break;
3868 		nxmit++;
3869 	}
3870 
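	/* bump the tail register only when the caller requests a flush so a
	 * batch of frames needs just one doorbell write
	 */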
3871 	if (unlikely(flags & XDP_XMIT_FLUSH))
3872 		i40e_xdp_ring_update_tail(xdp_ring);
3873 
3874 	return nxmit;
3875 }
3876