1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2014 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26 
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
29 #include "i40e.h"
30 #include "i40e_prototype.h"
31 
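/**
 * build_ctob - build the Tx data descriptor cmd_type_offset_bsz quadword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: packed header offset fields
 * @size: size of the data buffer in bytes
 * @td_tag: L2 tag 1 value
 **/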
32 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
33 				u32 td_tag)
34 {
35 	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
36 			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
37 			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
38 			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
39 			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
40 }
41 
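/* descriptor commands set on the last descriptor of every packet:
 * End of Packet and Report Status
 */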
42 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
43 #define I40E_FD_CLEAN_DELAY 10
44 /**
45  * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: the flow director filter parameters used to build the descriptor
47  * @raw_packet: the pre-allocated packet buffer for FDir
48  * @pf: The PF pointer
49  * @add: True for add/update, False for remove
50  **/
51 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
52 			     struct i40e_pf *pf, bool add)
53 {
54 	struct i40e_filter_program_desc *fdir_desc;
55 	struct i40e_tx_buffer *tx_buf, *first;
56 	struct i40e_tx_desc *tx_desc;
57 	struct i40e_ring *tx_ring;
58 	unsigned int fpt, dcc;
59 	struct i40e_vsi *vsi;
60 	struct device *dev;
61 	dma_addr_t dma;
62 	u32 td_cmd = 0;
63 	u16 delay = 0;
64 	u16 i;
65 
66 	/* find existing FDIR VSI */
67 	vsi = NULL;
68 	for (i = 0; i < pf->num_alloc_vsi; i++)
69 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
70 			vsi = pf->vsi[i];
71 	if (!vsi)
72 		return -ENOENT;
73 
74 	tx_ring = vsi->tx_rings[0];
75 	dev = tx_ring->dev;
76 
77 	/* we need two descriptors to add/del a filter and we can wait */
78 	do {
79 		if (I40E_DESC_UNUSED(tx_ring) > 1)
80 			break;
81 		msleep_interruptible(1);
82 		delay++;
83 	} while (delay < I40E_FD_CLEAN_DELAY);
84 
85 	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
86 		return -EAGAIN;
87 
88 	dma = dma_map_single(dev, raw_packet,
89 			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
90 	if (dma_mapping_error(dev, dma))
91 		goto dma_fail;
92 
93 	/* grab the next descriptor */
94 	i = tx_ring->next_to_use;
95 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
96 	first = &tx_ring->tx_bi[i];
97 	memset(first, 0, sizeof(struct i40e_tx_buffer));
98 
99 	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
100 
101 	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
102 	      I40E_TXD_FLTR_QW0_QINDEX_MASK;
103 
104 	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
105 	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
106 
107 	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
108 	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;
109 
110 	/* Use LAN VSI Id if not programmed by user */
111 	if (fdir_data->dest_vsi == 0)
112 		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
113 		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
114 	else
115 		fpt |= ((u32)fdir_data->dest_vsi <<
116 			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
117 		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
118 
119 	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
120 
121 	if (add)
122 		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
123 		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
124 	else
125 		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
126 		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
127 
128 	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
129 	       I40E_TXD_FLTR_QW1_DEST_MASK;
130 
131 	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
132 	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
133 
134 	if (fdir_data->cnt_index != 0) {
135 		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
136 		dcc |= ((u32)fdir_data->cnt_index <<
137 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
138 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
139 	}
140 
141 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
142 	fdir_desc->rsvd = cpu_to_le32(0);
143 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
144 	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
145 
146 	/* Now program a dummy descriptor */
147 	i = tx_ring->next_to_use;
148 	tx_desc = I40E_TX_DESC(tx_ring, i);
149 	tx_buf = &tx_ring->tx_bi[i];
150 
151 	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
152 
153 	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
154 
	/* record the length and DMA address */
156 	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
157 	dma_unmap_addr_set(tx_buf, dma, dma);
158 
159 	tx_desc->buffer_addr = cpu_to_le64(dma);
160 	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
161 
162 	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
163 	tx_buf->raw_buf = (void *)raw_packet;
164 
165 	tx_desc->cmd_type_offset_bsz =
166 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
167 
168 	/* Force memory writes to complete before letting h/w
169 	 * know there are new descriptors to fetch.
170 	 */
171 	wmb();
172 
173 	/* Mark the data descriptor to be watched */
174 	first->next_to_watch = tx_desc;
175 
176 	writel(tx_ring->next_to_use, tx_ring->tail);
177 	return 0;
178 
179 dma_fail:
180 	return -1;
181 }
182 
183 #define IP_HEADER_OFFSET 14
184 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
185 /**
186  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
187  * @vsi: pointer to the targeted VSI
188  * @fd_data: the flow director data required for the FDir descriptor
189  * @add: true adds a filter, false removes it
190  *
191  * Returns 0 if the filters were successfully added or removed
192  **/
193 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
194 				   struct i40e_fdir_filter *fd_data,
195 				   bool add)
196 {
197 	struct i40e_pf *pf = vsi->back;
198 	struct udphdr *udp;
199 	struct iphdr *ip;
200 	bool err = false;
201 	u8 *raw_packet;
202 	int ret;
203 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204 		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
206 
207 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
208 	if (!raw_packet)
209 		return -ENOMEM;
210 	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
211 
212 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213 	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214 	      + sizeof(struct iphdr));
215 
216 	ip->daddr = fd_data->dst_ip[0];
217 	udp->dest = fd_data->dst_port;
218 	ip->saddr = fd_data->src_ip[0];
219 	udp->source = fd_data->src_port;
220 
221 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
222 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
223 	if (ret) {
224 		dev_info(&pf->pdev->dev,
225 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
226 			 fd_data->pctype, fd_data->fd_id, ret);
227 		err = true;
228 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
229 		if (add)
230 			dev_info(&pf->pdev->dev,
231 				 "Filter OK for PCTYPE %d loc = %d\n",
232 				 fd_data->pctype, fd_data->fd_id);
233 		else
234 			dev_info(&pf->pdev->dev,
235 				 "Filter deleted for PCTYPE %d loc = %d\n",
236 				 fd_data->pctype, fd_data->fd_id);
237 	}
238 	return err ? -EOPNOTSUPP : 0;
239 }
240 
241 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
242 /**
243  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
244  * @vsi: pointer to the targeted VSI
245  * @fd_data: the flow director data required for the FDir descriptor
246  * @add: true adds a filter, false removes it
247  *
248  * Returns 0 if the filters were successfully added or removed
249  **/
250 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
251 				   struct i40e_fdir_filter *fd_data,
252 				   bool add)
253 {
254 	struct i40e_pf *pf = vsi->back;
255 	struct tcphdr *tcp;
256 	struct iphdr *ip;
257 	bool err = false;
258 	u8 *raw_packet;
259 	int ret;
260 	/* Dummy packet */
261 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
262 		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
263 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
264 		0x0, 0x72, 0, 0, 0, 0};
265 
266 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
267 	if (!raw_packet)
268 		return -ENOMEM;
269 	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
270 
271 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
272 	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
273 	      + sizeof(struct iphdr));
274 
275 	ip->daddr = fd_data->dst_ip[0];
276 	tcp->dest = fd_data->dst_port;
277 	ip->saddr = fd_data->src_ip[0];
278 	tcp->source = fd_data->src_port;
279 
280 	if (add) {
281 		pf->fd_tcp_rule++;
282 		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
283 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
284 				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
285 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
286 		}
287 	} else {
288 		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
289 				  (pf->fd_tcp_rule - 1) : 0;
290 		if (pf->fd_tcp_rule == 0) {
291 			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
292 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
293 				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
294 		}
295 	}
296 
297 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
298 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
299 
300 	if (ret) {
301 		dev_info(&pf->pdev->dev,
302 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
303 			 fd_data->pctype, fd_data->fd_id, ret);
304 		err = true;
305 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
306 		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
308 				 fd_data->pctype, fd_data->fd_id);
309 		else
310 			dev_info(&pf->pdev->dev,
311 				 "Filter deleted for PCTYPE %d loc = %d\n",
312 				 fd_data->pctype, fd_data->fd_id);
313 	}
314 
315 	return err ? -EOPNOTSUPP : 0;
316 }
317 
318 /**
319  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
320  * a specific flow spec
321  * @vsi: pointer to the targeted VSI
322  * @fd_data: the flow director data required for the FDir descriptor
323  * @add: true adds a filter, false removes it
324  *
325  * Always returns -EOPNOTSUPP
326  **/
327 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
328 				    struct i40e_fdir_filter *fd_data,
329 				    bool add)
330 {
331 	return -EOPNOTSUPP;
332 }
333 
334 #define I40E_IP_DUMMY_PACKET_LEN 34
335 /**
336  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
337  * a specific flow spec
338  * @vsi: pointer to the targeted VSI
339  * @fd_data: the flow director data required for the FDir descriptor
340  * @add: true adds a filter, false removes it
341  *
342  * Returns 0 if the filters were successfully added or removed
343  **/
344 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
345 				  struct i40e_fdir_filter *fd_data,
346 				  bool add)
347 {
348 	struct i40e_pf *pf = vsi->back;
349 	struct iphdr *ip;
350 	bool err = false;
351 	u8 *raw_packet;
352 	int ret;
353 	int i;
354 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
355 		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
356 		0, 0, 0, 0};
357 
358 	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
359 	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
360 		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
361 		if (!raw_packet)
362 			return -ENOMEM;
363 		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
364 		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
365 
366 		ip->saddr = fd_data->src_ip[0];
367 		ip->daddr = fd_data->dst_ip[0];
368 		ip->protocol = 0;
369 
370 		fd_data->pctype = i;
371 		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
372 
373 		if (ret) {
374 			dev_info(&pf->pdev->dev,
375 				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
376 				 fd_data->pctype, fd_data->fd_id, ret);
377 			err = true;
378 		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
379 			if (add)
380 				dev_info(&pf->pdev->dev,
381 					 "Filter OK for PCTYPE %d loc = %d\n",
382 					 fd_data->pctype, fd_data->fd_id);
383 			else
384 				dev_info(&pf->pdev->dev,
385 					 "Filter deleted for PCTYPE %d loc = %d\n",
386 					 fd_data->pctype, fd_data->fd_id);
387 		}
388 	}
389 
390 	return err ? -EOPNOTSUPP : 0;
391 }
392 
393 /**
394  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
395  * @vsi: pointer to the targeted VSI
 * @input: flow director filter entry to add or delete
397  * @add: true adds a filter, false removes it
398  *
399  **/
400 int i40e_add_del_fdir(struct i40e_vsi *vsi,
401 		      struct i40e_fdir_filter *input, bool add)
402 {
403 	struct i40e_pf *pf = vsi->back;
404 	int ret;
405 
406 	switch (input->flow_type & ~FLOW_EXT) {
407 	case TCP_V4_FLOW:
408 		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
409 		break;
410 	case UDP_V4_FLOW:
411 		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
412 		break;
413 	case SCTP_V4_FLOW:
414 		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
415 		break;
416 	case IPV4_FLOW:
417 		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
418 		break;
419 	case IP_USER_FLOW:
420 		switch (input->ip4_proto) {
421 		case IPPROTO_TCP:
422 			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
423 			break;
424 		case IPPROTO_UDP:
425 			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
426 			break;
427 		case IPPROTO_SCTP:
428 			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
429 			break;
430 		default:
431 			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
432 			break;
433 		}
434 		break;
435 	default:
436 		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
437 			 input->flow_type);
438 		ret = -EINVAL;
439 	}
440 
	/* The buffer allocated here is freed by i40e_clean_tx_ring() */
442 	return ret;
443 }
444 
445 /**
446  * i40e_fd_handle_status - check the Programming Status for FD
447  * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for the programming status, not a packet descriptor
449  * @prog_id: the id originally used for programming
450  *
451  * This is used to verify if the FD programming or invalidation
452  * requested by SW to the HW is successful or not and take actions accordingly.
453  **/
454 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455 				  union i40e_rx_desc *rx_desc, u8 prog_id)
456 {
457 	struct i40e_pf *pf = rx_ring->vsi->back;
458 	struct pci_dev *pdev = pf->pdev;
459 	u32 fcnt_prog, fcnt_avail;
460 	u32 error;
461 	u64 qw;
462 
463 	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
464 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
465 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
466 
467 	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
468 		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
469 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
470 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
471 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
472 				 pf->fd_inv);
473 
474 		/* Check if the programming error is for ATR.
475 		 * If so, auto disable ATR and set a state for
476 		 * flush in progress. Next time we come here if flush is in
477 		 * progress do nothing, once flush is complete the state will
478 		 * be cleared.
479 		 */
480 		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
481 			return;
482 
483 		pf->fd_add_err++;
484 		/* store the current atr filter count */
485 		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
486 
487 		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
488 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
489 			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
490 			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
491 		}
492 
493 		/* filter programming failed most likely due to table full */
494 		fcnt_prog = i40e_get_global_fd_count(pf);
495 		fcnt_avail = pf->fdir_pf_filter_count;
496 		/* If ATR is running fcnt_prog can quickly change,
497 		 * if we are very close to full, it makes sense to disable
498 		 * FD ATR/SB and then re-enable it when there is room.
499 		 */
500 		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
501 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
502 			    !(pf->auto_disable_flags &
503 				     I40E_FLAG_FD_SB_ENABLED)) {
504 				if (I40E_DEBUG_FD & pf->hw.debug_mask)
505 					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
506 				pf->auto_disable_flags |=
507 							I40E_FLAG_FD_SB_ENABLED;
508 			}
509 		} else {
510 			dev_info(&pdev->dev,
511 				"FD filter programming failed due to incorrect filter parameters\n");
512 		}
513 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
514 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
515 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
516 				 rx_desc->wb.qword0.hi_dword.fd_id);
517 	}
518 }
519 
520 /**
521  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
522  * @ring:      the ring that owns the buffer
523  * @tx_buffer: the buffer to free
524  **/
525 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
526 					    struct i40e_tx_buffer *tx_buffer)
527 {
528 	if (tx_buffer->skb) {
529 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
530 			kfree(tx_buffer->raw_buf);
531 		else
532 			dev_kfree_skb_any(tx_buffer->skb);
533 
534 		if (dma_unmap_len(tx_buffer, len))
535 			dma_unmap_single(ring->dev,
536 					 dma_unmap_addr(tx_buffer, dma),
537 					 dma_unmap_len(tx_buffer, len),
538 					 DMA_TO_DEVICE);
539 	} else if (dma_unmap_len(tx_buffer, len)) {
540 		dma_unmap_page(ring->dev,
541 			       dma_unmap_addr(tx_buffer, dma),
542 			       dma_unmap_len(tx_buffer, len),
543 			       DMA_TO_DEVICE);
544 	}
545 	tx_buffer->next_to_watch = NULL;
546 	tx_buffer->skb = NULL;
547 	dma_unmap_len_set(tx_buffer, len, 0);
548 	/* tx_buffer must be completely set up in the transmit path */
549 }
550 
551 /**
 * i40e_clean_tx_ring - Free all Tx ring buffers
553  * @tx_ring: ring to be cleaned
554  **/
555 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
556 {
557 	unsigned long bi_size;
558 	u16 i;
559 
560 	/* ring already cleared, nothing to do */
561 	if (!tx_ring->tx_bi)
562 		return;
563 
564 	/* Free all the Tx ring sk_buffs */
565 	for (i = 0; i < tx_ring->count; i++)
566 		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
567 
568 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
569 	memset(tx_ring->tx_bi, 0, bi_size);
570 
571 	/* Zero out the descriptor ring */
572 	memset(tx_ring->desc, 0, tx_ring->size);
573 
574 	tx_ring->next_to_use = 0;
575 	tx_ring->next_to_clean = 0;
576 
577 	if (!tx_ring->netdev)
578 		return;
579 
580 	/* cleanup Tx queue statistics */
581 	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
582 						  tx_ring->queue_index));
583 }
584 
585 /**
586  * i40e_free_tx_resources - Free Tx resources per queue
587  * @tx_ring: Tx descriptor ring for a specific queue
588  *
589  * Free all transmit software resources
590  **/
591 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
592 {
593 	i40e_clean_tx_ring(tx_ring);
594 	kfree(tx_ring->tx_bi);
595 	tx_ring->tx_bi = NULL;
596 
597 	if (tx_ring->desc) {
598 		dma_free_coherent(tx_ring->dev, tx_ring->size,
599 				  tx_ring->desc, tx_ring->dma);
600 		tx_ring->desc = NULL;
601 	}
602 }
603 
604 /**
 * i40e_get_tx_pending - how many Tx descriptors are not yet processed
 * @ring: the ring of descriptors
607  *
608  * Since there is no access to the ring head register
609  * in XL710, we need to use our local copies
610  **/
611 u32 i40e_get_tx_pending(struct i40e_ring *ring)
612 {
613 	u32 head, tail;
614 
615 	head = i40e_get_head(ring);
616 	tail = readl(ring->tail);
617 
618 	if (head != tail)
619 		return (head < tail) ?
620 			tail - head : (tail + ring->count - head);
621 
622 	return 0;
623 }
624 
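/* request a descriptor writeback once no more than WB_STRIDE descriptors
 * are still pending (see the arm_wb handling in i40e_clean_tx_irq)
 */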
625 #define WB_STRIDE 0x3
626 
627 /**
628  * i40e_clean_tx_irq - Reclaim resources after transmit completes
629  * @tx_ring:  tx ring to clean
630  * @budget:   how many cleans we're allowed
631  *
 * Returns true if there's any budget left (i.e. the clean is finished)
633  **/
634 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
635 {
636 	u16 i = tx_ring->next_to_clean;
637 	struct i40e_tx_buffer *tx_buf;
638 	struct i40e_tx_desc *tx_head;
639 	struct i40e_tx_desc *tx_desc;
640 	unsigned int total_packets = 0;
641 	unsigned int total_bytes = 0;
642 
643 	tx_buf = &tx_ring->tx_bi[i];
644 	tx_desc = I40E_TX_DESC(tx_ring, i);
645 	i -= tx_ring->count;
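	/* i is biased by -count so that the ring-wrap check below is a
	 * simple test against zero; the real index is restored by adding
	 * count back once the loop is done
	 */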
646 
647 	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
648 
649 	do {
650 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
651 
652 		/* if next_to_watch is not set then there is no work pending */
653 		if (!eop_desc)
654 			break;
655 
656 		/* prevent any other reads prior to eop_desc */
657 		read_barrier_depends();
658 
659 		/* we have caught up to head, no work left to do */
660 		if (tx_head == tx_desc)
661 			break;
662 
663 		/* clear next_to_watch to prevent false hangs */
664 		tx_buf->next_to_watch = NULL;
665 
666 		/* update the statistics for this packet */
667 		total_bytes += tx_buf->bytecount;
668 		total_packets += tx_buf->gso_segs;
669 
670 		/* free the skb */
671 		dev_consume_skb_any(tx_buf->skb);
672 
673 		/* unmap skb header data */
674 		dma_unmap_single(tx_ring->dev,
675 				 dma_unmap_addr(tx_buf, dma),
676 				 dma_unmap_len(tx_buf, len),
677 				 DMA_TO_DEVICE);
678 
679 		/* clear tx_buffer data */
680 		tx_buf->skb = NULL;
681 		dma_unmap_len_set(tx_buf, len, 0);
682 
683 		/* unmap remaining buffers */
684 		while (tx_desc != eop_desc) {
685 
686 			tx_buf++;
687 			tx_desc++;
688 			i++;
689 			if (unlikely(!i)) {
690 				i -= tx_ring->count;
691 				tx_buf = tx_ring->tx_bi;
692 				tx_desc = I40E_TX_DESC(tx_ring, 0);
693 			}
694 
695 			/* unmap any remaining paged data */
696 			if (dma_unmap_len(tx_buf, len)) {
697 				dma_unmap_page(tx_ring->dev,
698 					       dma_unmap_addr(tx_buf, dma),
699 					       dma_unmap_len(tx_buf, len),
700 					       DMA_TO_DEVICE);
701 				dma_unmap_len_set(tx_buf, len, 0);
702 			}
703 		}
704 
705 		/* move us one more past the eop_desc for start of next pkt */
706 		tx_buf++;
707 		tx_desc++;
708 		i++;
709 		if (unlikely(!i)) {
710 			i -= tx_ring->count;
711 			tx_buf = tx_ring->tx_bi;
712 			tx_desc = I40E_TX_DESC(tx_ring, 0);
713 		}
714 
715 		prefetch(tx_desc);
716 
717 		/* update budget accounting */
718 		budget--;
719 	} while (likely(budget));
720 
721 	i += tx_ring->count;
722 	tx_ring->next_to_clean = i;
723 	u64_stats_update_begin(&tx_ring->syncp);
724 	tx_ring->stats.bytes += total_bytes;
725 	tx_ring->stats.packets += total_packets;
726 	u64_stats_update_end(&tx_ring->syncp);
727 	tx_ring->q_vector->tx.total_bytes += total_bytes;
728 	tx_ring->q_vector->tx.total_packets += total_packets;
729 
730 	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
731 		unsigned int j = 0;
732 
		/* If fewer than WB_STRIDE + 1 descriptors are waiting to be
		 * written back, kick the hardware to force the writeback in
		 * case we stay in NAPI.  In this mode on X722 we do not
		 * enable interrupts.
		 */
738 		j = i40e_get_tx_pending(tx_ring);
739 
740 		if (budget &&
741 		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
742 		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
743 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
744 			tx_ring->arm_wb = true;
745 	}
746 
747 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
748 						      tx_ring->queue_index),
749 				  total_packets, total_bytes);
750 
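/* wake the queue when at least this many descriptors are unused again */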
751 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
752 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
753 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
754 		/* Make sure that anybody stopping the queue after this
755 		 * sees the new next_to_clean.
756 		 */
757 		smp_mb();
758 		if (__netif_subqueue_stopped(tx_ring->netdev,
759 					     tx_ring->queue_index) &&
760 		   !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
761 			netif_wake_subqueue(tx_ring->netdev,
762 					    tx_ring->queue_index);
763 			++tx_ring->tx_stats.restart_queue;
764 		}
765 	}
766 
767 	return !!budget;
768 }
769 
770 /**
771  * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
772  * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
774  *
775  **/
776 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
777 {
778 	u16 flags = q_vector->tx.ring[0].flags;
779 
780 	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
781 		u32 val;
782 
783 		if (q_vector->arm_wb_state)
784 			return;
785 
786 		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
787 
788 		wr32(&vsi->back->hw,
789 		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
790 					 vsi->base_vector - 1),
791 		     val);
792 		q_vector->arm_wb_state = true;
793 	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
794 		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
795 			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
796 			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
797 			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
798 			  /* allow 00 to be written to the index */
799 
800 		wr32(&vsi->back->hw,
801 		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
802 					 vsi->base_vector - 1), val);
803 	} else {
804 		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
805 			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
806 			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
807 			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
808 			/* allow 00 to be written to the index */
809 
810 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
811 	}
812 }
813 
814 /**
815  * i40e_set_new_dynamic_itr - Find new ITR level
816  * @rc: structure containing ring performance data
817  *
818  * Returns true if ITR changed, false if not
819  *
820  * Stores a new ITR value based on packets and byte counts during
821  * the last interrupt.  The advantage of per interrupt computation
822  * is faster updates and more accurate ITR for the current traffic
823  * pattern.  Constants in this function were computed based on
824  * theoretical maximum wire speed and thresholds were set based on
825  * testing data as well as attempting to minimize response time
826  * while increasing bulk throughput.
827  **/
828 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
829 {
830 	enum i40e_latency_range new_latency_range = rc->latency_range;
831 	struct i40e_q_vector *qv = rc->ring->q_vector;
832 	u32 new_itr = rc->itr;
833 	int bytes_per_int;
834 	int usecs;
835 
836 	if (rc->total_packets == 0 || !rc->itr)
837 		return false;
838 
	/* simple throttle rate management
840 	 *   0-10MB/s   lowest (50000 ints/s)
841 	 *  10-20MB/s   low    (20000 ints/s)
842 	 *  20-1249MB/s bulk   (18000 ints/s)
843 	 *  > 40000 Rx packets per second (8000 ints/s)
844 	 *
	 * The math works out because the divisor is in 10^(-6), which
	 * turns the bytes/us input value into MB/s.  Be sure to use usecs,
	 * as the values written to the ITR registers are in 2 usec
	 * increments, and use the smoothed values that the countdown
	 * timer gives us.
850 	 */
851 	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
852 	bytes_per_int = rc->total_bytes / usecs;
853 
854 	switch (new_latency_range) {
855 	case I40E_LOWEST_LATENCY:
856 		if (bytes_per_int > 10)
857 			new_latency_range = I40E_LOW_LATENCY;
858 		break;
859 	case I40E_LOW_LATENCY:
860 		if (bytes_per_int > 20)
861 			new_latency_range = I40E_BULK_LATENCY;
862 		else if (bytes_per_int <= 10)
863 			new_latency_range = I40E_LOWEST_LATENCY;
864 		break;
865 	case I40E_BULK_LATENCY:
866 	case I40E_ULTRA_LATENCY:
867 	default:
868 		if (bytes_per_int <= 20)
869 			new_latency_range = I40E_LOW_LATENCY;
870 		break;
871 	}
872 
873 	/* this is to adjust RX more aggressively when streaming small
874 	 * packets.  The value of 40000 was picked as it is just beyond
875 	 * what the hardware can receive per second if in low latency
876 	 * mode.
877 	 */
878 #define RX_ULTRA_PACKET_RATE 40000
879 
880 	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
881 	    (&qv->rx == rc))
882 		new_latency_range = I40E_ULTRA_LATENCY;
883 
884 	rc->latency_range = new_latency_range;
885 
886 	switch (new_latency_range) {
887 	case I40E_LOWEST_LATENCY:
888 		new_itr = I40E_ITR_50K;
889 		break;
890 	case I40E_LOW_LATENCY:
891 		new_itr = I40E_ITR_20K;
892 		break;
893 	case I40E_BULK_LATENCY:
894 		new_itr = I40E_ITR_18K;
895 		break;
896 	case I40E_ULTRA_LATENCY:
897 		new_itr = I40E_ITR_8K;
898 		break;
899 	default:
900 		break;
901 	}
902 
903 	rc->total_bytes = 0;
904 	rc->total_packets = 0;
905 
906 	if (new_itr != rc->itr) {
907 		rc->itr = new_itr;
908 		return true;
909 	}
910 
911 	return false;
912 }
913 
914 /**
915  * i40e_clean_programming_status - clean the programming status descriptor
916  * @rx_ring: the rx ring that has this descriptor
917  * @rx_desc: the rx descriptor written back by HW
918  *
919  * Flow director should handle FD_FILTER_STATUS to check its filter programming
920  * status being successful or not and take actions accordingly. FCoE should
921  * handle its context/filter programming/invalidation status and take actions.
922  *
923  **/
924 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
925 					  union i40e_rx_desc *rx_desc)
926 {
927 	u64 qw;
928 	u8 id;
929 
930 	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
931 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
932 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
933 
934 	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
935 		i40e_fd_handle_status(rx_ring, rx_desc, id);
936 #ifdef I40E_FCOE
937 	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
938 		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
939 		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
940 #endif
941 }
942 
943 /**
944  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
945  * @tx_ring: the tx ring to set up
946  *
947  * Return 0 on success, negative on error
948  **/
949 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
950 {
951 	struct device *dev = tx_ring->dev;
952 	int bi_size;
953 
954 	if (!dev)
955 		return -ENOMEM;
956 
957 	/* warn if we are about to overwrite the pointer */
958 	WARN_ON(tx_ring->tx_bi);
959 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
960 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
961 	if (!tx_ring->tx_bi)
962 		goto err;
963 
964 	/* round up to nearest 4K */
965 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add a u32 for head writeback; the alignment below guarantees
	 * this is at least one cache line in size
	 */
969 	tx_ring->size += sizeof(u32);
970 	tx_ring->size = ALIGN(tx_ring->size, 4096);
971 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
972 					   &tx_ring->dma, GFP_KERNEL);
973 	if (!tx_ring->desc) {
974 		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
975 			 tx_ring->size);
976 		goto err;
977 	}
978 
979 	tx_ring->next_to_use = 0;
980 	tx_ring->next_to_clean = 0;
981 	return 0;
982 
983 err:
984 	kfree(tx_ring->tx_bi);
985 	tx_ring->tx_bi = NULL;
986 	return -ENOMEM;
987 }
988 
989 /**
990  * i40e_clean_rx_ring - Free Rx buffers
991  * @rx_ring: ring to be cleaned
992  **/
993 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
994 {
995 	struct device *dev = rx_ring->dev;
996 	struct i40e_rx_buffer *rx_bi;
997 	unsigned long bi_size;
998 	u16 i;
999 
1000 	/* ring already cleared, nothing to do */
1001 	if (!rx_ring->rx_bi)
1002 		return;
1003 
1004 	if (ring_is_ps_enabled(rx_ring)) {
1005 		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
1006 
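		/* all header buffers were carved from one coherent
		 * allocation (see i40e_alloc_rx_headers), so free it once
		 * using the first buffer's addresses
		 */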
1007 		rx_bi = &rx_ring->rx_bi[0];
1008 		if (rx_bi->hdr_buf) {
1009 			dma_free_coherent(dev,
1010 					  bufsz,
1011 					  rx_bi->hdr_buf,
1012 					  rx_bi->dma);
1013 			for (i = 0; i < rx_ring->count; i++) {
1014 				rx_bi = &rx_ring->rx_bi[i];
1015 				rx_bi->dma = 0;
1016 				rx_bi->hdr_buf = NULL;
1017 			}
1018 		}
1019 	}
1020 	/* Free all the Rx ring sk_buffs */
1021 	for (i = 0; i < rx_ring->count; i++) {
1022 		rx_bi = &rx_ring->rx_bi[i];
1023 		if (rx_bi->dma) {
1024 			dma_unmap_single(dev,
1025 					 rx_bi->dma,
1026 					 rx_ring->rx_buf_len,
1027 					 DMA_FROM_DEVICE);
1028 			rx_bi->dma = 0;
1029 		}
1030 		if (rx_bi->skb) {
1031 			dev_kfree_skb(rx_bi->skb);
1032 			rx_bi->skb = NULL;
1033 		}
1034 		if (rx_bi->page) {
1035 			if (rx_bi->page_dma) {
1036 				dma_unmap_page(dev,
1037 					       rx_bi->page_dma,
1038 					       PAGE_SIZE / 2,
1039 					       DMA_FROM_DEVICE);
1040 				rx_bi->page_dma = 0;
1041 			}
1042 			__free_page(rx_bi->page);
1043 			rx_bi->page = NULL;
1044 			rx_bi->page_offset = 0;
1045 		}
1046 	}
1047 
1048 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1049 	memset(rx_ring->rx_bi, 0, bi_size);
1050 
1051 	/* Zero out the descriptor ring */
1052 	memset(rx_ring->desc, 0, rx_ring->size);
1053 
1054 	rx_ring->next_to_clean = 0;
1055 	rx_ring->next_to_use = 0;
1056 }
1057 
1058 /**
1059  * i40e_free_rx_resources - Free Rx resources
1060  * @rx_ring: ring to clean the resources from
1061  *
1062  * Free all receive software resources
1063  **/
1064 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1065 {
1066 	i40e_clean_rx_ring(rx_ring);
1067 	kfree(rx_ring->rx_bi);
1068 	rx_ring->rx_bi = NULL;
1069 
1070 	if (rx_ring->desc) {
1071 		dma_free_coherent(rx_ring->dev, rx_ring->size,
1072 				  rx_ring->desc, rx_ring->dma);
1073 		rx_ring->desc = NULL;
1074 	}
1075 }
1076 
1077 /**
1078  * i40e_alloc_rx_headers - allocate rx header buffers
1079  * @rx_ring: ring to alloc buffers
1080  *
1081  * Allocate rx header buffers for the entire ring. As these are static,
1082  * this is only called when setting up a new ring.
1083  **/
1084 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1085 {
1086 	struct device *dev = rx_ring->dev;
1087 	struct i40e_rx_buffer *rx_bi;
1088 	dma_addr_t dma;
1089 	void *buffer;
1090 	int buf_size;
1091 	int i;
1092 
1093 	if (rx_ring->rx_bi[0].hdr_buf)
1094 		return;
1095 	/* Make sure the buffers don't cross cache line boundaries. */
1096 	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
1097 	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1098 				    &dma, GFP_KERNEL);
1099 	if (!buffer)
1100 		return;
1101 	for (i = 0; i < rx_ring->count; i++) {
1102 		rx_bi = &rx_ring->rx_bi[i];
1103 		rx_bi->dma = dma + (i * buf_size);
1104 		rx_bi->hdr_buf = buffer + (i * buf_size);
1105 	}
1106 }
1107 
1108 /**
1109  * i40e_setup_rx_descriptors - Allocate Rx descriptors
1110  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1111  *
1112  * Returns 0 on success, negative on failure
1113  **/
1114 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1115 {
1116 	struct device *dev = rx_ring->dev;
1117 	int bi_size;
1118 
1119 	/* warn if we are about to overwrite the pointer */
1120 	WARN_ON(rx_ring->rx_bi);
1121 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1122 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1123 	if (!rx_ring->rx_bi)
1124 		goto err;
1125 
1126 	u64_stats_init(&rx_ring->syncp);
1127 
1128 	/* Round up to nearest 4K */
1129 	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1130 		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1131 		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1132 	rx_ring->size = ALIGN(rx_ring->size, 4096);
1133 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1134 					   &rx_ring->dma, GFP_KERNEL);
1135 
1136 	if (!rx_ring->desc) {
1137 		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1138 			 rx_ring->size);
1139 		goto err;
1140 	}
1141 
1142 	rx_ring->next_to_clean = 0;
1143 	rx_ring->next_to_use = 0;
1144 
1145 	return 0;
1146 err:
1147 	kfree(rx_ring->rx_bi);
1148 	rx_ring->rx_bi = NULL;
1149 	return -ENOMEM;
1150 }
1151 
1152 /**
1153  * i40e_release_rx_desc - Store the new tail and head values
1154  * @rx_ring: ring to bump
 * @val: new tail index (also stored as next_to_use)
1156  **/
1157 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1158 {
1159 	rx_ring->next_to_use = val;
1160 	/* Force memory writes to complete before letting h/w
1161 	 * know there are new descriptors to fetch.  (Only
1162 	 * applicable for weak-ordered memory model archs,
1163 	 * such as IA-64).
1164 	 */
1165 	wmb();
1166 	writel(val, rx_ring->tail);
1167 }
1168 
1169 /**
1170  * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
1171  * @rx_ring: ring to place buffers on
1172  * @cleaned_count: number of buffers to replace
1173  **/
1174 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1175 {
1176 	u16 i = rx_ring->next_to_use;
1177 	union i40e_rx_desc *rx_desc;
1178 	struct i40e_rx_buffer *bi;
1179 
1180 	/* do nothing if no valid netdev defined */
1181 	if (!rx_ring->netdev || !cleaned_count)
1182 		return;
1183 
1184 	while (cleaned_count--) {
1185 		rx_desc = I40E_RX_DESC(rx_ring, i);
1186 		bi = &rx_ring->rx_bi[i];
1187 
1188 		if (bi->skb) /* desc is in use */
1189 			goto no_buffers;
1190 		if (!bi->page) {
1191 			bi->page = alloc_page(GFP_ATOMIC);
1192 			if (!bi->page) {
1193 				rx_ring->rx_stats.alloc_page_failed++;
1194 				goto no_buffers;
1195 			}
1196 		}
1197 
1198 		if (!bi->page_dma) {
1199 			/* use a half page if we're re-using */
1200 			bi->page_offset ^= PAGE_SIZE / 2;
1201 			bi->page_dma = dma_map_page(rx_ring->dev,
1202 						    bi->page,
1203 						    bi->page_offset,
1204 						    PAGE_SIZE / 2,
1205 						    DMA_FROM_DEVICE);
1206 			if (dma_mapping_error(rx_ring->dev,
1207 					      bi->page_dma)) {
1208 				rx_ring->rx_stats.alloc_page_failed++;
1209 				bi->page_dma = 0;
1210 				goto no_buffers;
1211 			}
1212 		}
1213 
1214 		dma_sync_single_range_for_device(rx_ring->dev,
1215 						 bi->dma,
1216 						 0,
1217 						 rx_ring->rx_hdr_len,
1218 						 DMA_FROM_DEVICE);
1219 		/* Refresh the desc even if buffer_addrs didn't change
1220 		 * because each write-back erases this info.
1221 		 */
1222 		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1223 		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1224 		i++;
1225 		if (i == rx_ring->count)
1226 			i = 0;
1227 	}
1228 
1229 no_buffers:
1230 	if (rx_ring->next_to_use != i)
1231 		i40e_release_rx_desc(rx_ring, i);
1232 }
1233 
1234 /**
1235  * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1236  * @rx_ring: ring to place buffers on
1237  * @cleaned_count: number of buffers to replace
1238  **/
1239 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
1240 {
1241 	u16 i = rx_ring->next_to_use;
1242 	union i40e_rx_desc *rx_desc;
1243 	struct i40e_rx_buffer *bi;
1244 	struct sk_buff *skb;
1245 
1246 	/* do nothing if no valid netdev defined */
1247 	if (!rx_ring->netdev || !cleaned_count)
1248 		return;
1249 
1250 	while (cleaned_count--) {
1251 		rx_desc = I40E_RX_DESC(rx_ring, i);
1252 		bi = &rx_ring->rx_bi[i];
1253 		skb = bi->skb;
1254 
1255 		if (!skb) {
1256 			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1257 							rx_ring->rx_buf_len);
1258 			if (!skb) {
1259 				rx_ring->rx_stats.alloc_buff_failed++;
1260 				goto no_buffers;
1261 			}
1262 			/* initialize queue mapping */
1263 			skb_record_rx_queue(skb, rx_ring->queue_index);
1264 			bi->skb = skb;
1265 		}
1266 
1267 		if (!bi->dma) {
1268 			bi->dma = dma_map_single(rx_ring->dev,
1269 						 skb->data,
1270 						 rx_ring->rx_buf_len,
1271 						 DMA_FROM_DEVICE);
1272 			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1273 				rx_ring->rx_stats.alloc_buff_failed++;
1274 				bi->dma = 0;
1275 				goto no_buffers;
1276 			}
1277 		}
1278 
1279 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1280 		rx_desc->read.hdr_addr = 0;
1281 		i++;
1282 		if (i == rx_ring->count)
1283 			i = 0;
1284 	}
1285 
1286 no_buffers:
1287 	if (rx_ring->next_to_use != i)
1288 		i40e_release_rx_desc(rx_ring, i);
1289 }
1290 
1291 /**
1292  * i40e_receive_skb - Send a completed packet up the stack
1293  * @rx_ring:  rx ring in play
1294  * @skb: packet to send up
1295  * @vlan_tag: vlan tag for packet
1296  **/
1297 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1298 			     struct sk_buff *skb, u16 vlan_tag)
1299 {
1300 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
1301 
1302 	if (vlan_tag & VLAN_VID_MASK)
1303 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1304 
1305 	napi_gro_receive(&q_vector->napi, skb);
1306 }
1307 
1308 /**
1309  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1310  * @vsi: the VSI we care about
1311  * @skb: skb currently being received and modified
1312  * @rx_status: status value of last descriptor in packet
1313  * @rx_error: error value of last descriptor in packet
1314  * @rx_ptype: ptype value of last descriptor in packet
1315  **/
1316 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1317 				    struct sk_buff *skb,
1318 				    u32 rx_status,
1319 				    u32 rx_error,
1320 				    u16 rx_ptype)
1321 {
1322 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1323 	bool ipv4 = false, ipv6 = false;
1324 	bool ipv4_tunnel, ipv6_tunnel;
1325 	__wsum rx_udp_csum;
1326 	struct iphdr *iph;
1327 	__sum16 csum;
1328 
1329 	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1330 		     (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1331 	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1332 		     (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1333 
1334 	skb->ip_summed = CHECKSUM_NONE;
1335 
1336 	/* Rx csum enabled and ip headers found? */
1337 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1338 		return;
1339 
1340 	/* did the hardware decode the packet and checksum? */
1341 	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1342 		return;
1343 
1344 	/* both known and outer_ip must be set for the below code to work */
1345 	if (!(decoded.known && decoded.outer_ip))
1346 		return;
1347 
1348 	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1349 	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1350 		ipv4 = true;
1351 	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1352 		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1353 		ipv6 = true;
1354 
1355 	if (ipv4 &&
1356 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1357 			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1358 		goto checksum_fail;
1359 
1360 	/* likely incorrect csum if alternate IP extension headers found */
1361 	if (ipv6 &&
1362 	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1363 		/* don't increment checksum err here, non-fatal err */
1364 		return;
1365 
1366 	/* there was some L4 error, count error and punt packet to the stack */
1367 	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1368 		goto checksum_fail;
1369 
1370 	/* handle packets that were not able to be checksummed due
1371 	 * to arrival speed, in this case the stack can compute
1372 	 * the csum.
1373 	 */
1374 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1375 		return;
1376 
1377 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
1378 	 * it in the driver, hardware does not do it for us.
1379 	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1380 	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
1382 	 */
1383 	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1384 	    (ipv4_tunnel)) {
1385 		skb->transport_header = skb->mac_header +
1386 					sizeof(struct ethhdr) +
1387 					(ip_hdr(skb)->ihl * 4);
1388 
1389 		/* Add 4 bytes for VLAN tagged packets */
1390 		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1391 					  skb->protocol == htons(ETH_P_8021AD))
1392 					  ? VLAN_HLEN : 0;
1393 
1394 		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1395 		    (udp_hdr(skb)->check != 0)) {
1396 			rx_udp_csum = udp_csum(skb);
1397 			iph = ip_hdr(skb);
1398 			csum = csum_tcpudp_magic(
1399 					iph->saddr, iph->daddr,
1400 					(skb->len - skb_transport_offset(skb)),
1401 					IPPROTO_UDP, rx_udp_csum);
1402 
1403 			if (udp_hdr(skb)->check != csum)
1404 				goto checksum_fail;
1405 
		} /* else it's GRE and so no outer UDP header */
1407 	}
1408 
1409 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1410 	skb->csum_level = ipv4_tunnel || ipv6_tunnel;
1411 
1412 	return;
1413 
1414 checksum_fail:
1415 	vsi->back->hw_csum_rx_error++;
1416 }
1417 
1418 /**
1419  * i40e_rx_hash - returns the hash value from the Rx descriptor
1420  * @ring: descriptor ring
1421  * @rx_desc: specific descriptor
1422  **/
1423 static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1424 			       union i40e_rx_desc *rx_desc)
1425 {
1426 	const __le64 rss_mask =
1427 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1428 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1429 
1430 	if ((ring->netdev->features & NETIF_F_RXHASH) &&
1431 	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1432 		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1433 	else
1434 		return 0;
1435 }
1436 
1437 /**
1438  * i40e_ptype_to_hash - get a hash type
1439  * @ptype: the ptype value from the descriptor
1440  *
1441  * Returns a hash type to be used by skb_set_hash
1442  **/
1443 static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1444 {
1445 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1446 
1447 	if (!decoded.known)
1448 		return PKT_HASH_TYPE_NONE;
1449 
1450 	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1451 	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1452 		return PKT_HASH_TYPE_L4;
1453 	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1454 		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1455 		return PKT_HASH_TYPE_L3;
1456 	else
1457 		return PKT_HASH_TYPE_L2;
1458 }
1459 
1460 /**
1461  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
1462  * @rx_ring:  rx ring to clean
1463  * @budget:   how many cleans we're allowed
1464  *
 * Returns number of packets cleaned
1466  **/
1467 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1468 {
1469 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1470 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1471 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1472 	const int current_node = numa_mem_id();
1473 	struct i40e_vsi *vsi = rx_ring->vsi;
1474 	u16 i = rx_ring->next_to_clean;
1475 	union i40e_rx_desc *rx_desc;
1476 	u32 rx_error, rx_status;
1477 	u8 rx_ptype;
1478 	u64 qword;
1479 
1480 	if (budget <= 0)
1481 		return 0;
1482 
1483 	do {
1484 		struct i40e_rx_buffer *rx_bi;
1485 		struct sk_buff *skb;
1486 		u16 vlan_tag;
1487 		/* return some buffers to hardware, one at a time is too slow */
1488 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1489 			i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1490 			cleaned_count = 0;
1491 		}
1492 
1493 		i = rx_ring->next_to_clean;
1494 		rx_desc = I40E_RX_DESC(rx_ring, i);
1495 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1496 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1497 			I40E_RXD_QW1_STATUS_SHIFT;
1498 
1499 		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1500 			break;
1501 
1502 		/* This memory barrier is needed to keep us from reading
1503 		 * any other fields out of the rx_desc until we know the
1504 		 * DD bit is set.
1505 		 */
1506 		dma_rmb();
1507 		if (i40e_rx_is_programming_status(qword)) {
1508 			i40e_clean_programming_status(rx_ring, rx_desc);
1509 			I40E_RX_INCREMENT(rx_ring, i);
1510 			continue;
1511 		}
1512 		rx_bi = &rx_ring->rx_bi[i];
1513 		skb = rx_bi->skb;
1514 		if (likely(!skb)) {
1515 			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1516 							rx_ring->rx_hdr_len);
1517 			if (!skb) {
1518 				rx_ring->rx_stats.alloc_buff_failed++;
1519 				break;
1520 			}
1521 
1522 			/* initialize queue mapping */
1523 			skb_record_rx_queue(skb, rx_ring->queue_index);
1524 			/* we are reusing so sync this buffer for CPU use */
1525 			dma_sync_single_range_for_cpu(rx_ring->dev,
1526 						      rx_bi->dma,
1527 						      0,
1528 						      rx_ring->rx_hdr_len,
1529 						      DMA_FROM_DEVICE);
1530 		}
1531 		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1532 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1533 		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1534 				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1535 		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1536 			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1537 
1538 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1539 			   I40E_RXD_QW1_ERROR_SHIFT;
1540 		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1541 		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1542 
1543 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1544 			   I40E_RXD_QW1_PTYPE_SHIFT;
1545 		prefetch(rx_bi->page);
1546 		rx_bi->skb = NULL;
1547 		cleaned_count++;
1548 		if (rx_hbo || rx_sph) {
1549 			int len;
1550 
1551 			if (rx_hbo)
1552 				len = I40E_RX_HDR_SIZE;
1553 			else
1554 				len = rx_header_len;
1555 			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1556 		} else if (skb->len == 0) {
1557 			int len;
1558 
1559 			len = (rx_packet_len > skb_headlen(skb) ?
1560 				skb_headlen(skb) : rx_packet_len);
1561 			memcpy(__skb_put(skb, len),
1562 			       rx_bi->page + rx_bi->page_offset,
1563 			       len);
1564 			rx_bi->page_offset += len;
1565 			rx_packet_len -= len;
1566 		}
1567 
1568 		/* Get the rest of the data if this was a header split */
1569 		if (rx_packet_len) {
1570 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1571 					   rx_bi->page,
1572 					   rx_bi->page_offset,
1573 					   rx_packet_len);
1574 
1575 			skb->len += rx_packet_len;
1576 			skb->data_len += rx_packet_len;
1577 			skb->truesize += rx_packet_len;
1578 
1579 			if ((page_count(rx_bi->page) == 1) &&
1580 			    (page_to_nid(rx_bi->page) == current_node))
1581 				get_page(rx_bi->page);
1582 			else
1583 				rx_bi->page = NULL;
1584 
1585 			dma_unmap_page(rx_ring->dev,
1586 				       rx_bi->page_dma,
1587 				       PAGE_SIZE / 2,
1588 				       DMA_FROM_DEVICE);
1589 			rx_bi->page_dma = 0;
1590 		}
1591 		I40E_RX_INCREMENT(rx_ring, i);
1592 
1593 		if (unlikely(
1594 		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1595 			struct i40e_rx_buffer *next_buffer;
1596 
1597 			next_buffer = &rx_ring->rx_bi[i];
1598 			next_buffer->skb = skb;
1599 			rx_ring->rx_stats.non_eop_descs++;
1600 			continue;
1601 		}
1602 
1603 		/* ERR_MASK will only have valid bits if EOP set */
1604 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1605 			dev_kfree_skb_any(skb);
1606 			continue;
1607 		}
1608 
1609 		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1610 			     i40e_ptype_to_hash(rx_ptype));
1611 		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1612 			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1613 					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1614 					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1615 			rx_ring->last_rx_timestamp = jiffies;
1616 		}
1617 
1618 		/* probably a little skewed due to removing CRC */
1619 		total_rx_bytes += skb->len;
1620 		total_rx_packets++;
1621 
1622 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1623 
1624 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1625 
1626 		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1627 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1628 			 : 0;
1629 #ifdef I40E_FCOE
1630 		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1631 			dev_kfree_skb_any(skb);
1632 			continue;
1633 		}
1634 #endif
1635 		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
1636 		i40e_receive_skb(rx_ring, skb, vlan_tag);
1637 
1638 		rx_desc->wb.qword1.status_error_len = 0;
1639 
1640 	} while (likely(total_rx_packets < budget));
1641 
1642 	u64_stats_update_begin(&rx_ring->syncp);
1643 	rx_ring->stats.packets += total_rx_packets;
1644 	rx_ring->stats.bytes += total_rx_bytes;
1645 	u64_stats_update_end(&rx_ring->syncp);
1646 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
1647 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1648 
1649 	return total_rx_packets;
1650 }
1651 
1652 /**
1653  * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1654  * @rx_ring:  rx ring to clean
1655  * @budget:   how many cleans we're allowed
1656  *
1657  * Returns number of packets cleaned
1658  **/
1659 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1660 {
1661 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1662 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1663 	struct i40e_vsi *vsi = rx_ring->vsi;
1664 	union i40e_rx_desc *rx_desc;
1665 	u32 rx_error, rx_status;
1666 	u16 rx_packet_len;
1667 	u8 rx_ptype;
1668 	u64 qword;
1669 	u16 i;
1670 
1671 	do {
1672 		struct i40e_rx_buffer *rx_bi;
1673 		struct sk_buff *skb;
1674 		u16 vlan_tag;
1675 		/* return some buffers to hardware, one at a time is too slow */
1676 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1677 			i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1678 			cleaned_count = 0;
1679 		}
1680 
1681 		i = rx_ring->next_to_clean;
1682 		rx_desc = I40E_RX_DESC(rx_ring, i);
1683 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1684 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1685 			I40E_RXD_QW1_STATUS_SHIFT;
1686 
1687 		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1688 			break;
1689 
1690 		/* This memory barrier is needed to keep us from reading
1691 		 * any other fields out of the rx_desc until we know the
1692 		 * DD bit is set.
1693 		 */
1694 		dma_rmb();
1695 
1696 		if (i40e_rx_is_programming_status(qword)) {
1697 			i40e_clean_programming_status(rx_ring, rx_desc);
1698 			I40E_RX_INCREMENT(rx_ring, i);
1699 			continue;
1700 		}
1701 		rx_bi = &rx_ring->rx_bi[i];
1702 		skb = rx_bi->skb;
1703 		prefetch(skb->data);
1704 
1705 		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1706 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1707 
1708 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1709 			   I40E_RXD_QW1_ERROR_SHIFT;
1710 		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1711 
1712 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1713 			   I40E_RXD_QW1_PTYPE_SHIFT;
1714 		rx_bi->skb = NULL;
1715 		cleaned_count++;
1716 
		/* Get the header and possibly the whole packet.
		 * If this is an skb from a previous receive, dma will be 0.
1719 		 */
1720 		skb_put(skb, rx_packet_len);
1721 		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1722 				 DMA_FROM_DEVICE);
1723 		rx_bi->dma = 0;
1724 
1725 		I40E_RX_INCREMENT(rx_ring, i);
1726 
1727 		if (unlikely(
1728 		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1729 			rx_ring->rx_stats.non_eop_descs++;
1730 			continue;
1731 		}
1732 
1733 		/* ERR_MASK will only have valid bits if EOP set */
1734 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1735 			dev_kfree_skb_any(skb);
1736 			continue;
1737 		}
1738 
1739 		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1740 			     i40e_ptype_to_hash(rx_ptype));
1741 		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1742 			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1743 					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1744 					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1745 			rx_ring->last_rx_timestamp = jiffies;
1746 		}
1747 
1748 		/* probably a little skewed due to removing CRC */
1749 		total_rx_bytes += skb->len;
1750 		total_rx_packets++;
1751 
1752 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1753 
1754 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1755 
1756 		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1757 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1758 			 : 0;
1759 #ifdef I40E_FCOE
1760 		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1761 			dev_kfree_skb_any(skb);
1762 			continue;
1763 		}
1764 #endif
1765 		i40e_receive_skb(rx_ring, skb, vlan_tag);
1766 
1767 		rx_desc->wb.qword1.status_error_len = 0;
1768 	} while (likely(total_rx_packets < budget));
1769 
1770 	u64_stats_update_begin(&rx_ring->syncp);
1771 	rx_ring->stats.packets += total_rx_packets;
1772 	rx_ring->stats.bytes += total_rx_bytes;
1773 	u64_stats_update_end(&rx_ring->syncp);
1774 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
1775 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1776 
1777 	return total_rx_packets;
1778 }
1779 
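/**
 * i40e_buildreg_itr - build a value for the PFINT_DYN_CTLN register
 * @type: which ITR index to select (Rx, Tx, or no ITR update)
 * @itr: the new interval to program for that ITR index
 *
 * The value returned keeps the interrupt enabled, clears the PBA, selects
 * the requested ITR index, and encodes the new interval for it.
 **/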
1780 static u32 i40e_buildreg_itr(const int type, const u16 itr)
1781 {
1782 	u32 val;
1783 
1784 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1785 	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1786 	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1787 	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1788 
1789 	return val;
1790 }
1791 
1792 /* a small macro to shorten up some long lines */
1793 #define INTREG I40E_PFINT_DYN_CTLN
1794 
1795 /**
1796  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1797  * @vsi: the VSI we care about
1798  * @q_vector: q_vector for which itr is being updated and interrupt enabled
1799  *
1800  **/
1801 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1802 					  struct i40e_q_vector *q_vector)
1803 {
1804 	struct i40e_hw *hw = &vsi->back->hw;
1805 	bool rx = false, tx = false;
1806 	u32 rxval, txval;
1807 	int vector;
1808 
1809 	vector = (q_vector->v_idx + vsi->base_vector);
1810 
1811 	/* avoid dynamic calculation if in countdown mode OR if
1812 	 * dynamic ITR is disabled for both Rx and Tx
1813 	 */
1814 	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1815 
1816 	if (q_vector->itr_countdown > 0 ||
1817 	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
1818 	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
1819 		goto enable_int;
1820 	}
1821 
1822 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1823 		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1824 		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
1825 	}
1826 
1827 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1828 		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1829 		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
1830 	}
1831 
1832 	if (rx || tx) {
1833 		/* get the higher of the two ITR adjustments and
1834 		 * use the same value for both ITR registers
1835 		 * when in adaptive mode (Rx and/or Tx)
1836 		 */
1837 		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1838 
1839 		q_vector->tx.itr = q_vector->rx.itr = itr;
1840 		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1841 		tx = true;
1842 		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1843 		rx = true;
1844 	}
1845 
1846 	/* only need to enable the interrupt once, but need
1847 	 * to possibly update both ITR values
1848 	 */
1849 	if (rx) {
1850 		/* set the INTENA_MSK_MASK so that this first write
1851 		 * won't actually enable the interrupt, instead just
1852 		 * updating the ITR (bit 31 in both the PF and VF registers)
1853 		 */
1854 		rxval |= BIT(31);
1855 		/* don't check _DOWN because interrupt isn't being enabled */
1856 		wr32(hw, INTREG(vector - 1), rxval);
1857 	}
1858 
1859 enable_int:
1860 	if (!test_bit(__I40E_DOWN, &vsi->state))
1861 		wr32(hw, INTREG(vector - 1), txval);
1862 
1863 	if (q_vector->itr_countdown)
1864 		q_vector->itr_countdown--;
1865 	else
1866 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1867 
1868 }
1869 
1870 /**
1871  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1872  * @napi: napi struct with our devices info in it
1873  * @budget: amount of work driver is allowed to do this pass, in packets
1874  *
1875  * This function will clean all queues associated with a q_vector.
1876  *
1877  * Returns the amount of work done
1878  **/
1879 int i40e_napi_poll(struct napi_struct *napi, int budget)
1880 {
1881 	struct i40e_q_vector *q_vector =
1882 			       container_of(napi, struct i40e_q_vector, napi);
1883 	struct i40e_vsi *vsi = q_vector->vsi;
1884 	struct i40e_ring *ring;
1885 	bool clean_complete = true;
1886 	bool arm_wb = false;
1887 	int budget_per_ring;
1888 	int work_done = 0;
1889 
1890 	if (test_bit(__I40E_DOWN, &vsi->state)) {
1891 		napi_complete(napi);
1892 		return 0;
1893 	}
1894 
1895 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1896 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1897 	 */
1898 	i40e_for_each_ring(ring, q_vector->tx) {
1899 		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1900 		arm_wb |= ring->arm_wb;
1901 		ring->arm_wb = false;
1902 	}
1903 
1904 	/* Handle case where we are called by netpoll with a budget of 0 */
1905 	if (budget <= 0)
1906 		goto tx_only;
1907 
1908 	/* We attempt to distribute budget to each Rx queue fairly, but don't
1909 	 * allow the budget to go below 1 because that would exit polling early.
1910 	 */
1911 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1912 
1913 	i40e_for_each_ring(ring, q_vector->rx) {
1914 		int cleaned;
1915 
1916 		if (ring_is_ps_enabled(ring))
1917 			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1918 		else
1919 			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1920 
1921 		work_done += cleaned;
1922 		/* if we didn't clean as many as budgeted, we must be done */
1923 		clean_complete &= (budget_per_ring != cleaned);
1924 	}
1925 
1926 	/* If work not completed, return budget and polling will return */
1927 	if (!clean_complete) {
1928 tx_only:
1929 		if (arm_wb)
1930 			i40e_force_wb(vsi, q_vector);
1931 		return budget;
1932 	}
1933 
1934 	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1935 		q_vector->arm_wb_state = false;
1936 
1937 	/* Work is done so exit the polling mode and re-enable the interrupt */
1938 	napi_complete_done(napi, work_done);
1939 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1940 		i40e_update_enable_itr(vsi, q_vector);
1941 	} else { /* Legacy mode */
1942 		struct i40e_hw *hw = &vsi->back->hw;
1943 		/* We re-enable the queue 0 cause, but
1944 		 * don't worry about dynamic_enable
1945 		 * because we left it on for the other
1946 		 * possible interrupts during napi
1947 		 */
1948 		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1949 			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1950 
1951 		wr32(hw, I40E_QINT_RQCTL(0), qval);
1952 		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1953 		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1954 		wr32(hw, I40E_QINT_TQCTL(0), qval);
1955 		i40e_irq_dynamic_enable_icr0(vsi->back);
1956 	}
1957 	return 0;
1958 }
1959 
1960 /**
1961  * i40e_atr - Add a Flow Director ATR filter
1962  * @tx_ring:  ring to add programming descriptor to
1963  * @skb:      send buffer
1964  * @tx_flags: send tx flags
1965  * @protocol: wire protocol
1966  **/
1967 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1968 		     u32 tx_flags, __be16 protocol)
1969 {
1970 	struct i40e_filter_program_desc *fdir_desc;
1971 	struct i40e_pf *pf = tx_ring->vsi->back;
1972 	union {
1973 		unsigned char *network;
1974 		struct iphdr *ipv4;
1975 		struct ipv6hdr *ipv6;
1976 	} hdr;
1977 	struct tcphdr *th;
1978 	unsigned int hlen;
1979 	u32 flex_ptype, dtype_cmd;
1980 	u16 i;
1981 
1982 	/* make sure ATR is enabled */
1983 	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
1984 		return;
1985 
1986 	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1987 		return;
1988 
1989 	/* if sampling is disabled do nothing */
1990 	if (!tx_ring->atr_sample_rate)
1991 		return;
1992 
1993 	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
1994 		return;
1995 
1996 	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
1997 		/* snag network header to get L4 type and address */
1998 		hdr.network = skb_network_header(skb);
1999 
2000 		/* Currently only IPv4/IPv6 with TCP is supported;
2001 		 * access ihl as a u8 to avoid unaligned access on ia64
2002 		 */
2003 		if (tx_flags & I40E_TX_FLAGS_IPV4)
2004 			hlen = (hdr.network[0] & 0x0F) << 2;
2005 		else if (protocol == htons(ETH_P_IPV6))
2006 			hlen = sizeof(struct ipv6hdr);
2007 		else
2008 			return;
2009 	} else {
2010 		hdr.network = skb_inner_network_header(skb);
2011 		hlen = skb_inner_network_header_len(skb);
2012 	}
2013 
2014 	/* Currently only IPv4/IPv6 with TCP is supported.
2015 	 * Note: tx_flags gets modified to reflect inner protocols in
2016 	 * the tx_enable_csum function if encapsulation is enabled.
2017 	 */
2018 	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
2019 	    (hdr.ipv4->protocol != IPPROTO_TCP))
2020 		return;
2021 	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
2022 		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
2023 		return;
2024 
2025 	th = (struct tcphdr *)(hdr.network + hlen);
2026 
2027 	/* Due to lack of space, no more new filters can be programmed */
2028 	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2029 		return;
2030 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
2031 		/* HW ATR eviction will take care of removing filters on FIN
2032 		 * and RST packets.
2033 		 */
2034 		if (th->fin || th->rst)
2035 			return;
2036 	}
2037 
2038 	tx_ring->atr_count++;
2039 
2040 	/* sample on all syn/fin/rst packets or once every atr sample rate */
2041 	if (!th->fin &&
2042 	    !th->syn &&
2043 	    !th->rst &&
2044 	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
2045 		return;
2046 
2047 	tx_ring->atr_count = 0;
2048 
2049 	/* grab the next descriptor */
2050 	i = tx_ring->next_to_use;
2051 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2052 
2053 	i++;
2054 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2055 
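	/* build the filter programming descriptor fields: queue index,
	 * packet classification type (IPv4 or IPv6 TCP) and destination VSI
	 */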
2056 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2057 		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
2058 	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
2059 		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2060 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2061 		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2062 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2063 
2064 	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2065 
2066 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2067 
2068 	dtype_cmd |= (th->fin || th->rst) ?
2069 		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2070 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2071 		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2072 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2073 
2074 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2075 		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
2076 
2077 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2078 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2079 
2080 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2081 	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
2082 		dtype_cmd |=
2083 			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2084 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2085 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2086 	else
2087 		dtype_cmd |=
2088 			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2089 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2090 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2091 
2092 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2093 		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2094 
2095 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2096 	fdir_desc->rsvd = cpu_to_le32(0);
2097 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2098 	fdir_desc->fd_id = cpu_to_le32(0);
2099 }
2100 
2101 /**
2102  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2103  * @skb:     send buffer
2104  * @tx_ring: ring to send buffer on
2105  * @flags:   the tx flags to be set
2106  *
2107  * Checks the skb and sets up the corresponding generic transmit flags
2108  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2109  *
2110  * Returns an error code to indicate the frame should be dropped on error,
2111  * otherwise returns 0 to indicate the flags have been set properly.
2112  **/
2113 #ifdef I40E_FCOE
2114 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2115 				      struct i40e_ring *tx_ring,
2116 				      u32 *flags)
2117 #else
2118 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2119 					     struct i40e_ring *tx_ring,
2120 					     u32 *flags)
2121 #endif
2122 {
2123 	__be16 protocol = skb->protocol;
2124 	u32  tx_flags = 0;
2125 
2126 	if (protocol == htons(ETH_P_8021Q) &&
2127 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2128 		/* When HW VLAN acceleration is turned off by the user the
2129 		 * stack sets the protocol to 8021q so that the driver
2130 		 * can take any steps required to support the SW only
2131 		 * VLAN handling.  In our case the driver doesn't need
2132 		 * to take any further steps so just set the protocol
2133 		 * to the encapsulated ethertype.
2134 		 */
2135 		skb->protocol = vlan_get_protocol(skb);
2136 		goto out;
2137 	}
2138 
2139 	/* if we have a HW VLAN tag being added, default to the HW one */
2140 	if (skb_vlan_tag_present(skb)) {
2141 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2142 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2143 	/* else if it is a SW VLAN, check the next protocol and store the tag */
2144 	} else if (protocol == htons(ETH_P_8021Q)) {
2145 		struct vlan_hdr *vhdr, _vhdr;
2146 
2147 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2148 		if (!vhdr)
2149 			return -EINVAL;
2150 
2151 		protocol = vhdr->h_vlan_encapsulated_proto;
2152 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2153 		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2154 	}
2155 
2156 	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2157 		goto out;
2158 
2159 	/* Insert 802.1p priority into VLAN header */
2160 	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2161 	    (skb->priority != TC_PRIO_CONTROL)) {
2162 		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2163 		tx_flags |= (skb->priority & 0x7) <<
2164 				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2165 		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2166 			struct vlan_ethhdr *vhdr;
2167 			int rc;
2168 
2169 			rc = skb_cow_head(skb, 0);
2170 			if (rc < 0)
2171 				return rc;
2172 			vhdr = (struct vlan_ethhdr *)skb->data;
2173 			vhdr->h_vlan_TCI = htons(tx_flags >>
2174 						 I40E_TX_FLAGS_VLAN_SHIFT);
2175 		} else {
2176 			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2177 		}
2178 	}
2179 
2180 out:
2181 	*flags = tx_flags;
2182 	return 0;
2183 }
2184 
2185 /**
2186  * i40e_tso - set up the tso context descriptor
2187  * @tx_ring:  ptr to the ring to send
2188  * @skb:      ptr to the skb we're sending
2189  * @hdr_len:  ptr to the size of the packet header
2190  * @cd_type_cmd_tso_mss: ptr to u64 object
2191  * @cd_tunneling: ptr to context descriptor bits
2192  *
2193  * Returns 0 if no TSO can happen, 1 if TSO is in use, or a negative error code
2194  **/
2195 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2196 		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
2197 		    u32 *cd_tunneling)
2198 {
2199 	u32 cd_cmd, cd_tso_len, cd_mss;
2200 	struct ipv6hdr *ipv6h;
2201 	struct tcphdr *tcph;
2202 	struct iphdr *iph;
2203 	u32 l4len;
2204 	int err;
2205 
2206 	if (!skb_is_gso(skb))
2207 		return 0;
2208 
2209 	err = skb_cow_head(skb, 0);
2210 	if (err < 0)
2211 		return err;
2212 
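	/* for encapsulated packets, TSO operates on the inner headers */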
2213 	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
2214 	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
2215 
2216 	if (iph->version == 4) {
2217 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2218 		iph->tot_len = 0;
2219 		iph->check = 0;
2220 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2221 						 0, IPPROTO_TCP, 0);
2222 	} else if (ipv6h->version == 6) {
2223 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2224 		ipv6h->payload_len = 0;
2225 		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
2226 					       0, IPPROTO_TCP, 0);
2227 	}
2228 
2229 	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
2230 	*hdr_len = (skb->encapsulation
2231 		    ? (skb_inner_transport_header(skb) - skb->data)
2232 		    : skb_transport_offset(skb)) + l4len;
2233 
2234 	/* find the field values */
2235 	cd_cmd = I40E_TX_CTX_DESC_TSO;
2236 	cd_tso_len = skb->len - *hdr_len;
2237 	cd_mss = skb_shinfo(skb)->gso_size;
2238 	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2239 				((u64)cd_tso_len <<
2240 				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2241 				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2242 	return 1;
2243 }
2244 
2245 /**
2246  * i40e_tsyn - set up the tsyn context descriptor
2247  * @tx_ring:  ptr to the ring to send
2248  * @skb:      ptr to the skb we're sending
2249  * @tx_flags: the collected send information
2250  * @cd_type_cmd_tso_mss: ptr to u64 object
2251  *
2252  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2253  **/
2254 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2255 		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2256 {
2257 	struct i40e_pf *pf;
2258 
2259 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2260 		return 0;
2261 
2262 	/* Tx timestamps cannot be sampled when doing TSO */
2263 	if (tx_flags & I40E_TX_FLAGS_TSO)
2264 		return 0;
2265 
2266 	/* only timestamp the outbound packet if the user has requested it and
2267 	 * we are not already transmitting a packet to be timestamped
2268 	 */
2269 	pf = i40e_netdev_to_pf(tx_ring->netdev);
2270 	if (!(pf->flags & I40E_FLAG_PTP))
2271 		return 0;
2272 
2273 	if (pf->ptp_tx &&
2274 	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2275 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2276 		pf->ptp_tx_skb = skb_get(skb);
2277 	} else {
2278 		return 0;
2279 	}
2280 
2281 	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2282 				I40E_TXD_CTX_QW1_CMD_SHIFT;
2283 
2284 	return 1;
2285 }
2286 
2287 /**
2288  * i40e_tx_enable_csum - Enable Tx checksum offloads
2289  * @skb: send buffer
2290  * @tx_flags: pointer to Tx flags currently set
2291  * @td_cmd: Tx descriptor command bits to set
2292  * @td_offset: Tx descriptor header offsets to set
2293  * @tx_ring: Tx descriptor ring
2294  * @cd_tunneling: ptr to context desc bits
2295  **/
2296 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2297 				u32 *td_cmd, u32 *td_offset,
2298 				struct i40e_ring *tx_ring,
2299 				u32 *cd_tunneling)
2300 {
2301 	struct ipv6hdr *this_ipv6_hdr;
2302 	unsigned int this_tcp_hdrlen;
2303 	struct iphdr *this_ip_hdr;
2304 	u32 network_hdr_len;
2305 	u8 l4_hdr = 0;
2306 	struct udphdr *oudph;
2307 	struct iphdr *oiph;
2308 	u32 l4_tunnel = 0;
2309 
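	/* for encapsulated packets, first describe the outer (tunnel) headers
	 * in the context descriptor, then point at the inner headers for the
	 * per-packet checksum offload setup below
	 */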
2310 	if (skb->encapsulation) {
2311 		switch (ip_hdr(skb)->protocol) {
2312 		case IPPROTO_UDP:
2313 			oudph = udp_hdr(skb);
2314 			oiph = ip_hdr(skb);
2315 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
2316 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
2317 			break;
2318 		case IPPROTO_GRE:
2319 			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
2320 			break;
2321 		default:
2322 			return;
2323 		}
2324 		network_hdr_len = skb_inner_network_header_len(skb);
2325 		this_ip_hdr = inner_ip_hdr(skb);
2326 		this_ipv6_hdr = inner_ipv6_hdr(skb);
2327 		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
2328 
2329 		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2330 			if (*tx_flags & I40E_TX_FLAGS_TSO) {
2331 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
2332 				ip_hdr(skb)->check = 0;
2333 			} else {
2334 				*cd_tunneling |=
2335 					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2336 			}
2337 		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2338 			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
2339 			if (*tx_flags & I40E_TX_FLAGS_TSO)
2340 				ip_hdr(skb)->check = 0;
2341 		}
2342 
2343 		/* Now set the ctx descriptor fields */
2344 		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
2345 				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
2346 				   l4_tunnel                             |
2347 				   ((skb_inner_network_offset(skb) -
2348 					skb_transport_offset(skb)) >> 1) <<
2349 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2350 		if (this_ip_hdr->version == 6) {
2351 			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
2352 			*tx_flags |= I40E_TX_FLAGS_IPV6;
2353 		}
2354 		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
2355 		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
2356 		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
2357 			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
2358 					oiph->daddr,
2359 					(skb->len - skb_transport_offset(skb)),
2360 					IPPROTO_UDP, 0);
2361 			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2362 		}
2363 	} else {
2364 		network_hdr_len = skb_network_header_len(skb);
2365 		this_ip_hdr = ip_hdr(skb);
2366 		this_ipv6_hdr = ipv6_hdr(skb);
2367 		this_tcp_hdrlen = tcp_hdrlen(skb);
2368 	}
2369 
2370 	/* Enable IP checksum offloads */
2371 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2372 		l4_hdr = this_ip_hdr->protocol;
2373 		/* the stack computes the IP header already, the only time we
2374 		 * need the hardware to recompute it is in the case of TSO.
2375 		 */
2376 		if (*tx_flags & I40E_TX_FLAGS_TSO) {
2377 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
2378 			this_ip_hdr->check = 0;
2379 		} else {
2380 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
2381 		}
2382 		/* Now set the td_offset for IP header length */
2383 		*td_offset = (network_hdr_len >> 2) <<
2384 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2385 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2386 		l4_hdr = this_ipv6_hdr->nexthdr;
2387 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2388 		/* Now set the td_offset for IP header length */
2389 		*td_offset = (network_hdr_len >> 2) <<
2390 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2391 	}
2392 	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
2393 	*td_offset |= (skb_network_offset(skb) >> 1) <<
2394 		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2395 
2396 	/* Enable L4 checksum offloads */
2397 	switch (l4_hdr) {
2398 	case IPPROTO_TCP:
2399 		/* enable checksum offloads */
2400 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2401 		*td_offset |= (this_tcp_hdrlen >> 2) <<
2402 			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2403 		break;
2404 	case IPPROTO_SCTP:
2405 		/* enable SCTP checksum offload */
2406 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2407 		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
2408 			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2409 		break;
2410 	case IPPROTO_UDP:
2411 		/* enable UDP checksum offload */
2412 		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2413 		*td_offset |= (sizeof(struct udphdr) >> 2) <<
2414 			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2415 		break;
2416 	default:
2417 		break;
2418 	}
2419 }
2420 
2421 /**
2422  * i40e_create_tx_ctx - Build the Tx context descriptor
2423  * @tx_ring:  ring to create the descriptor on
2424  * @cd_type_cmd_tso_mss: Quad Word 1
2425  * @cd_tunneling: Quad Word 0 - bits 0-31
2426  * @cd_l2tag2: Quad Word 0 - bits 32-63
2427  **/
2428 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2429 			       const u64 cd_type_cmd_tso_mss,
2430 			       const u32 cd_tunneling, const u32 cd_l2tag2)
2431 {
2432 	struct i40e_tx_context_desc *context_desc;
2433 	int i = tx_ring->next_to_use;
2434 
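	/* if there is nothing to program, don't consume a context descriptor */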
2435 	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2436 	    !cd_tunneling && !cd_l2tag2)
2437 		return;
2438 
2439 	/* grab the next descriptor */
2440 	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2441 
2442 	i++;
2443 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2444 
2445 	/* cpu_to_le32 and assign to struct fields */
2446 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2447 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2448 	context_desc->rsvd = cpu_to_le16(0);
2449 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2450 }
2451 
2452 /**
2453  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2454  * @tx_ring: the ring to be checked
2455  * @size:    the size of the buffer we want to ensure is available
2456  *
2457  * Returns -EBUSY if a stop is needed, else 0
2458  **/
2459 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2460 {
2461 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2462 	/* Memory barrier before checking head and tail */
2463 	smp_mb();
2464 
2465 	/* Check again in case another CPU has just made room available. */
2466 	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2467 		return -EBUSY;
2468 
2469 	/* A reprieve! - use start_queue because it doesn't call schedule */
2470 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2471 	++tx_ring->tx_stats.restart_queue;
2472 	return 0;
2473 }
2474 
2475 /**
2476  * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2477  * @tx_ring: the ring to be checked
2478  * @size:    the size of the buffer we want to ensure is available
2479  *
2480  * Returns 0 if stop is not needed
2481  **/
2482 #ifdef I40E_FCOE
2483 inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2484 #else
2485 static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2486 #endif
2487 {
2488 	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2489 		return 0;
2490 	return __i40e_maybe_stop_tx(tx_ring, size);
2491 }
2492 
2493 /**
2494  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2495  * @skb:      send buffer
2496  * @tx_flags: collected send information
2497  *
2498  * Note: Our HW can't scatter-gather more than 8 fragments to build
2499  * a packet on the wire and so we need to figure out the cases where we
2500  * need to linearize the skb.
2501  **/
2502 static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
2503 {
2504 	struct skb_frag_struct *frag;
2505 	bool linearize = false;
2506 	unsigned int size = 0;
2507 	u16 num_frags;
2508 	u16 gso_segs;
2509 
2510 	num_frags = skb_shinfo(skb)->nr_frags;
2511 	gso_segs = skb_shinfo(skb)->gso_segs;
2512 
2513 	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2514 		u16 j = 0;
2515 
2516 		if (num_frags < (I40E_MAX_BUFFER_TXD))
2517 			goto linearize_chk_done;
2518 		/* try the simple math to see if we have too many frags per segment */
2519 		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2520 		    I40E_MAX_BUFFER_TXD) {
2521 			linearize = true;
2522 			goto linearize_chk_done;
2523 		}
2524 		frag = &skb_shinfo(skb)->frags[0];
2525 		/* we might still have more fragments per segment */
2526 		do {
2527 			size += skb_frag_size(frag);
2528 			frag++; j++;
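			/* once a full gso segment's worth of data has been
			 * counted, carry the remainder into the next segment
			 */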
2529 			if ((size >= skb_shinfo(skb)->gso_size) &&
2530 			    (j < I40E_MAX_BUFFER_TXD)) {
2531 				size = (size % skb_shinfo(skb)->gso_size);
2532 				j = (size) ? 1 : 0;
2533 			}
2534 			if (j == I40E_MAX_BUFFER_TXD) {
2535 				linearize = true;
2536 				break;
2537 			}
2538 			num_frags--;
2539 		} while (num_frags);
2540 	} else {
2541 		if (num_frags >= I40E_MAX_BUFFER_TXD)
2542 			linearize = true;
2543 	}
2544 
2545 linearize_chk_done:
2546 	return linearize;
2547 }
2548 
2549 /**
2550  * i40e_tx_map - Build the Tx descriptor
2551  * @tx_ring:  ring to send buffer on
2552  * @skb:      send buffer
2553  * @first:    first buffer info buffer to use
2554  * @tx_flags: collected send information
2555  * @hdr_len:  size of the packet header
2556  * @td_cmd:   the command field in the descriptor
2557  * @td_offset: offset for checksum or crc
2558  **/
2559 #ifdef I40E_FCOE
2560 inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2561 			struct i40e_tx_buffer *first, u32 tx_flags,
2562 			const u8 hdr_len, u32 td_cmd, u32 td_offset)
2563 #else
2564 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2565 			       struct i40e_tx_buffer *first, u32 tx_flags,
2566 			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
2567 #endif
2568 {
2569 	unsigned int data_len = skb->data_len;
2570 	unsigned int size = skb_headlen(skb);
2571 	struct skb_frag_struct *frag;
2572 	struct i40e_tx_buffer *tx_bi;
2573 	struct i40e_tx_desc *tx_desc;
2574 	u16 i = tx_ring->next_to_use;
2575 	u32 td_tag = 0;
2576 	dma_addr_t dma;
2577 	u16 gso_segs;
2578 	u16 desc_count = 0;
2579 	bool tail_bump = true;
2580 	bool do_rs = false;
2581 
2582 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2583 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2584 		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2585 			 I40E_TX_FLAGS_VLAN_SHIFT;
2586 	}
2587 
2588 	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2589 		gso_segs = skb_shinfo(skb)->gso_segs;
2590 	else
2591 		gso_segs = 1;
2592 
2593 	/* multiply data chunks by size of headers */
2594 	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2595 	first->gso_segs = gso_segs;
2596 	first->skb = skb;
2597 	first->tx_flags = tx_flags;
2598 
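	/* map the linear (head) portion of the skb first; the fragments are
	 * mapped in the loop below
	 */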
2599 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2600 
2601 	tx_desc = I40E_TX_DESC(tx_ring, i);
2602 	tx_bi = first;
2603 
2604 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2605 		if (dma_mapping_error(tx_ring->dev, dma))
2606 			goto dma_error;
2607 
2608 		/* record length, and DMA address */
2609 		dma_unmap_len_set(tx_bi, len, size);
2610 		dma_unmap_addr_set(tx_bi, dma, dma);
2611 
2612 		tx_desc->buffer_addr = cpu_to_le64(dma);
2613 
2614 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2615 			tx_desc->cmd_type_offset_bsz =
2616 				build_ctob(td_cmd, td_offset,
2617 					   I40E_MAX_DATA_PER_TXD, td_tag);
2618 
2619 			tx_desc++;
2620 			i++;
2621 			desc_count++;
2622 
2623 			if (i == tx_ring->count) {
2624 				tx_desc = I40E_TX_DESC(tx_ring, 0);
2625 				i = 0;
2626 			}
2627 
2628 			dma += I40E_MAX_DATA_PER_TXD;
2629 			size -= I40E_MAX_DATA_PER_TXD;
2630 
2631 			tx_desc->buffer_addr = cpu_to_le64(dma);
2632 		}
2633 
2634 		if (likely(!data_len))
2635 			break;
2636 
2637 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2638 							  size, td_tag);
2639 
2640 		tx_desc++;
2641 		i++;
2642 		desc_count++;
2643 
2644 		if (i == tx_ring->count) {
2645 			tx_desc = I40E_TX_DESC(tx_ring, 0);
2646 			i = 0;
2647 		}
2648 
2649 		size = skb_frag_size(frag);
2650 		data_len -= size;
2651 
2652 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2653 				       DMA_TO_DEVICE);
2654 
2655 		tx_bi = &tx_ring->tx_bi[i];
2656 	}
2657 
2658 	/* set next_to_watch value indicating a packet is present */
2659 	first->next_to_watch = tx_desc;
2660 
2661 	i++;
2662 	if (i == tx_ring->count)
2663 		i = 0;
2664 
2665 	tx_ring->next_to_use = i;
2666 
2667 	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2668 						 tx_ring->queue_index),
2669 						 first->bytecount);
2670 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2671 
2672 	/* Algorithm to optimize tail and RS bit setting:
2673 	 * if xmit_more is supported
2674 	 *	if xmit_more is true
2675 	 *		do not update tail and do not mark RS bit.
2676 	 *	if xmit_more is false and last xmit_more was false
2677 	 *		if every packet spanned less than 4 desc
2678 	 *			then set RS bit on 4th packet and update tail
2679 	 *			on every packet
2680 	 *		else
2681 	 *			update tail and set RS bit on every packet.
2682 	 *	if xmit_more is false and last_xmit_more was true
2683 	 *		update tail and set RS bit.
2684 	 *
2685 	 * Optimization: wmb to be issued only in case of tail update.
2686 	 * Also optimize the Descriptor WB path for RS bit with the same
2687 	 * algorithm.
2688 	 *
2689 	 * Note: If there are fewer than 4 packets
2690 	 * pending and interrupts were disabled, the service task will
2691 	 * trigger a force WB.
2692 	 */
2693 	if (skb->xmit_more  &&
2694 	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2695 						    tx_ring->queue_index))) {
2696 		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2697 		tail_bump = false;
2698 	} else if (!skb->xmit_more &&
2699 		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2700 						       tx_ring->queue_index)) &&
2701 		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2702 		   (tx_ring->packet_stride < WB_STRIDE) &&
2703 		   (desc_count < WB_STRIDE)) {
2704 		tx_ring->packet_stride++;
2705 	} else {
2706 		tx_ring->packet_stride = 0;
2707 		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2708 		do_rs = true;
2709 	}
2710 	if (do_rs)
2711 		tx_ring->packet_stride = 0;
2712 
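	/* write the last descriptor: EOP always, plus RS when do_rs is set */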
2713 	tx_desc->cmd_type_offset_bsz =
2714 			build_ctob(td_cmd, td_offset, size, td_tag) |
2715 			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2716 						  I40E_TX_DESC_CMD_EOP) <<
2717 						  I40E_TXD_QW1_CMD_SHIFT);
2718 
2719 	/* notify HW of packet */
2720 	if (!tail_bump)
2721 		prefetchw(tx_desc + 1);
2722 
2723 	if (tail_bump) {
2724 		/* Force memory writes to complete before letting h/w
2725 		 * know there are new descriptors to fetch.  (Only
2726 		 * applicable for weak-ordered memory model archs,
2727 		 * such as IA-64).
2728 		 */
2729 		wmb();
2730 		writel(i, tx_ring->tail);
2731 	}
2732 
2733 	return;
2734 
2735 dma_error:
2736 	dev_info(tx_ring->dev, "TX DMA map failed\n");
2737 
2738 	/* clear dma mappings for failed tx_bi map */
2739 	for (;;) {
2740 		tx_bi = &tx_ring->tx_bi[i];
2741 		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2742 		if (tx_bi == first)
2743 			break;
2744 		if (i == 0)
2745 			i = tx_ring->count;
2746 		i--;
2747 	}
2748 
2749 	tx_ring->next_to_use = i;
2750 }
2751 
2752 /**
2753  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2754  * @skb:     send buffer
2755  * @tx_ring: ring to send buffer on
2756  *
2757  * Returns the number of data descriptors needed for this skb. Returns 0 to
2758  * indicate there are not enough descriptors available in this ring since we
2759  * need at least one descriptor.
2760  **/
2761 #ifdef I40E_FCOE
2762 inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2763 				      struct i40e_ring *tx_ring)
2764 #else
2765 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2766 					     struct i40e_ring *tx_ring)
2767 #endif
2768 {
2769 	unsigned int f;
2770 	int count = 0;
2771 
2772 	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2773 	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2774 	 *       + 4 desc gap to avoid the cache line where head is,
2775 	 *       + 1 desc for context descriptor,
2776 	 * otherwise try next time
2777 	 */
2778 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2779 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2780 
2781 	count += TXD_USE_COUNT(skb_headlen(skb));
2782 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2783 		tx_ring->tx_stats.tx_busy++;
2784 		return 0;
2785 	}
2786 	return count;
2787 }
2788 
2789 /**
2790  * i40e_xmit_frame_ring - Sends buffer on Tx ring
2791  * @skb:     send buffer
2792  * @tx_ring: ring to send buffer on
2793  *
2794  * Returns NETDEV_TX_OK if sent, else an error code
2795  **/
2796 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2797 					struct i40e_ring *tx_ring)
2798 {
2799 	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2800 	u32 cd_tunneling = 0, cd_l2tag2 = 0;
2801 	struct i40e_tx_buffer *first;
2802 	u32 td_offset = 0;
2803 	u32 tx_flags = 0;
2804 	__be16 protocol;
2805 	u32 td_cmd = 0;
2806 	u8 hdr_len = 0;
2807 	int tsyn;
2808 	int tso;
2809 
2810 	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2811 		return NETDEV_TX_BUSY;
2812 
2813 	/* prepare the xmit flags */
2814 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2815 		goto out_drop;
2816 
2817 	/* obtain protocol of skb */
2818 	protocol = vlan_get_protocol(skb);
2819 
2820 	/* record the location of the first descriptor for this packet */
2821 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
2822 
2823 	/* setup IPv4/IPv6 offloads */
2824 	if (protocol == htons(ETH_P_IP))
2825 		tx_flags |= I40E_TX_FLAGS_IPV4;
2826 	else if (protocol == htons(ETH_P_IPV6))
2827 		tx_flags |= I40E_TX_FLAGS_IPV6;
2828 
2829 	tso = i40e_tso(tx_ring, skb, &hdr_len,
2830 		       &cd_type_cmd_tso_mss, &cd_tunneling);
2831 
2832 	if (tso < 0)
2833 		goto out_drop;
2834 	else if (tso)
2835 		tx_flags |= I40E_TX_FLAGS_TSO;
2836 
2837 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2838 
2839 	if (tsyn)
2840 		tx_flags |= I40E_TX_FLAGS_TSYN;
2841 
2842 	if (i40e_chk_linearize(skb, tx_flags)) {
2843 		if (skb_linearize(skb))
2844 			goto out_drop;
2845 		tx_ring->tx_stats.tx_linearize++;
2846 	}
2847 	skb_tx_timestamp(skb);
2848 
2849 	/* always enable CRC insertion offload */
2850 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
2851 
2852 	/* Always offload the checksum, since it's in the data descriptor */
2853 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2854 		tx_flags |= I40E_TX_FLAGS_CSUM;
2855 
2856 		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2857 				    tx_ring, &cd_tunneling);
2858 	}
2859 
2860 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2861 			   cd_tunneling, cd_l2tag2);
2862 
2863 	/* Add Flow Director ATR if it's enabled.
2864 	 *
2865 	 * NOTE: this must always be directly before the data descriptor.
2866 	 */
2867 	i40e_atr(tx_ring, skb, tx_flags, protocol);
2868 
2869 	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2870 		    td_cmd, td_offset);
2871 
2872 	return NETDEV_TX_OK;
2873 
2874 out_drop:
2875 	dev_kfree_skb_any(skb);
2876 	return NETDEV_TX_OK;
2877 }
2878 
2879 /**
2880  * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2881  * @skb:    send buffer
2882  * @netdev: network interface device structure
2883  *
2884  * Returns NETDEV_TX_OK if sent, else an error code
2885  **/
2886 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2887 {
2888 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2889 	struct i40e_vsi *vsi = np->vsi;
2890 	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
2891 
2892 	/* hardware can't handle really short frames; hardware padding only
2893 	 * works beyond this point
2894 	 */
2895 	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2896 		return NETDEV_TX_OK;
2897 
2898 	return i40e_xmit_frame_ring(skb, tx_ring);
2899 }
2900