// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

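/* Return the UMEM attached to this ring's queue id, or NULL if the queue
 * is not running in AF_XDP zero-copy mode (no XDP program loaded, or the
 * queue is not marked in af_xdp_zc_qps).
 */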
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xdp_get_umem_from_qid(adapter->netdev, qid);
}

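/* DMA-map every page of the UMEM for device access. On a mapping failure
 * the pages mapped so far are unwound.
 */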
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
				  struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i, j;
	dma_addr_t dma;

	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -ENOMEM;
}

static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
				     struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}

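/* Enable AF_XDP zero-copy on queue pair @qid: validate the queue id,
 * give the UMEM a fill-queue reuse buffer sized to the RX ring, DMA-map
 * the UMEM and, if the interface is running, bounce the queue pair so it
 * comes back up in zero-copy mode.
 */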
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
				 struct xdp_umem *umem,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = ixgbe_xsk_umem_dma_map(adapter, umem);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
		if (err)
			return err;
	}

	return 0;
}

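/* Disable AF_XDP zero-copy on queue pair @qid and release the UMEM's DMA
 * mappings, bouncing the queue pair if the interface is running.
 */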
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xdp_umem *umem;
	bool if_running;

	umem = xdp_get_umem_from_qid(adapter->netdev, qid);
	if (!umem)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	ixgbe_xsk_umem_dma_unmap(adapter, umem);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

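/* Entry point from the driver's ndo_bpf handler for XDP_SETUP_XSK_UMEM:
 * a non-NULL @umem enables zero-copy on @qid, a NULL @umem disables it.
 */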
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
			 u16 qid)
{
	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
		ixgbe_xsk_umem_disable(adapter, qid);
}

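/* Run the ring's XDP program on a zero-copy frame and map the verdict to
 * IXGBE_XDP_{PASS,TX,REDIR,CONSUMED}. The UMEM handle is advanced by the
 * offset of xdp->data from the hard start so the TX/redirect paths see the
 * frame the program produced.
 */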
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf)) {
			result = IXGBE_XDP_CONSUMED;
			break;
		}
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fallthrough */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

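/* Fetch the RX buffer for the next_to_clean descriptor and sync the
 * received bytes for CPU access.
 */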
static struct ixgbe_rx_buffer *
ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, unsigned int size)
{
	struct ixgbe_rx_buffer *bi;

	bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}

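/* Recycle a zero-copy RX buffer: mask the old buffer's DMA address,
 * virtual address and handle back to the chunk start, re-apply the
 * headroom, and store the result in the next_to_alloc slot.
 */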
static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
				     struct ixgbe_rx_buffer *obi)
{
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;
	struct ixgbe_rx_buffer *nbi;

	nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	nbi->dma = obi->dma & mask;
	nbi->dma += hr;

	nbi->addr = (void *)((unsigned long)obi->addr & mask);
	nbi->addr += hr;

	nbi->handle = obi->handle & mask;
	nbi->handle += rx_ring->xsk_umem->headroom;

	obi->addr = NULL;
	obi->skb = NULL;
}

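/* zero_copy_allocator callback: when the stack drops a zero-copy frame,
 * put its chunk back into the ring's next_to_alloc slot so it can be
 * handed to hardware again.
 */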
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct ixgbe_rx_buffer *bi;
	struct ixgbe_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct ixgbe_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_buffer_info[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}

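/* Fast-path buffer allocation used from the RX poll loop: pull a chunk
 * straight from the UMEM fill queue. The _slow_ variant further down also
 * consults the fill-queue reuse list (the *_rq helpers) and is used when
 * the ring is being (re)populated from setup paths.
 */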
static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
				  struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr)
		return true;

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
	return true;
}

static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
	return true;
}

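/* Post up to @cleaned_count RX descriptors, pulling chunks through @alloc.
 * Returns false if the fill queue ran dry before all descriptors could be
 * refilled.
 */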
static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
			    bool alloc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi))
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	bool ok = true;

	/* nothing to do */
	if (!cleaned_count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			break;
		}

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	__ixgbe_alloc_rx_buffers_zc(rx_ring, count,
				    ixgbe_alloc_buffer_slow_zc);
}

static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
					   u16 count)
{
	return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
					   ixgbe_alloc_buffer_zc);
}

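/* XDP_PASS path: the frame is copied out of the UMEM into a freshly
 * allocated skb and the zero-copy buffer is recycled back onto the ring.
 */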
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi,
					      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

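/* Zero-copy RX poll loop: process completed descriptors up to @budget,
 * running XDP on each frame and refilling the ring in batches of
 * IXGBE_RX_BUFFER_WRITE. Returns the full budget on an allocation failure
 * so NAPI keeps polling.
 */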
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								 cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}

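/* Ring teardown: hand every chunk that is still sitting on the RX ring
 * back to the UMEM's fill-queue reuse list.
 */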
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;
	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];

	while (i != rx_ring->next_to_alloc) {
		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
		i++;
		bi++;
		if (i == rx_ring->count) {
			i = 0;
			bi = rx_ring->rx_buffer_info;
		}
	}
}

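/* Zero-copy TX: pull up to @budget descriptors from the UMEM TX queue and
 * post them on the XDP ring. Returns false when transmission stopped early
 * because the ring was full or the link was down.
 */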
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
			break;

		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);

		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}

static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

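/* Complete transmitted descriptors on a zero-copy XDP ring: free XDP_TX
 * frames, report AF_XDP frames to the UMEM completion queue, then try to
 * transmit more frames from the UMEM TX queue.
 */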
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	unsigned int total_packets = 0, total_bytes = 0;
	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	bool xmit_done;

	tx_bi = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
	return budget > 0 && xmit_done;
}

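/* ndo_xsk_async_xmit: user space has posted TX descriptors on an AF_XDP
 * socket. Unless NAPI is already scheduled, re-arm the queue's interrupt
 * so the poll routine runs and drains the UMEM TX queue.
 */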
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	if (!adapter->xdp_ring[qid]->xsk_umem)
		return -ENXIO;

	ring = adapter->xdp_ring[qid];
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

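/* Ring teardown: release all pending TX buffers, freeing XDP_TX frames and
 * reporting outstanding AF_XDP frames as completed to the UMEM.
 */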
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}