// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
	return &rx_ring->xdp_buf[idx];
}

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
	memset(&vsi->tx_rings[q_idx]->stats, 0,
	       sizeof(vsi->tx_rings[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
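		/* let any in-flight XDP Tx use of this ring, which runs
		 * under rcu_read_lock(), finish before the ring is cleaned
		 */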
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
	 * QINT_RQCTL needs to be handled here
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

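	/* disable the per-vector interrupt and wait for any handler that is
	 * already running on this vector to complete
	 */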
	if (q_vector) {
		u16 v_idx = q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(pf->msix_entries[v_idx + base].vector);
	}
}

/**
 * ice_qvec_cfg_msix - Configure MSI-X settings for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

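	/* serialize against other flows reconfiguring this VSI; bail out if
	 * ICE_CFG_BUSY is not released within the timeout
	 */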
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, false);
	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u16 size;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(size, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		goto free_buf;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, size);
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			goto free_buf;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
	}

	err = ice_vsi_cfg_rxq(rx_ring);
	if (err)
		goto free_buf;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		goto free_buf;

	clear_bit(ICE_CFG_BUSY, vsi->state);
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
	kfree(qg_buf);
	return err;
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
	    !is_power_of_2(vsi->tx_rings[qid]->count)) {
		netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
		pool_failure = -EINVAL;
		goto failure;
	}

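	/* when the interface is running with XDP enabled, the queue pair has
	 * to be torn down before the pool change and brought back up after it
	 */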
	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

/**
 * ice_fill_rx_descs - pick buffers from XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by the caller of this function.
 *
 * Returns the number of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		xdp++;
	}

	return buffs;
}

/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Place @count Rx descriptors onto the Rx ring. Handle the ring wrap
 * for the case where the space from next_to_use up to the end of the
 * ring is less than @count. Finally do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u32 nb_buffs_extra = 0, nb_buffs;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

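	/* if the request crosses the end of the ring, fill descriptors up to
	 * the ring boundary first, bump the tail at index 0, then continue
	 * filling from the beginning of the ring
	 */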
	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
						   rx_desc,
						   rx_ring->count - ntu);
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 batched, leftover, i, tail_bumps;

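	/* fill the ring in rx_thresh sized chunks, bumping the tail after
	 * each chunk, then allocate the non-aligned remainder
	 */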
	batched = ALIGN_DOWN(count, rx_thresh);
	tail_bumps = batched / rx_thresh;
	leftover = count & (rx_thresh - 1);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}

/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
{
	int ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(ICE_RX_DESC(rx_ring, ntc));
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

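	/* copy the frame, including any metadata, out of the zero-copy
	 * buffer so that the buffer can be returned to the pool right away
	 */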
	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	xsk_buff_free(xdp);
	return skb;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

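	/* XDP_REDIRECT is the expected verdict on the AF_XDP zero-copy path,
	 * so handle it up front before falling back to the switch statement
	 */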
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	bool failure = false;

	/* the ZC path is enabled only when an XDP program is set,
	 * so xdp_prog cannot be NULL here
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
			break;

		xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
				   ICE_RX_FLX_DESC_PKT_LEN_M;
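		/* a zero-length descriptor carries no data; clear the
		 * xdp_buff fields and take the skb path
		 */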
		if (!size) {
			xdp->data = NULL;
			xdp->data_end = NULL;
			xdp->data_hard_start = NULL;
			xdp->data_meta = NULL;
			goto construct_skb;
		}

		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);

		xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
		if (xdp_res) {
			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(xdp);

			total_rx_bytes += size;
			total_rx_packets++;

			ice_bump_ntc(rx_ring);
			continue;
		}
construct_skb:
		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			break;
		}

		ice_bump_ntc(rx_ring);

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
				       ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

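	/* replenish all Rx descriptors that are currently unused */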
	failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
	xdp_ring->xdp_tx_active--;
	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);
}

/**
 * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
 * @xdp_ring: XDP ring to clean
 * @napi_budget: number of descriptors that NAPI allows us to clean
 *
 * Returns count of cleaned descriptors
 */
static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
{
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	int budget = napi_budget / tx_thresh;
	u16 next_dd = xdp_ring->next_dd;
	u16 ntc, cleared_dds = 0;

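	/* walk the ring in tx_thresh sized chunks; a chunk is considered
	 * complete once the descriptor at next_dd has the DD bit set
	 */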
	do {
		struct ice_tx_desc *next_dd_desc;
		u16 desc_cnt = xdp_ring->count;
		struct ice_tx_buf *tx_buf;
		u32 xsk_frames;
		u16 i;

		next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
		if (!(next_dd_desc->cmd_type_offset_bsz &
		    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		cleared_dds++;
		xsk_frames = 0;
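		/* with no XDP_TX frames in flight the whole chunk consists of
		 * AF_XDP descriptors, so the per-buffer walk can be skipped
		 */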
		if (likely(!xdp_ring->xdp_tx_active)) {
			xsk_frames = tx_thresh;
			goto skip;
		}

		ntc = xdp_ring->next_to_clean;

		for (i = 0; i < tx_thresh; i++) {
			tx_buf = &xdp_ring->tx_buf[ntc];

			if (tx_buf->raw_buf) {
				ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
				tx_buf->raw_buf = NULL;
			} else {
				xsk_frames++;
			}

			ntc++;
			if (ntc >= xdp_ring->count)
				ntc = 0;
		}
skip:
		xdp_ring->next_to_clean += tx_thresh;
		if (xdp_ring->next_to_clean >= desc_cnt)
			xdp_ring->next_to_clean -= desc_cnt;
		if (xsk_frames)
			xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
		next_dd_desc->cmd_type_offset_bsz = 0;
		next_dd = next_dd + tx_thresh;
		if (next_dd >= desc_cnt)
			next_dd = tx_thresh - 1;
	} while (--budget);

	xdp_ring->next_dd = next_dd;

	return cleared_dds * tx_thresh;
}

/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of an AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
						      0, desc->len, 0);

	*total_bytes += desc->len;
}

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;

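	/* request a HW completion (RS bit) once enough descriptors have been
	 * produced to reach the next_rs position
	 */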
	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs += tx_thresh;
	}
}

/**
 * ice_fill_tx_hw_ring - produce a given number of HW Tx descriptors onto the ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
				u32 nb_pkts, unsigned int *total_bytes)
{
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u32 batched, leftover, i;

	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);

	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
		struct ice_tx_desc *tx_desc;

		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs += tx_thresh;
	}
}

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @budget: number of free descriptors on HW Tx ring that can be used
 * @napi_budget: number of descriptors that NAPI allows us to clean
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	if (budget < tx_thresh)
		budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

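	/* if the descriptors to be produced would wrap past the end of the
	 * ring, fill up to the ring boundary first, request a completion for
	 * that chunk, then continue from the beginning of the ring
	 */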
	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		struct ice_tx_desc *tx_desc;

		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
		tx_desc->cmd_type_offset_bsz |=
			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
		xdp_ring->next_rs = tx_thresh - 1;
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			    &total_bytes);

	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	return nb_pkts < budget;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_txq)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->raw_buf)
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
		else
			xsk_frames++;

		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}