// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
	memset(&vsi->tx_rings[q_idx]->stats, 0,
	       sizeof(vsi->tx_rings[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi))
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains the queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
	 * QINT_RQCTL needs to be handled here
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

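	/* Writing 0 to GLINT_DYN_CTL clears INTENA and thus masks the vector;
	 * ice_flush() posts that write to hardware before synchronize_irq()
	 * waits for a handler that may already be running on another CPU.
	 */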
	if (q_vector) {
		u16 v_idx = q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(pf->msix_entries[v_idx + base].vector);
	}
}

/**
 * ice_qvec_cfg_msix - Configure MSI-X interrupt settings for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_ring *ring;

	ice_cfg_itr(hw, q_vector);

	wr32(hw, GLINT_RATE(reg_idx),
	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

	ice_for_each_ring(ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_ring(ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

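	/* Serialize against other configuration flows: spin on the VSI
	 * CFG_BUSY bit for at most ~50-100 ms before giving up with -EBUSY.
	 * The bit is released again in ice_qp_ena() once the queue pair has
	 * been brought back up.
	 */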
	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, false);
	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	u16 size;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(size, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		goto free_buf;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, size);
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			goto free_buf;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
	}

	err = ice_setup_rx_ctx(rx_ring);
	if (err)
		goto free_buf;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		goto free_buf;

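	/* The queue pair is fully configured again; release the CFG_BUSY bit
	 * taken in ice_qp_dis() and restart NAPI, the interrupt and the
	 * stack's Tx queue.
	 */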
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
	kfree(qg_buf);
	return err;
}

/**
 * ice_xsk_alloc_pools - allocate the array of buffer pool pointers on a VSI
 * @vsi: VSI to allocate the buffer pool array on
 *
 * Returns 0 on success, negative on error
 */
static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
	if (vsi->xsk_pools)
		return 0;

	vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
				 GFP_KERNEL);

	if (!vsi->xsk_pools) {
		vsi->num_xsk_pools = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
 * @vsi: VSI from which the buffer pool will be removed
 * @qid: Ring/qid associated with the buffer pool
 */
static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
	vsi->xsk_pools[qid] = NULL;
	vsi->num_xsk_pools_used--;

	if (vsi->num_xsk_pools_used == 0) {
		kfree(vsi->xsk_pools);
		vsi->xsk_pools = NULL;
		vsi->num_xsk_pools = 0;
	}
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
	    !vsi->xsk_pools[qid])
		return -EINVAL;

	xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
	ice_xsk_remove_pool(vsi, qid);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (!vsi->num_xsk_pools)
		vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
	if (qid >= vsi->num_xsk_pools)
		return -EINVAL;

	err = ice_xsk_alloc_pools(vsi);
	if (err)
		return err;

	if (vsi->xsk_pools && vsi->xsk_pools[qid])
		return -EBUSY;

	vsi->xsk_pools[qid] = pool;
	vsi->num_xsk_pools_used++;

	err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

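/* ice_xsk_pool_setup() is reached from the driver's ndo_bpf handler when the
 * core issues an XDP_SETUP_XSK_POOL command, i.e. when user space binds or
 * unbinds an AF_XDP socket on a given queue. As a rough, hedged sketch of the
 * user-space side (libbpf's xsk helpers; error handling omitted, "eth0",
 * queue_id and umem are assumed placeholders):
 *
 *	struct xsk_socket_config cfg = {
 *		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.bind_flags = XDP_ZEROCOPY,
 *	};
 *	struct xsk_ring_cons rx;
 *	struct xsk_ring_prod tx;
 *	struct xsk_socket *xsk;
 *
 *	xsk_socket__create(&xsk, "eth0", queue_id, umem, &rx, &tx, &cfg);
 *
 * The bind ends up here with @pool pointing at the UMEM's buffer pool and
 * @qid set to queue_id; closing the socket takes the @pool == NULL path.
 */
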
/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the XSK buffer pool
 * and places them on the Rx ring.
 *
 * Returns false if all allocations were successful, true if any fail.
 */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *rx_buf;
	bool ret = false;
	dma_addr_t dma;

	if (!count)
		return false;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	rx_buf = &rx_ring->rx_buf[ntu];

	do {
		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!rx_buf->xdp) {
			ret = true;
			break;
		}

		dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		rx_buf++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			rx_buf = rx_ring->rx_buf;
			ntu = 0;
		}
	} while (--count);

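	/* Only touch the tail if descriptors were actually posted;
	 * ice_release_rx_desc() also records the new next_to_use value.
	 */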
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return ret;
}

/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
	int ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(ICE_RX_DESC(rx_ring, ntc));
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 *
 * This function allocates a new skb and copies the packet data out of the
 * zero-copy Rx buffer, which is then returned to the pool.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
	unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
	unsigned int datasize_hard = rx_buf->xdp->data_end -
				     rx_buf->xdp->data_hard_start;
	struct sk_buff *skb;

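	/* The zero-copy buffer cannot be handed to the stack directly, so
	 * allocate a regular skb covering the headroom plus data and copy the
	 * frame out; the XSK buffer is returned to the pool further down.
	 */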
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(rx_buf->xdp);
	rx_buf->xdp = NULL;
	return skb;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = ICE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ice_ring *xdp_ring;
	u32 act;

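	/* The XDP program can be swapped at runtime, so dereference it under
	 * RCU and fall back to XDP_PASS semantics if none is attached.
	 */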
	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (!xdp_prog) {
		rcu_read_unlock();
		return ICE_XDP_PASS;
	}

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	rcu_read_unlock();
	return result;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_xmit = 0;
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		if (cleaned_count >= ICE_RX_BUF_WRITE) {
			failure |= ice_alloc_rx_bufs_zc(rx_ring,
							cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
				   ICE_RX_FLX_DESC_PKT_LEN_M;
		if (!size)
			break;

		rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
		rx_buf->xdp->data_end = rx_buf->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);

		xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
		if (xdp_res) {
			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(rx_buf->xdp);

			rx_buf->xdp = NULL;
			total_rx_bytes += size;
			total_rx_packets++;
			cleaned_count++;

			ice_bump_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, rx_buf);
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			break;
		}

		cleaned_count++;
		ice_bump_ntc(rx_ring);

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
				       ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

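	/* Depending on the verdicts seen above, flush the redirect maps
	 * and/or bump the XDP Tx ring tail once for the whole batch.
	 */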
	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_zc - Transmit frames from the AF_XDP Tx ring
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if transmission is done.
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
	struct ice_tx_desc *tx_desc = NULL;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (likely(budget-- > 0)) {
		struct ice_tx_buf *tx_buf;

		if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);

		tx_buf->bytecount = desc.len;

		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

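	/* Bump the hardware tail once for the whole batch and let the pool
	 * publish the consumed Tx descriptors back to user space.
	 */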
	if (tx_desc) {
		ice_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(xdp_ring->xsk_pool);
	}

	return budget > 0 && work_done;
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);
}

/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
	int total_packets = 0, total_bytes = 0;
	s16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	u32 xsk_frames = 0;
	bool xmit_done;

	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
	tx_buf = &xdp_ring->tx_buf[ntc];
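	/* Work with an index offset by -count so that the wrap check below is
	 * a simple "!ntc"; the real ring index is ntc + count.
	 */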
	ntc -= xdp_ring->count;

	do {
		if (!(tx_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		total_bytes += tx_buf->bytecount;
		total_packets++;

		if (tx_buf->raw_buf) {
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
			tx_buf->raw_buf = NULL;
		} else {
			xsk_frames++;
		}

		tx_desc->cmd_type_offset_bsz = 0;
		tx_buf++;
		tx_desc++;
		ntc++;

		if (unlikely(!ntc)) {
			ntc -= xdp_ring->count;
			tx_buf = xdp_ring->tx_buf;
			tx_desc = ICE_TX_DESC(xdp_ring, 0);
		}

		prefetch(tx_desc);

	} while (likely(--budget));

	ntc += xdp_ring->count;
	xdp_ring->next_to_clean = ntc;

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

	return budget > 0 && xmit_done;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *ring;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_txq)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have an AF_XDP buffer pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->xsk_pools)
		return false;

	for (i = 0; i < vsi->num_xsk_pools; i++) {
		if (vsi->xsk_pools[i])
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->xdp)
			continue;

		rx_buf->xdp = NULL;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->raw_buf)
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
		else
			xsk_frames++;

		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}