// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

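/* Return the AF_XDP buffer pool bound to @ring's queue, or NULL if no XDP
 * program is loaded or zero-copy is not enabled for that queue.
 */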
struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}

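/* Enable an AF_XDP buffer pool on queue @qid: DMA-map the pool, mark the
 * queue as zero-copy and, if the interface is running, restart the ring
 * pair and kick its NAPI context so that receive processing starts.
 */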
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

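/* Disable the AF_XDP buffer pool on queue @qid and undo its DMA mapping,
 * quiescing the ring pair around the teardown if the interface is running.
 */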
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

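/* Setup entry point for AF_XDP buffer pools: a non-NULL @pool enables
 * zero-copy on queue @qid, a NULL @pool disables it.
 */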
int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		ixgbe_xsk_pool_disable(adapter, qid);
}

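/* Run the attached XDP program on a zero-copy buffer and map its verdict
 * to an IXGBE_XDP_* result. XDP_REDIRECT is handled first as the common
 * case for AF_XDP; a redirect that fails with -ENOBUFS on a need_wakeup
 * pool yields IXGBE_XDP_EXIT so the Rx loop stops early and the frame can
 * be retried on the next poll.
 */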
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ixgbe_ring *ring;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return IXGBE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = IXGBE_XDP_EXIT;
		else
			result = IXGBE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		ring = ixgbe_determine_xdp_ring(adapter);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = IXGBE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}

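/* Allocate @count buffers from the XSK pool and post them on the Rx ring,
 * bumping the tail register if any descriptors were refilled. Returns true
 * only if all requested buffers could be allocated.
 */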
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

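/* Copy a received zero-copy buffer, including any XDP metadata placed in
 * front of the packet, into a freshly allocated skb for the XDP_PASS path.
 */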
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      const struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}

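/* Advance next_to_clean with wrap-around and prefetch the descriptor that
 * will be inspected next.
 */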
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

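/* Zero-copy Rx poll loop: refill buffers, run XDP on each completed
 * descriptor and then drop, transmit, redirect, or build an skb for the
 * stack. Returns the number of packets processed; when the pool does not
 * use need_wakeup, a refill failure returns the full budget so NAPI keeps
 * polling.
 */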
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == IXGBE_XDP_EXIT) {
			failure = true;
			break;
		} else if (xdp_res == IXGBE_XDP_CONSUMED) {
			xsk_buff_free(bi->xdp);
		} else if (xdp_res == IXGBE_XDP_PASS) {
			goto construct_skb;
		}

		bi->xdp = NULL;
		total_rx_packets++;
		total_rx_bytes += size;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);

		ixgbe_xdp_ring_update_tail_locked(ring);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

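/* Release any XSK buffers still posted on the Rx ring back to the pool. */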
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

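/* Pull frames from the XSK Tx ring and post them on the hardware ring,
 * consuming at most @budget descriptors. Returns false when Tx work
 * remains because the hardware ring ran out of free descriptors.
 */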
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!netif_carrier_ok(xdp_ring->netdev))
			break;

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}

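/* Free an XDP_TX frame and undo its DMA mapping once the hardware is done
 * with it.
 */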
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

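/* Reclaim completed Tx descriptors: XDP_TX frames are returned and
 * unmapped, while AF_XDP frames are batched and reported back to the pool
 * via xsk_tx_completed(). Finishes by queueing more Tx work through
 * ixgbe_xmit_zc().
 */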
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

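/* Wakeup entry point for AF_XDP: validate the queue state and, unless NAPI
 * is already scheduled, rearm the queue's interrupt so the poll loop runs
 * and processes the pending work.
 */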
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -EINVAL;

	if (qid >= adapter->num_xdp_queues)
		return -EINVAL;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

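/* Drop all frames still pending on the XDP Tx ring at teardown, reporting
 * AF_XDP descriptors back to the pool as completed.
 */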
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}