// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_xdp.h"

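/* Check that a completion request id reported by the device points at an
 * in-flight XDP frame; anything else is handed to handle_invalid_req_id().
 */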
static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info;

	tx_info = &tx_ring->tx_buffer_info[req_id];
	if (likely(tx_info->xdpf))
		return 0;

	return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
}

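/* Map an xdp_frame for transmission. On LLQ queues, up to tx_max_header_size
 * bytes are pushed directly to the device; any remainder is DMA mapped.
 */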
static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
				struct ena_tx_buffer *tx_info,
				struct xdp_frame *xdpf,
				struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	int push_len = 0;
	dma_addr_t dma;
	void *data;
	u32 size;

	tx_info->xdpf = xdpf;
	data = tx_info->xdpf->data;
	size = tx_info->xdpf->len;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Designate part of the packet for LLQ */
		push_len = min_t(u32, size, tx_ring->tx_max_header_size);

		ena_tx_ctx->push_header = data;

		size -= push_len;
		data += push_len;
	}

	ena_tx_ctx->header_len = push_len;

	if (size > 0) {
		dma = dma_map_single(tx_ring->dev,
				     data,
				     size,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 0;

		ena_buf = tx_info->bufs;
		ena_buf->paddr = dma;
		ena_buf->len = size;

		ena_tx_ctx->ena_bufs = ena_buf;
		ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
	}

	return 0;

error_report_dma_error:
	ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
			  &tx_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	return -EINVAL;
}

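/* Transmit a single xdp_frame on the given XDP TX ring. Callers serialize
 * access to the ring via its xdp_tx_lock (see ena_xdp_xmit() below).
 */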
int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
		       struct ena_adapter *adapter,
		       struct xdp_frame *xdpf,
		       int flags)
{
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	u16 next_to_use, req_id;
	int rc;

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
	if (unlikely(rc))
		goto err;

	ena_tx_ctx.req_id = req_id;

	rc = ena_xmit_common(adapter,
			     tx_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdpf->len);
	if (rc)
		goto error_unmap_dma;

	/* trigger the dma engine. ena_ring_tx_doorbell()
	 * issues a memory barrier internally.
	 */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(tx_ring);

	return rc;

error_unmap_dma:
	ena_unmap_tx_buff(tx_ring, tx_info);
err:
	tx_info->xdpf = NULL;

	return rc;
}

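/* Batch-transmit entry point (.ndo_xdp_xmit) for frames redirected to this
 * device via XDP_REDIRECT.
 */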
int ena_xdp_xmit(struct net_device *dev, int n,
		 struct xdp_frame **frames, u32 flags)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_ring *tx_ring;
	int qid, i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return -ENETDOWN;

	/* We assume that all rings have the same XDP program */
	if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
		return -ENXIO;

	qid = smp_processor_id() % adapter->xdp_num_queues;
	qid += adapter->xdp_first_ring;
	tx_ring = &adapter->tx_ring[qid];

	/* Other CPU ids might try to send through this queue */
	spin_lock(&tx_ring->xdp_tx_lock);

	for (i = 0; i < n; i++) {
		if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
			break;
		nxmit++;
	}

	/* Ring doorbell to make device aware of the packets */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(tx_ring);

	spin_unlock(&tx_ring->xdp_tx_lock);

	/* Return number of packets sent */
	return nxmit;
}

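/* XDP TX rings are laid out right after the regular IO rings:
 * rings [num_io_queues, 2 * num_io_queues) are reserved for XDP.
 */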
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

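/* Allocate TX resources for all XDP rings and create the device-side
 * TX queues; on queue-creation failure the allocated resources are freed.
 */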
int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	u32 xdp_first_ring = adapter->xdp_first_ring;
	u32 xdp_num_queues = adapter->xdp_num_queues;
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

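/* Swap the XDP program on RX rings [first, count). Installing a program
 * registers the ring's rxq info and reserves XDP_PACKET_HEADROOM; removing
 * it unregisters the info and restores the default NET_SKB_PAD headroom.
 */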
void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first, int count)
{
	struct bpf_prog *old_bpf_prog;
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);

		if (!old_bpf_prog && prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else if (old_bpf_prog && !prog) {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = NET_SKB_PAD;
		}
	}
}

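/* Install @prog as the adapter-wide XDP program, propagate it to all RX
 * rings and drop the reference to the previous program, if any.
 */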
static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

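/* Detach the XDP program and release the dedicated XDP TX queues,
 * restarting the interface if it was up.
 */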
static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

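/* Handle XDP_SETUP_PROG: attach or detach an XDP program, creating or
 * destroying the dedicated XDP TX queues as needed and adjusting max_mtu.
 */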
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
			xdp_features_set_redirect_target(netdev, false);
		} else if (old_bpf_prog) {
			xdp_features_clear_redirect_target(netdev);
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "XDP program is set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. Current queue count: %d, maximum queue count: %d\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback; the kernel uses it to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}
	return 0;
}

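/* Reclaim completed XDP TX descriptors, up to @budget frames: unmap each
 * frame, return it to the XDP memory allocator and recycle its request id.
 */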
static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!tx_ring))
		return 0;
	next_to_clean = tx_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc) {
			if (unlikely(rc == -EINVAL))
				handle_invalid_req_id(tx_ring, req_id, NULL, true);
			break;
		}

		/* validate that the request id points to a valid xdp_frame */
		rc = validate_xdp_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		xdpf = tx_info->xdpf;

		tx_info->xdpf = NULL;
		tx_info->last_jiffies = 0;
		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d xdpf %p completed\n", tx_ring->qid,
			  xdpf);

		tx_pkts++;
		total_done += tx_info->tx_descs;

		xdp_return_frame(xdpf);
		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	return tx_pkts;
}

/* This is the XDP napi callback. XDP queues use a napi callback separate
 * from the one used by the Rx/Tx queues.
 */
int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *tx_ring;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(tx_ring, xdp_budget);

	/* If the device is about to reset or is down, avoid unmasking the
	 * interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(tx_ring, NULL);
		ena_update_ring_numa_node(tx_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->tx_stats.last_napi_jiffies = jiffies;

	return ret;
}