/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_XDP_H
#define ENA_XDP_H

#include "ena_netdev.h"
#include <linux/bpf_trace.h>

/* The maximum MTU supported with XDP is derived from the buffer size we
 * share with the device (ENA_PAGE_SIZE), minus the Ethernet header with an
 * optional VLAN tag, the frame check sequence (FCS), the headroom an XDP
 * program expects in front of the frame, and the skb_shared_info tailroom
 * needed to later build an skb from the buffer.
 */
#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN -	\
			 VLAN_HLEN - XDP_PACKET_HEADROOM -		\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
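
/* Rough worked example (values are config/arch dependent, not guaranteed):
 * with 4 KiB pages ENA_PAGE_SIZE is 4096, ETH_HLEN is 14, ETH_FCS_LEN is 4,
 * VLAN_HLEN is 4 and XDP_PACKET_HEADROOM is 256; on a typical x86_64 build
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) comes to about 320, which
 * puts ENA_XDP_MAX_MTU in the neighborhood of
 * 4096 - 14 - 4 - 4 - 256 - 320 = 3498 bytes.
 */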

#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
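
/* Illustrative example (the actual queue layout is decided elsewhere in the
 * driver): with xdp_first_ring == 8 and xdp_num_queues == 8, ring indices
 * 8..15 are the XDP TX rings and ENA_IS_XDP_INDEX() evaluates true only for
 * them.
 */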

enum ENA_XDP_ACTIONS {
	ENA_XDP_PASS		= 0,
	ENA_XDP_TX		= BIT(0),
	ENA_XDP_REDIRECT	= BIT(1),
	ENA_XDP_DROP		= BIT(2)
};

#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
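
/* Sketch of how the bit-flag verdicts are meant to be consumed (assumed
 * caller shape, not a definitive copy of the driver's RX poll loop):
 *
 *	xdp_flags |= ena_xdp_execute(rx_ring, &xdp);
 *	...
 *	if (xdp_flags & ENA_XDP_REDIRECT)
 *		xdp_do_flush();
 *	if (xdp_flags & ENA_XDP_TX)
 *		write the TX doorbell once for the whole poll;
 *
 * Encoding ENA_XDP_TX and ENA_XDP_REDIRECT as distinct bits lets the caller
 * OR per-packet verdicts together and flush each forwarding path only once
 * per NAPI budget.
 */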

int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first, int count);
int ena_xdp_io_poll(struct napi_struct *napi, int budget);
int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
		       struct ena_adapter *adapter,
		       struct xdp_frame *xdpf,
		       int flags);
int ena_xdp_xmit(struct net_device *dev, int n,
		 struct xdp_frame **frames, u32 flags);
int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);

enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,
	ENA_XDP_CURRENT_MTU_TOO_LARGE,
	ENA_XDP_NO_ENOUGH_QUEUES,
};

static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
	return !!adapter->xdp_bpf_prog;
}

static inline bool ena_xdp_present_ring(struct ena_ring *ring)
{
	return !!ring->xdp_bpf_prog;
}

static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
					     u32 queues)
{
	return 2 * queues <= adapter->max_num_io_queues;
}
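
/* Each channel that runs XDP needs a dedicated XDP TX ring on top of its
 * regular queue pair, so enabling XDP roughly doubles IO queue consumption.
 * Illustrative example (device limits vary): with max_num_io_queues == 32,
 * at most 16 channels can be used while XDP is attached, since
 * 2 * 16 <= 32 but 2 * 17 > 32.
 */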

/* Check whether XDP can currently be attached: the MTU must fit a single
 * buffer with XDP headroom/tailroom, and enough IO queues must be available
 * for the extra XDP TX rings.
 */
static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
{
	enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;

	if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
		rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
	else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
		rc = ENA_XDP_NO_ENOUGH_QUEUES;

	return rc;
}
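
/* Minimal sketch of how an attach path could act on the verdict (assumed
 * caller shape, not necessarily how ena_xdp() handles it in the driver):
 *
 *	switch (ena_xdp_allowed(adapter)) {
 *	case ENA_XDP_ALLOWED:
 *		break;
 *	case ENA_XDP_CURRENT_MTU_TOO_LARGE:
 *		NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
 *		return -EINVAL;
 *	case ENA_XDP_NO_ENOUGH_QUEUES:
 *		NL_SET_ERR_MSG_MOD(bpf->extack, "Not enough IO queues for XDP");
 *		return -EINVAL;
 *	}
 */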

/* Run the attached XDP program on @xdp, translate the program's verdict into
 * the driver's ENA_XDP_* flags and bump the matching RX statistic. XDP_TX
 * frames are queued on the paired XDP TX ring under xdp_tx_lock.
 */
static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	u32 verdict = ENA_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;

	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		return verdict;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = ENA_XDP_DROP;
			break;
		}

		/* Find xmit queue */
		xdp_ring = rx_ring->xdp_ring;

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
				       XDP_XMIT_FLUSH))
			xdp_return_frame(xdpf);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		verdict = ENA_XDP_TX;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			verdict = ENA_XDP_REDIRECT;
			break;
		}
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		verdict = ENA_XDP_PASS;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
		verdict = ENA_XDP_DROP;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);

	return verdict;
}
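
/* Minimal sketch of a caller in the RX clean path (the buffer-handling
 * steps and field names around the call are assumptions for illustration,
 * not the driver's code):
 *
 *	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
 *	xdp_prepare_buff(&xdp, page_address(page), headroom, len, false);
 *	verdict = ena_xdp_execute(rx_ring, &xdp);
 *	if (verdict & ENA_XDP_FORWARDED)
 *		the buffer now belongs to the TX/redirect path;
 *	else if (verdict & ENA_XDP_DROP)
 *		recycle the RX buffer in place;
 *	else
 *		ENA_XDP_PASS: build an skb and hand the frame to the stack;
 */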
#endif /* ENA_XDP_H */