/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_XDP_H
#define ENA_XDP_H

#include "ena_netdev.h"
#include <linux/bpf_trace.h>

/* The max MTU size is configured to be the ethernet frame size without
 * the overhead of the ethernet header, which can have a VLAN header, and
 * a frame check sequence (FCS).
 * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
 */
#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
			 VLAN_HLEN - XDP_PACKET_HEADROOM - \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

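/* True when @index refers to one of the IO queues reserved for XDP TX,
 * i.e. it falls inside [xdp_first_ring, xdp_first_ring + xdp_num_queues).
 */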
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))

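/* Verdicts returned by ena_xdp_execute(). ENA_XDP_TX, ENA_XDP_REDIRECT and
 * ENA_XDP_DROP are bit flags so callers can test and combine them
 * (see ENA_XDP_FORWARDED below).
 */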
enum ENA_XDP_ACTIONS {
	ENA_XDP_PASS	 = 0,
	ENA_XDP_TX	 = BIT(0),
	ENA_XDP_REDIRECT = BIT(1),
	ENA_XDP_DROP	 = BIT(2)
};

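/* Verdicts for which the frame was forwarded (XDP_TX or XDP_REDIRECT)
 * rather than passed up the stack or dropped.
 */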
#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)

int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first, int count);
int ena_xdp_io_poll(struct napi_struct *napi, int budget);
int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
		       struct ena_adapter *adapter,
		       struct xdp_frame *xdpf,
		       int flags);
int ena_xdp_xmit(struct net_device *dev, int n,
		 struct xdp_frame **frames, u32 flags);
int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);

enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,
	ENA_XDP_CURRENT_MTU_TOO_LARGE,
	ENA_XDP_NO_ENOUGH_QUEUES,
};

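/* Return true if an XDP program is attached to the adapter. */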
static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
	return !!adapter->xdp_bpf_prog;
}

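/* Return true if an XDP program is attached to this specific ring. */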
static inline bool ena_xdp_present_ring(struct ena_ring *ring)
{
	return !!ring->xdp_bpf_prog;
}

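/* XDP reserves a dedicated TX queue for every regular IO queue, so a
 * requested count of @queues is legal only if twice that number still
 * fits within max_num_io_queues.
 */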
static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
					     u32 queues)
{
	return 2 * queues <= adapter->max_num_io_queues;
}

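/* Check whether an XDP program may be attached with the current MTU and
 * queue configuration.
 */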
static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
{
	enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;

	if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
		rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
	else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
		rc = ENA_XDP_NO_ENOUGH_QUEUES;

	return rc;
}

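/* Run the attached XDP program on the received buffer and act on its
 * verdict: transmit on the paired XDP TX queue, redirect, drop or pass.
 * The matching xdp_* statistic is incremented before the ENA_XDP_*
 * verdict is returned to the RX path.
 */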
static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	u32 verdict = ENA_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;

	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		return verdict;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = ENA_XDP_DROP;
			break;
		}

		/* Find xmit queue */
		xdp_ring = rx_ring->xdp_ring;

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
				       XDP_XMIT_FLUSH))
			xdp_return_frame(xdpf);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		verdict = ENA_XDP_TX;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			verdict = ENA_XDP_REDIRECT;
			break;
		}
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		verdict = ENA_XDP_PASS;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
		verdict = ENA_XDP_DROP;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);

	return verdict;
}
#endif /* ENA_XDP_H */