/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__

#include <linux/indirect_call_wrapper.h>

#include "en.h"
#include "en/txrx.h"

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

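/* With 16-byte data segments (MLX5_SEND_WQE_DS) and the 4-byte inline segment
 * header, MLX5E_XDP_INLINE_WQE_SZ_THRSD below works out to 252 bytes of
 * packet data.
 */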
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
	(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
	 sizeof(struct mlx5_wqe_inline_seg))

struct mlx5e_xdp_buff {
	struct xdp_buff xdp;
	struct mlx5_cqe64 *cqe;
	struct mlx5e_rq *rq;
};
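/* Carrying the cqe and rq handles alongside the xdp_buff lets the XDP RX
 * metadata callbacks (mlx5e_xdp_metadata_ops, declared below) reach the
 * completion entry, e.g. for hardware RX timestamp and hash lookups.
 */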

/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean up things in a proper way.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * returned.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};

/* xmit_mode entry is pushed to the fifo per packet, followed by multiple
 * entries, as follows:
 *
 * MLX5E_XDP_XMIT_MODE_FRAME:
 *    xdpf, dma_addr_1, dma_addr_2, ... , dma_addr_num.
 *    'num' is derived from xdpf.
 *
 * MLX5E_XDP_XMIT_MODE_PAGE:
 *    num, page_1, page_2, ... , page_num.
 *
 * MLX5E_XDP_XMIT_MODE_XSK:
 *    none.
 */
#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4

union mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct xdp_frame *xdpf;
		dma_addr_t dma_addr;
	} frame;
	union {
		struct mlx5e_rq *rq;
		u8 num;
		struct page *page;
	} page;
};
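/* Illustrative producer-side sketch for MLX5E_XDP_XMIT_MODE_PAGE (the names
 * fifo, num_pages and pages are placeholders; the real transmit paths live in
 * en/xdp.c). Each packet pushes its mode entry first, then the per-mode
 * entries in the order described above:
 *
 *	mlx5e_xdpi_fifo_push(fifo, (union mlx5e_xdp_info)
 *			     { .mode = MLX5E_XDP_XMIT_MODE_PAGE });
 *	mlx5e_xdpi_fifo_push(fifo, (union mlx5e_xdp_info)
 *			     { .page.num = num_pages });
 *	for (i = 0; i < num_pages; i++)
 *		mlx5e_xdpi_fifo_push(fifo, (union mlx5e_xdp_info)
 *				     { .page.page = pages[i] });
 */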

struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags);

extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;

INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
							   struct mlx5e_xmit_data *xdptxd,
							   int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
						    struct mlx5e_xmit_data *xdptxd,
						    int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);

	if (priv->channels.params.xdp_prog)
		set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
	if (priv->channels.params.xdp_prog)
		clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);

	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
	/* Let other device's napi(s) and XSK wakeups see our new state. */
	synchronize_net();
}

static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}

static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
	if (sq->doorbell_cseg) {
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
		sq->doorbell_cseg = NULL;
	}
}

/* Enable inline WQEs to shift some load from a congested HCA (HW) to
 * a less congested cpu (SW). The watermarks below add hysteresis: inlining
 * turns on only once enough descriptors are outstanding (the HCA is falling
 * behind) and turns back off only once the queue has almost drained, so the
 * decision doesn't oscillate.
 */
static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
{
	u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;

#define MLX5E_XDP_INLINE_WATERMARK_LOW	10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128

	if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
		return false;

	if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
		return true;

	return cur;
}

static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
	/* When inlining, treat the session as full once a worst-case inline
	 * packet (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT descriptors) might no
	 * longer fit.
	 */
	if (session->inline_on)
		return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
		       max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;

	return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
}

struct mlx5e_xdp_wqe_info {
	u8 num_wqebbs;
	u8 num_pkts;
};

static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
			 struct mlx5e_xmit_data *xdptxd,
			 struct mlx5e_xdpsq_stats *stats)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg =
		(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
	u32 dma_len = xdptxd->len;

	session->pkt_count++;
	session->bytes_count += dma_len;

	/* Small packets are copied straight into the WQE as an inline
	 * segment, saving the HCA a DMA fetch at the cost of a CPU memcpy.
	 */
	if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
		struct mlx5_wqe_inline_seg *inline_dseg =
			(struct mlx5_wqe_inline_seg *)dseg;
		u16 ds_len = sizeof(*inline_dseg) + dma_len;
		u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);

		inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
		memcpy(inline_dseg->data, xdptxd->data, dma_len);

		session->ds_count += ds_cnt;
		stats->inlnw++;
		return;
	}

	/* Otherwise point a regular data segment at the DMA-mapped buffer. */
	dseg->addr = cpu_to_be64(xdptxd->dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;
}

/* The xdpi fifo is a power-of-two ring: free-running producer/consumer
 * counters are masked on every access, so push and pop never wrap explicitly.
 */
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
		     union mlx5e_xdp_info xi)
{
	u32 i = (*fifo->pc)++ & fifo->mask;

	fifo->xi[i] = xi;
}

static inline union mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
	return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
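
/* Illustrative consumer-side sketch (placeholder code; the real completion
 * handling lives in en/xdp.c): entries are popped in exactly the order they
 * were pushed, starting with the mode entry:
 *
 *	union mlx5e_xdp_info xi = mlx5e_xdpi_fifo_pop(fifo);
 *
 *	switch (xi.mode) {
 *	case MLX5E_XDP_XMIT_MODE_PAGE:
 *		num = mlx5e_xdpi_fifo_pop(fifo).page.num;
 *		... pop 'num' page entries and release each page ...
 *		break;
 *	...
 *	}
 */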
#endif