/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__

#include <linux/indirect_call_wrapper.h>

#include "en.h"
#include "en/txrx.h"

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
	(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
	 sizeof(struct mlx5_wqe_inline_seg))

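/* A worked example, assuming the usual mlx5 definitions (MLX5_SEND_WQE_DS of
 * 16 bytes and a 4-byte struct mlx5_wqe_inline_seg header): the threshold
 * above evaluates to 16 * 16 - 4 = 252 bytes, the largest frame that can be
 * copied inline without exceeding MLX5E_XDP_INLINE_WQE_MAX_DS_CNT data
 * segments.
 */

/* Per-packet RX context: wraps the xdp_buff seen by the XDP program together
 * with the CQE and RQ it originated from, so that helpers (e.g. the RX
 * metadata ops declared below) can reach them from the xdp_buff alone.
 */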
struct mlx5e_xdp_buff {
	struct xdp_buff xdp;
	struct mlx5_cqe64 *cqe;
	struct mlx5e_rq *rq;
};

/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean up properly.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * returned.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};

/* xmit_mode entry is pushed to the fifo per packet, followed by multiple
 * entries, as follows:
 *
 * MLX5E_XDP_XMIT_MODE_FRAME:
 *    xdpf, dma_addr_1, dma_addr_2, ... , dma_addr_num.
 *    'num' is derived from xdpf.
 *
 * MLX5E_XDP_XMIT_MODE_PAGE:
 *    num, page_1, page_2, ... , page_num.
 *
 * MLX5E_XDP_XMIT_MODE_XSK:
 *    none.
 */
union mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct xdp_frame *xdpf;
		dma_addr_t dma_addr;
	} frame;
	union {
		struct mlx5e_rq *rq;
		u8 num;
		struct page *page;
	} page;
};
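
/* A sketch of the resulting fifo contents (not driver code): for one XDP_TX
 * packet built from two pages, the xmit path pushes, in order,
 *	{ .mode = MLX5E_XDP_XMIT_MODE_PAGE }, { .page.num = 2 },
 *	{ .page.page = page1 }, { .page.page = page2 },
 * and the completion path pops the same entries back in that order.
 */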

struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags);

extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;

INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
							  struct mlx5e_xmit_data *xdptxd,
							  int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
						    struct mlx5e_xmit_data *xdptxd,
						    int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
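
/* Dispatch sketch (assumed caller pattern in en/xdp.c): the handlers above
 * are reached through function pointers on the SQ, wrapped in
 * INDIRECT_CALL_2() so that retpoline builds can avoid the indirect branch:
 *
 *	ret = INDIRECT_CALL_2(sq->xmit_xdp_frame,
 *			      mlx5e_xmit_xdp_frame_mpwqe, mlx5e_xmit_xdp_frame,
 *			      sq, xdptxd, check_result);
 */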

static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);

	if (priv->channels.params.xdp_prog)
		set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
	if (priv->channels.params.xdp_prog)
		clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);

	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
	/* Let other devices' napi(s) and XSK wakeups see our new state. */
	synchronize_net();
}
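
/* The two bits serve different purposes: XDP_TX_ENABLED gates the transmit
 * paths across channel reconfiguration (mlx5e_xdp_xmit() bails out while it
 * is clear), whereas XDP_ACTIVE additionally tracks whether an XDP program
 * is currently attached.
 */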

static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}

static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
{
	return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
}

static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
	if (sq->doorbell_cseg) {
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
		sq->doorbell_cseg = NULL;
	}
}
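
/* Usage sketch: the xmit handlers leave the control segment of the last
 * posted WQE in sq->doorbell_cseg, so the doorbell is rung once per batch
 * rather than once per packet:
 *
 *	(per frame)  sq->xmit_xdp_frame(sq, ...);  // updates doorbell_cseg
 *	(per batch)  mlx5e_xmit_xdp_doorbell(sq);  // single flush to HW
 */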

/* Enable inline WQEs to shift some load from a congested HCA (HW) to
 * a less congested cpu (SW).
 */
static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
{
	u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;

#define MLX5E_XDP_INLINE_WATERMARK_LOW	10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128

	if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
		return false;

	if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
		return true;

	return cur;
}
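
/* The two watermarks form a hysteresis band: inlining turns off only once
 * MLX5E_XDP_INLINE_WATERMARK_LOW or fewer packets are in flight, and turns
 * on only once MLX5E_XDP_INLINE_WATERMARK_HIGH or more are outstanding; in
 * between (e.g. outstanding == 64) the current state is kept.
 */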

static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
	if (session->inline_on)
		return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
		       max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;

	return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
}
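
/* With inlining on, the session is considered full as soon as a worst-case
 * inline packet (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT data segments) might no
 * longer fit, so a single packet never has to span two sessions.
 */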

struct mlx5e_xdp_wqe_info {
	u8 num_wqebbs;
	u8 num_pkts;
};

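/* Add one packet to the open MPWQE session: frames no larger than
 * MLX5E_XDP_INLINE_WQE_SZ_THRSD are copied inline into the WQE when inlining
 * is enabled; otherwise a regular data segment pointing at the DMA-mapped
 * buffer is written.
 */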
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
			 struct mlx5e_xmit_data *xdptxd,
			 struct mlx5e_xdpsq_stats *stats)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg =
		(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
	u32 dma_len = xdptxd->len;

	session->pkt_count++;
	session->bytes_count += dma_len;

	if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
		struct mlx5_wqe_inline_seg *inline_dseg =
			(struct mlx5_wqe_inline_seg *)dseg;
		u16 ds_len = sizeof(*inline_dseg) + dma_len;
		u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);

		inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
		memcpy(inline_dseg->data, xdptxd->data, dma_len);

		session->ds_count += ds_cnt;
		stats->inlnw++;
		return;
	}

	dseg->addr       = cpu_to_be64(xdptxd->dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);
	dseg->lkey       = sq->mkey_be;
	session->ds_count++;
}

static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
		     union mlx5e_xdp_info xi)
{
	u32 i = (*fifo->pc)++ & fifo->mask;

	fifo->xi[i] = xi;
}

static inline union mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
	return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
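
/* Completion sketch (see the layout comment above union mlx5e_xdp_info):
 * consumers pop the xmit_mode entry first, then the per-mode payload, e.g.
 *
 *	xi = mlx5e_xdpi_fifo_pop(fifo);
 *	if (xi.mode == MLX5E_XDP_XMIT_MODE_FRAME) {
 *		xdpf = mlx5e_xdpi_fifo_pop(fifo).frame.xdpf;
 *		dma_addr = mlx5e_xdpi_fifo_pop(fifo).frame.dma_addr;
 *		// ... one dma_addr per fragment, 'num' derived from xdpf
 *	}
 */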
#endif