1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "en/params.h"
5 #include "en/txrx.h"
6 #include "en/port.h"
7 #include "en_accel/en_accel.h"
8 #include "en_accel/ipsec.h"
9 #include <net/page_pool/types.h>
10 #include <net/xdp_sock_drv.h>
11 
12 static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
13 {
14 	u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
15 
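	/* A zero capability value is treated as "no minimum reported": fall
	 * back to the default of 4KB pages (page shift 12).
	 */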
16 	return min_page_shift ? : 12;
17 }
18 
19 u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
20 {
21 	u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
22 	u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
23 
24 	/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
25 	if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
26 		min_page_shift = req_page_shift;
27 
28 	return max(req_page_shift, min_page_shift);
29 }
30 
31 enum mlx5e_mpwrq_umr_mode
32 mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
33 {
34 	/* Different memory management schemes use different mechanisms to map
35 	 * user-mode memory. The stricter the guarantees we have, the faster
36 	 * the mechanism we can use:
37 	 * 1. MTT - direct mapping in page granularity.
38 	 * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
39 	 *    all mappings have the same size.
40 	 * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
41 	 *    mappings can have different sizes.
42 	 */
43 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
44 	bool unaligned = xsk ? xsk->unaligned : false;
45 	bool oversized = false;
46 
47 	if (xsk) {
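		/* "oversized" means the UMR page (1 << page_shift) is bigger
		 * than the XSK frame. The opposite is not expected, because
		 * page_shift is derived from the chunk size, hence the warning.
		 */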
48 		oversized = xsk->chunk_size < (1 << page_shift);
49 		WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
50 	}
51 
52 	/* XSK frame size doesn't match the UMR page size, either because the
53 	 * frame size is not a power of two, or it's smaller than the minimal
54 	 * page size supported by the firmware.
55 	 * It's possible to receive packets bigger than MTU in certain setups.
56 	 * To avoid writing over the XSK frame boundary, the top region of each
57 	 * stride is mapped to a garbage page, resulting in two mappings of
58 	 * different sizes per frame.
59 	 */
60 	if (oversized) {
61 		/* An optimization for frame sizes equal to 3 * power_of_two.
62 		 * 3 KSMs point to the frame, and one KSM points to the garbage
63 		 * page, which works faster than KLM.
64 		 */
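		/* Example: a 3072-byte frame is covered by three 1024-byte
		 * KSMs, with a fourth KSM pointing to the garbage page.
		 */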
65 		if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
66 			return MLX5E_MPWRQ_UMR_MODE_TRIPLE;
67 
68 		return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
69 	}
70 
71 	/* XSK frames can start at arbitrary unaligned locations, but they all
72 	 * have the same size, which is a power of two. This allows optimizing to
73 	 * one KSM per frame.
74 	 */
75 	if (unaligned)
76 		return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;
77 
78 	/* XSK: frames are naturally aligned, MTT can be used.
79 	 * Non-XSK: Allocations happen in units of CPU pages, therefore, the
80 	 * mappings are naturally aligned.
81 	 */
82 	return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
83 }
84 
85 u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
86 {
87 	switch (mode) {
88 	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
89 		return sizeof(struct mlx5_mtt);
90 	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
91 		return sizeof(struct mlx5_ksm);
92 	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
93 		return sizeof(struct mlx5_klm) * 2;
94 	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
95 		return sizeof(struct mlx5_ksm) * 4;
96 	}
97 	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
98 	return 0;
99 }
100 
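/* Log2 of the number of bytes covered by one MPWQE: ilog2 of the number of UMR
 * entries that fit into the largest UMR WQE, plus the page shift, capped at
 * MLX5_MPWRQ_MAX_LOG_WQE_SZ.
 */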
101 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
102 			  enum mlx5e_mpwrq_umr_mode umr_mode)
103 {
104 	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
105 	u8 max_pages_per_wqe, max_log_mpwqe_size;
106 	u16 max_wqe_size;
107 
108 	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
109 	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
110 	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
111 				       MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
112 	max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
113 
114 	WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
115 
116 	return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
117 }
118 
119 u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
120 			     enum mlx5e_mpwrq_umr_mode umr_mode)
121 {
122 	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
123 	u8 pages_per_wqe;
124 
125 	pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
126 
127 	/* Two MTTs are needed to form an octword. The number of MTTs is encoded
128 	 * in octwords in a UMR WQE, so we need at least two to avoid mapping
129 	 * garbage addresses.
130 	 */
131 	if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
132 		pages_per_wqe = 2;
133 
134 	/* Sanity check for further calculations to succeed. */
135 	BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
136 	if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
137 		return MLX5_MPWRQ_MAX_PAGES_PER_WQE;
138 
139 	return pages_per_wqe;
140 }
141 
142 u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
143 			   enum mlx5e_mpwrq_umr_mode umr_mode)
144 {
145 	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
146 	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
147 	u16 umr_wqe_sz;
148 
149 	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
150 		ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
151 
152 	WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
153 
154 	return umr_wqe_sz;
155 }
156 
157 u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
158 			  enum mlx5e_mpwrq_umr_mode umr_mode)
159 {
160 	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
161 			    MLX5_SEND_WQE_BB);
162 }
163 
164 u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
165 			    enum mlx5e_mpwrq_umr_mode umr_mode)
166 {
167 	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
168 
169 	/* Add another page as a buffer between WQEs. This page will absorb
170 	 * write overflow by the hardware when receiving packets larger than
171 	 * MTU. These oversized packets are dropped by the driver at a later
172 	 * stage.
173 	 */
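	/* The result is rounded up to a multiple of the number of UMR entries
	 * that fit into one WQE basic block (MLX5_SEND_WQE_BB bytes).
	 */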
174 	return ALIGN(pages_per_wqe + 1,
175 		     MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
176 }
177 
178 u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
179 				enum mlx5e_mpwrq_umr_mode umr_mode)
180 {
181 	/* Same limits apply to KSMs and KLMs. */
182 	u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
183 			    1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
184 
185 	switch (umr_mode) {
186 	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
187 		return MLX5E_MAX_RQ_NUM_MTTS;
188 	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
189 		return klm_limit;
190 	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
191 		/* Each entry is two KLMs. */
192 		return klm_limit / 2;
193 	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
194 		/* Each entry is four KSMs. */
195 		return klm_limit / 4;
196 	}
197 	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
198 	return 0;
199 }
200 
201 static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
202 				      enum mlx5e_mpwrq_umr_mode umr_mode)
203 {
204 	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
205 	u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);
206 
207 	return ilog2(max_entries / mtts_per_wqe);
208 }
209 
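/* Log2 of the maximum number of MTU-sized packets that can fit in the RQ:
 * log2(max RQ size in WQEs) + log2(WQE size in bytes) - log2(max packet size).
 */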
210 u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
211 			       enum mlx5e_mpwrq_umr_mode umr_mode)
212 {
213 	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
214 		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
215 		MLX5E_ORDER2_MAX_PACKET_MTU;
216 }
217 
218 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
219 				 struct mlx5e_xsk_param *xsk)
220 {
221 	u16 headroom;
222 
223 	if (xsk)
224 		return xsk->headroom;
225 
226 	headroom = NET_IP_ALIGN;
227 	if (params->xdp_prog)
228 		headroom += XDP_PACKET_HEADROOM;
229 	else
230 		headroom += MLX5_RX_HEADROOM;
231 
232 	return headroom;
233 }
234 
235 static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
236 				      struct mlx5e_xsk_param *xsk)
237 {
238 	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
239 
240 	return xsk->headroom + hw_mtu;
241 }
242 
243 static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
244 {
245 	/* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
246 	u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
247 	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
248 
249 	return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
250 }
251 
252 static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
253 					 struct mlx5e_params *params,
254 					 struct mlx5e_xsk_param *xsk,
255 					 bool mpwqe)
256 {
257 	u32 sz;
258 
259 	/* XSK frames are mapped as individual pages, because frames may come in
260 	 * an arbitrary order from random locations in the UMEM.
261 	 */
262 	if (xsk)
263 		return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
264 
265 	sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
266 
267 	/* XDP in mlx5e doesn't support multiple packets per page.
268 	 * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
269 	 */
270 	return params->xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
271 }
272 
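/* Log2 of the number of linear strides (packets) that fit into one MPWQE. */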
273 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
274 				       struct mlx5e_params *params,
275 				       struct mlx5e_xsk_param *xsk)
276 {
277 	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
278 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
279 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
280 
281 	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
282 		order_base_2(linear_stride_sz);
283 }
284 
285 bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
286 			    struct mlx5e_params *params,
287 			    struct mlx5e_xsk_param *xsk)
288 {
289 	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
290 		return false;
291 
292 	/* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
293 	 * must fit into a CPU page.
294 	 */
295 	if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
296 		return false;
297 
298 	/* XSK frames must be big enough to hold the packet data. */
299 	if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
300 		return false;
301 
302 	return true;
303 }
304 
305 static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
306 					  u8 log_stride_sz, u8 log_num_strides,
307 					  u8 page_shift,
308 					  enum mlx5e_mpwrq_umr_mode umr_mode)
309 {
310 	if (log_stride_sz + log_num_strides !=
311 	    mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
312 		return false;
313 
314 	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
315 	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
316 		return false;
317 
318 	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
319 		return false;
320 
321 	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
322 		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;
323 
324 	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
325 }
326 
327 bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
328 					  struct mlx5e_params *params,
329 					  struct mlx5e_xsk_param *xsk)
330 {
331 	u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
332 	u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
333 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
334 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
335 
336 	return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
337 					     log_wqe_num_of_strides,
338 					     page_shift, umr_mode);
339 }
340 
341 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
342 				  struct mlx5e_params *params,
343 				  struct mlx5e_xsk_param *xsk)
344 {
345 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
346 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
347 	u8 log_num_strides;
348 	u8 log_stride_sz;
349 	u8 log_wqe_sz;
350 
351 	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
352 		return false;
353 
354 	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
355 	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
356 
357 	if (log_wqe_sz < log_stride_sz)
358 		return false;
359 
360 	log_num_strides = log_wqe_sz - log_stride_sz;
361 
362 	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
363 					     log_num_strides, page_shift,
364 					     umr_mode);
365 }
366 
367 u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
368 			       struct mlx5e_params *params,
369 			       struct mlx5e_xsk_param *xsk)
370 {
371 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
372 	u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
373 
374 	log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
375 	page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
376 	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
377 
378 	/* Numbers are unsigned, don't subtract to avoid underflow. */
379 	if (params->log_rq_mtu_frames <
380 	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
381 		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
382 
383 	/* Ethtool's rx_max_pending is calculated for a regular RQ, which uses
384 	 * pages of PAGE_SIZE. The max length of an XSK RQ might differ if it
385 	 * uses a frame size not equal to PAGE_SIZE.
386 	 * A stricter condition is checked in mlx5e_mpwrq_validate_xsk; WARN on
387 	 * unexpected failure.
388 	 */
389 	if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
390 		return max_log_rq_size;
391 
392 	return params->log_rq_mtu_frames - log_pkts_per_wqe;
393 }
394 
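/* Log2 of the SHAMPO header entry size, in units of
 * MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE, sized to hold MLX5E_RX_MAX_HEAD bytes.
 */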
395 u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
396 				      struct mlx5e_params *params)
397 {
398 	return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
399 }
400 
401 u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
402 				  struct mlx5e_params *params)
403 {
404 	return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
405 }
406 
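/* Log2 of the number of MTU-sized packets that fit into one SHAMPO
 * reservation.
 */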
407 u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
408 				     struct mlx5e_params *params)
409 {
410 	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
411 			 MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
412 
413 	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
414 }
415 
416 u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
417 				   struct mlx5e_params *params,
418 				   struct mlx5e_xsk_param *xsk)
419 {
420 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
421 		return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
422 
423 	/* XDP in mlx5e doesn't support multiple packets per page. */
424 	if (params->xdp_prog)
425 		return PAGE_SHIFT;
426 
427 	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
428 }
429 
430 u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
431 				   struct mlx5e_params *params,
432 				   struct mlx5e_xsk_param *xsk)
433 {
434 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
435 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
436 	u8 log_wqe_size, log_stride_size;
437 
438 	log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
439 	log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
440 	WARN(log_wqe_size < log_stride_size,
441 	     "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
442 	     log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
443 	return log_wqe_size - log_stride_size;
444 }
445 
446 u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
447 {
448 #define UMR_WQE_BULK (2)
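	/* The minimum bulk of UMR WQEs to post is 2, capped at half of the WQ
	 * size minus one for very small work queues.
	 */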
449 	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
450 }
451 
452 u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
453 			  struct mlx5e_params *params,
454 			  struct mlx5e_xsk_param *xsk)
455 {
456 	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
457 
458 	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
459 		return linear_headroom;
460 
461 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
462 		return linear_headroom;
463 
464 	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
465 		return linear_headroom;
466 
467 	return 0;
468 }
469 
470 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
471 {
472 	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
473 	u16 stop_room;
474 
475 	stop_room  = mlx5e_ktls_get_stop_room(mdev, params);
476 	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
477 	if (is_mpwqe)
478 		/* An MPWQE can take up to the maximum cacheline-aligned WQE, plus
479 		 * all the normal stop room, if a new packet breaks the active
480 		 * MPWQE session and allocates its WQEs right away.
481 		 */
482 		stop_room += mlx5e_stop_room_for_mpwqe(mdev);
483 
484 	return stop_room;
485 }
486 
487 int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
488 {
489 	size_t sq_size = 1 << params->log_sq_size;
490 	u16 stop_room;
491 
492 	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
493 	if (stop_room >= sq_size) {
494 		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
495 			      stop_room, sq_size);
496 		return -EINVAL;
497 	}
498 
499 	return 0;
500 }
501 
502 static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
503 {
504 	struct dim_cq_moder moder = {};
505 
506 	moder.cq_period_mode = cq_period_mode;
507 	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
508 	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
509 	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
510 		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
511 
512 	return moder;
513 }
514 
515 static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
516 {
517 	struct dim_cq_moder moder = {};
518 
519 	moder.cq_period_mode = cq_period_mode;
520 	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
521 	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
522 	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
523 		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
524 
525 	return moder;
526 }
527 
528 static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
529 {
530 	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
531 		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
532 		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
533 }
534 
535 void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
536 {
537 	if (params->tx_dim_enabled) {
538 		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
539 
540 		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
541 	} else {
542 		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
543 	}
544 }
545 
546 void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
547 {
548 	if (params->rx_dim_enabled) {
549 		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
550 
551 		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
552 	} else {
553 		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
554 	}
555 }
556 
557 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
558 {
559 	mlx5e_reset_tx_moderation(params, cq_period_mode);
560 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
561 			params->tx_cq_moderation.cq_period_mode ==
562 				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
563 }
564 
565 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
566 {
567 	mlx5e_reset_rx_moderation(params, cq_period_mode);
568 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
569 			params->rx_cq_moderation.cq_period_mode ==
570 				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
571 }
572 
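/* Consider the PCI link a bottleneck when the maximum port speed is more than
 * MLX5E_SLOW_PCI_RATIO times the available PCI bandwidth.
 */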
573 bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
574 {
575 	u32 link_speed = 0;
576 	u32 pci_bw = 0;
577 
578 	mlx5_port_max_linkspeed(mdev, &link_speed);
579 	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
580 	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
581 			   link_speed, pci_bw);
582 
583 #define MLX5E_SLOW_PCI_RATIO (2)
584 
585 	return link_speed && pci_bw &&
586 		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
587 }
588 
589 int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
590 {
591 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
592 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
593 
594 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
595 		return -EOPNOTSUPP;
596 
597 	return 0;
598 }
599 
600 int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
601 			     struct mlx5e_xsk_param *xsk)
602 {
603 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
604 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
605 	u16 max_mtu_pkts;
606 
607 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
608 		mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
609 			      page_shift, umr_mode);
610 		return -EOPNOTSUPP;
611 	}
612 
613 	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
614 		mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
615 		return -EINVAL;
616 	}
617 
618 	/* Current RQ length is too big for the given frame size: the
619 	 * needed number of WQEs would exceed the maximum.
620 	 */
621 	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
622 			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
623 	if (params->log_rq_mtu_frames > max_mtu_pkts) {
624 		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
625 			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
626 		return -EINVAL;
627 	}
628 
629 	return 0;
630 }
631 
632 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
633 			       struct mlx5e_params *params)
634 {
635 	params->log_rq_mtu_frames = is_kdump_kernel() ?
636 		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
637 		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
638 }
639 
640 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
641 {
642 	params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
643 		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
644 		MLX5_WQ_TYPE_CYCLIC;
645 }
646 
647 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
648 			   struct mlx5e_params *params)
649 {
650 	/* Prefer Striding RQ, unless any of the following holds:
651 	 * - Striding RQ configuration is not possible/supported.
652 	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
653 	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
654 	 *
655 	 * No XSK params: checking the availability of striding RQ in general.
656 	 */
657 	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
658 	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
659 	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
660 	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
661 	     !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
662 		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
663 	mlx5e_set_rq_type(mdev, params);
664 	mlx5e_init_rq_type_params(mdev, params);
665 }
666 
667 /* Build queue parameters */
668 
669 void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
670 {
671 	*ccp = (struct mlx5e_create_cq_param) {
672 		.napi = &c->napi,
673 		.ch_stats = c->stats,
674 		.node = cpu_to_node(c->cpu),
675 		.ix = c->ix,
676 	};
677 }
678 
679 static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
680 {
681 	if (xdp)
682 		/* XDP requires all fragments to be of the same size. */
683 		return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
684 
685 	/* Optimization for small packets: the last fragment is bigger than the others. */
686 	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
687 }
688 
689 static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,
690 					     struct mlx5e_rq_frags_info *info)
691 {
692 	u16 bulk_bound_rq_size = (1 << params->log_rq_mtu_frames) / 4;
693 	u32 bulk_bound_rq_size_in_bytes;
694 	u32 sum_frag_strides = 0;
695 	u32 wqe_bulk_in_bytes;
696 	u16 split_factor;
697 	u32 wqe_bulk;
698 	int i;
699 
700 	for (i = 0; i < info->num_frags; i++)
701 		sum_frag_strides += info->arr[i].frag_stride;
702 
703 	/* For MTUs larger than PAGE_SIZE, align to PAGE_SIZE to reflect the
704 	 * number of pages consumed per WQE, expressed in bytes.
705 	 */
706 	if (sum_frag_strides > PAGE_SIZE)
707 		sum_frag_strides = ALIGN(sum_frag_strides, PAGE_SIZE);
708 
709 	bulk_bound_rq_size_in_bytes = bulk_bound_rq_size * sum_frag_strides;
710 
711 #define MAX_WQE_BULK_BYTES(xdp) ((xdp ? 256 : 512) * 1024)
712 
713 	/* A WQE bulk should not exceed min(512KB, 1/4 of rq size). For XDP
714 	 * keep bulk size smaller to avoid filling the page_pool cache on
715 	 * every bulk refill.
716 	 */
717 	wqe_bulk_in_bytes = min_t(u32, MAX_WQE_BULK_BYTES(params->xdp_prog),
718 				  bulk_bound_rq_size_in_bytes);
719 	wqe_bulk = DIV_ROUND_UP(wqe_bulk_in_bytes, sum_frag_strides);
720 
721 	/* Make sure that allocations don't start when the page is still used
722 	 * by older WQEs.
723 	 */
724 	info->wqe_bulk = max_t(u16, info->wqe_index_mask + 1, wqe_bulk);
725 
726 	split_factor = DIV_ROUND_UP(MAX_WQE_BULK_BYTES(params->xdp_prog),
727 				    PP_ALLOC_CACHE_REFILL * PAGE_SIZE);
728 	info->refill_unit = DIV_ROUND_UP(info->wqe_bulk, split_factor);
729 }
730 
731 #define DEFAULT_FRAG_SIZE (2048)
732 
733 static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
734 				     struct mlx5e_params *params,
735 				     struct mlx5e_xsk_param *xsk,
736 				     struct mlx5e_rq_frags_info *info,
737 				     u32 *xdp_frag_size)
738 {
739 	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
740 	int frag_size_max = DEFAULT_FRAG_SIZE;
741 	int first_frag_size_max;
742 	u32 buf_size = 0;
743 	u16 headroom;
744 	int max_mtu;
745 	int i;
746 
747 	if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
748 		int frag_stride;
749 
750 		frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
751 
752 		info->arr[0].frag_size = byte_count;
753 		info->arr[0].frag_stride = frag_stride;
754 		info->num_frags = 1;
755 
756 		/* N WQEs share the same page, where N = PAGE_SIZE / frag_stride. The
757 		 * first WQE in the page is responsible for allocating this page;
758 		 * this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
759 		 * still not completed, the allocation must stop before k*N.
760 		 */
761 		info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;
762 
763 		goto out;
764 	}
765 
766 	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
767 	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
768 
769 	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
770 					  params->xdp_prog);
771 	if (byte_count > max_mtu || params->xdp_prog) {
772 		frag_size_max = PAGE_SIZE;
773 		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
774 
775 		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
776 						  params->xdp_prog);
777 		if (byte_count > max_mtu) {
778 			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
779 				      params->sw_mtu, max_mtu);
780 			return -EINVAL;
781 		}
782 	}
783 
784 	i = 0;
785 	while (buf_size < byte_count) {
786 		int frag_size = byte_count - buf_size;
787 
788 		if (i == 0)
789 			frag_size = min(frag_size, first_frag_size_max);
790 		else if (i < MLX5E_MAX_RX_FRAGS - 1)
791 			frag_size = min(frag_size, frag_size_max);
792 
793 		info->arr[i].frag_size = frag_size;
794 		buf_size += frag_size;
795 
796 		if (params->xdp_prog) {
797 			/* XDP multi buffer expects fragments of the same size. */
798 			info->arr[i].frag_stride = frag_size_max;
799 		} else {
800 			if (i == 0) {
801 				/* Ensure that headroom and tailroom are included. */
802 				frag_size += headroom;
803 				frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
804 			}
805 			info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
806 		}
807 
808 		i++;
809 	}
810 	info->num_frags = i;
811 
812 	/* The last fragment of WQE with index 2*N may share the page with the
813 	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
814 	 * is not completed yet, WQE 2*N must not be allocated, as it's
815 	 * responsible for allocating a new page.
816 	 */
817 	if (frag_size_max == PAGE_SIZE) {
818 		/* No WQE can start in the middle of a page. */
819 		info->wqe_index_mask = 0;
820 	} else {
821 		/* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
822 		 * because there would be more than MLX5E_MAX_RX_FRAGS of them.
823 		 */
824 		WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);
825 
826 		/* An odd number of fragments allows packing the last fragment of
827 		 * the previous WQE and the first fragment of the next WQE into
828 		 * the same page.
829 		 * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
830 		 * is 4, the last fragment can be bigger than the rest only if
831 		 * it's the fourth one, so WQEs consisting of 3 fragments will
832 		 * always share a page.
833 		 * When a page is shared, WQE bulk size is 2, otherwise just 1.
834 		 */
835 		info->wqe_index_mask = info->num_frags % 2;
836 	}
837 
838 out:
839 	/* Bulking optimization to skip allocation until a large enough number
840 	 * of WQEs can be allocated in a row. Bulking also influences how well
841 	 * deferred page release works.
842 	 */
843 	mlx5e_rx_compute_wqe_bulk_params(params, info);
844 
845 	mlx5_core_dbg(mdev, "%s: wqe_bulk = %u, wqe_bulk_refill_unit = %u\n",
846 		      __func__, info->wqe_bulk, info->refill_unit);
847 
848 	info->log_num_frags = order_base_2(info->num_frags);
849 
850 	*xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0;
851 
852 	return 0;
853 }
854 
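/* Log2 (rounded up) of the RX WQE stride: the WQE header for the given WQ type
 * plus ndsegs scatter entries.
 */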
855 static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
856 {
857 	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
858 
859 	switch (wq_type) {
860 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
861 		sz += sizeof(struct mlx5e_rx_wqe_ll);
862 		break;
863 	default: /* MLX5_WQ_TYPE_CYCLIC */
864 		sz += sizeof(struct mlx5e_rx_wqe_cyc);
865 	}
866 
867 	return order_base_2(sz);
868 }
869 
870 static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
871 					struct mlx5e_cq_param *param)
872 {
873 	void *cqc = param->cqc;
874 
875 	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
876 	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
877 		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
878 }
879 
880 static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
881 					struct mlx5e_params *params,
882 					struct mlx5e_xsk_param *xsk)
883 {
884 	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
885 		MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
886 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
887 	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
888 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
889 	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
890 	int wqe_size = BIT(log_stride_sz) * num_strides;
891 
892 	/* +1 is for the case where the pkt_per_rsrv packets don't consume the
893 	 * whole reservation, so we get a filler CQE for the rest of it.
894 	 */
895 	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
896 }
897 
898 static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
899 				    struct mlx5e_params *params,
900 				    struct mlx5e_xsk_param *xsk,
901 				    struct mlx5e_cq_param *param)
902 {
903 	bool hw_stridx = false;
904 	void *cqc = param->cqc;
905 	u8 log_cq_size;
906 
907 	switch (params->rq_wq_type) {
908 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
909 		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
910 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
911 			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
912 		else
913 			log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
914 				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
915 		break;
916 	default: /* MLX5_WQ_TYPE_CYCLIC */
917 		log_cq_size = params->log_rq_mtu_frames;
918 	}
919 
920 	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
921 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
922 		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
923 			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
924 		MLX5_SET(cqc, cqc, cqe_compression_layout,
925 			 MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
926 			 MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
927 			 MLX5_CQE_COMPRESS_LAYOUT_BASIC);
928 		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
929 	}
930 
931 	mlx5e_build_common_cq_param(mdev, param);
932 	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
933 }
934 
935 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
936 {
937 	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
938 	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
939 
940 	return ro && lro_en ?
941 		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
942 }
943 
944 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
945 			 struct mlx5e_params *params,
946 			 struct mlx5e_xsk_param *xsk,
947 			 u16 q_counter,
948 			 struct mlx5e_rq_param *param)
949 {
950 	void *rqc = param->rqc;
951 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
952 	int ndsegs = 1;
953 	int err;
954 
955 	switch (params->rq_wq_type) {
956 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
957 		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
958 		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
959 		enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
960 		u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
961 
962 		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
963 						   log_wqe_num_of_strides,
964 						   page_shift, umr_mode)) {
965 			mlx5_core_err(mdev,
966 				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
967 				      log_wqe_stride_size, log_wqe_num_of_strides,
968 				      umr_mode);
969 			return -EINVAL;
970 		}
971 
972 		MLX5_SET(wq, wq, log_wqe_num_of_strides,
973 			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
974 		MLX5_SET(wq, wq, log_wqe_stride_size,
975 			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
976 		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
977 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
978 			MLX5_SET(wq, wq, shampo_enable, true);
979 			MLX5_SET(wq, wq, log_reservation_size,
980 				 mlx5e_shampo_get_log_rsrv_size(mdev, params));
981 			MLX5_SET(wq, wq,
982 				 log_max_num_of_packets_per_reservation,
983 				 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
984 			MLX5_SET(wq, wq, log_headers_entry_size,
985 				 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
986 			MLX5_SET(rqc, rqc, reservation_timeout,
987 				 params->packet_merge.timeout);
988 			MLX5_SET(rqc, rqc, shampo_match_criteria_type,
989 				 params->packet_merge.shampo.match_criteria_type);
990 			MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
991 				 params->packet_merge.shampo.alignment_granularity);
992 		}
993 		break;
994 	}
995 	default: /* MLX5_WQ_TYPE_CYCLIC */
996 		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
997 		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
998 						&param->xdp_frag_size);
999 		if (err)
1000 			return err;
1001 		ndsegs = param->frags_info.num_frags;
1002 	}
1003 
1004 	MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
1005 	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
1006 	MLX5_SET(wq, wq, log_wq_stride,
1007 		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
1008 	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.hw_objs.pdn);
1009 	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
1010 	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
1011 	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
1012 
1013 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
1014 	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
1015 
1016 	return 0;
1017 }
1018 
1019 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
1020 			       u16 q_counter,
1021 			       struct mlx5e_rq_param *param)
1022 {
1023 	void *rqc = param->rqc;
1024 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1025 
1026 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1027 	MLX5_SET(wq, wq, log_wq_stride,
1028 		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
1029 	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
1030 
1031 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
1032 }
1033 
1034 void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
1035 			     struct mlx5e_params *params,
1036 			     struct mlx5e_cq_param *param)
1037 {
1038 	void *cqc = param->cqc;
1039 
1040 	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
1041 
1042 	mlx5e_build_common_cq_param(mdev, param);
1043 	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
1044 }
1045 
1046 void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
1047 				 struct mlx5e_sq_param *param)
1048 {
1049 	void *sqc = param->sqc;
1050 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1051 
1052 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1053 	MLX5_SET(wq, wq, pd,            mdev->mlx5e_res.hw_objs.pdn);
1054 
1055 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
1056 }
1057 
1058 void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
1059 			  struct mlx5e_params *params,
1060 			  struct mlx5e_sq_param *param)
1061 {
1062 	void *sqc = param->sqc;
1063 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1064 	bool allow_swp;
1065 
1066 	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
1067 		    (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
1068 	mlx5e_build_sq_param_common(mdev, param);
1069 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1070 	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
1071 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
1072 	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
1073 	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
1074 }
1075 
1076 static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
1077 				     u8 log_wq_size,
1078 				     struct mlx5e_cq_param *param)
1079 {
1080 	void *cqc = param->cqc;
1081 
1082 	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
1083 
1084 	mlx5e_build_common_cq_param(mdev, param);
1085 
1086 	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1087 }
1088 
1089 /* This function calculates the maximum number of header entries that are needed
1090  * per WQE. The formula is based on the reservation size and on the restriction
1091  * that the maximum number of packets per reservation equals the maximum number
1092  * of headers per reservation.
1093  */
1094 u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
1095 			    struct mlx5e_params *params,
1096 			    struct mlx5e_rq_param *rq_param)
1097 {
1098 	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
1099 		MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
1100 	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
1101 	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
1102 	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
1103 	int wqe_size = BIT(log_stride_sz) * num_strides;
1104 	u32 hd_per_wqe;
1105 
1106 	/* Assumption: hd_per_wqe % 8 == 0. */
1107 	hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
1108 	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
1109 		      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
1110 	return hd_per_wqe;
1111 }
1112 
1113 /* This function calculates the maximum number of header entries that are needed
1114  * for the WQ. This value is used to allocate the header buffer in HW, thus it
1115  * must be a power of 2.
1116  */
1117 u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
1118 			   struct mlx5e_params *params,
1119 			   struct mlx5e_rq_param *rq_param)
1120 {
1121 	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
1122 	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
1123 	u32 hd_per_wqe, hd_per_wq;
1124 
1125 	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
1126 	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
1127 	return hd_per_wq;
1128 }
1129 
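/* Number of ICOSQ WQEBBs needed to post the KLM UMR WQEs that map the SHAMPO
 * header buffer for every WQE in the RQ.
 */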
1130 static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
1131 				 struct mlx5e_params *params,
1132 				 struct mlx5e_rq_param *rq_param)
1133 {
1134 	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
1135 	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
1136 	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
1137 	u32 wqebbs;
1138 
1139 	max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
1140 	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
1141 	max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
1142 	rest = max_hd_per_wqe % max_klm_per_umr;
1143 	wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
1144 	if (rest)
1145 		wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
1146 	wqebbs *= wq_size;
1147 	return wqebbs;
1148 }
1149 
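/* Total number of ICOSQ WQEBBs needed to post one UMR WQE for every MPWQE in
 * the RQ.
 */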
1150 static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
1151 					struct mlx5e_params *params,
1152 					struct mlx5e_xsk_param *xsk)
1153 {
1154 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
1155 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
1156 	u8 umr_wqebbs;
1157 
1158 	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
1159 
1160 	return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
1161 }
1162 
1163 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
1164 				      struct mlx5e_params *params,
1165 				      struct mlx5e_rq_param *rqp)
1166 {
1167 	u32 wqebbs, total_pages, useful_space;
1168 
1169 	/* MLX5_WQ_TYPE_CYCLIC */
1170 	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
1171 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
1172 
1173 	/* UMR WQEs for the regular RQ. */
1174 	wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
1175 
1176 	/* If XDP program is attached, XSK may be turned on at any time without
1177 	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
1178 	 * both regular RQ and XSK RQ.
1179 	 *
1180 	 * XSK uses different values of page_shift, and the total number of UMR
1181 	 * WQEBBs depends on it. This dependency is complex and not monotonic,
1182 	 * especially taking into consideration that some of the parameters come
1183 	 * from capabilities. Hence, we have to try all valid values of XSK
1184 	 * frame size (and page_shift) to find the maximum.
1185 	 */
1186 	if (params->xdp_prog) {
1187 		u32 max_xsk_wqebbs = 0;
1188 		u8 frame_shift;
1189 
1190 		for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
1191 		     frame_shift <= PAGE_SHIFT; frame_shift++) {
1192 			/* The headroom doesn't affect the calculation. */
1193 			struct mlx5e_xsk_param xsk = {
1194 				.chunk_size = 1 << frame_shift,
1195 				.unaligned = false,
1196 			};
1197 
1198 			/* XSK aligned mode. */
1199 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1200 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1201 
1202 			/* XSK unaligned mode, frame size is a power of two. */
1203 			xsk.unaligned = true;
1204 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1205 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1206 
1207 			/* XSK unaligned mode, frame size is not equal to stride size. */
1208 			xsk.chunk_size -= 1;
1209 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1210 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1211 
1212 			/* XSK unaligned mode, frame size is a triple power of two. */
1213 			xsk.chunk_size = (1 << frame_shift) / 4 * 3;
1214 			max_xsk_wqebbs = max(max_xsk_wqebbs,
1215 				mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
1216 		}
1217 
1218 		wqebbs += max_xsk_wqebbs;
1219 	}
1220 
1221 	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
1222 		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
1223 
1224 	/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
1225 	 * This padding is always smaller than the max WQE size. That gives us
1226 	 * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
1227 	 * per page. The number of pages is estimated as the total size of WQEs
1228 	 * divided by the useful space in page, rounding up. If some WQEs don't
1229 	 * fully fit into the useful space, they can occupy part of the padding,
1230 	 * which proves this estimation to be correct (reserve enough space).
1231 	 */
1232 	useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
1233 	total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
1234 	wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);
1235 
1236 	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
1237 }
1238 
1239 static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
1240 {
1241 	if (mlx5e_is_ktls_rx(mdev))
1242 		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
1243 
1244 	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
1245 }
1246 
1247 static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
1248 				    u8 log_wq_size,
1249 				    struct mlx5e_sq_param *param)
1250 {
1251 	void *sqc = param->sqc;
1252 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1253 
1254 	mlx5e_build_sq_param_common(mdev, param);
1255 
1256 	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
1257 	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
1258 	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1259 }
1260 
1261 static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
1262 					  u8 log_wq_size,
1263 					  struct mlx5e_sq_param *param)
1264 {
1265 	void *sqc = param->sqc;
1266 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1267 
1268 	mlx5e_build_sq_param_common(mdev, param);
1269 	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
1270 	param->is_tls = mlx5e_is_ktls_rx(mdev);
1271 	if (param->is_tls)
1272 		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
1273 	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
1274 	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
1275 	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1276 }
1277 
1278 void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
1279 			     struct mlx5e_params *params,
1280 			     struct mlx5e_xsk_param *xsk,
1281 			     struct mlx5e_sq_param *param)
1282 {
1283 	void *sqc = param->sqc;
1284 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1285 
1286 	mlx5e_build_sq_param_common(mdev, param);
1287 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1288 	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
1289 	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
1290 	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
1291 }
1292 
1293 int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
1294 			      struct mlx5e_params *params,
1295 			      u16 q_counter,
1296 			      struct mlx5e_channel_param *cparam)
1297 {
1298 	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
1299 	int err;
1300 
1301 	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
1302 	if (err)
1303 		return err;
1304 
1305 	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
1306 	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
1307 
1308 	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
1309 	mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
1310 	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
1311 	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
1312 
1313 	return 0;
1314 }
1315