1 /*
2  * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #ifndef __MLX5_EN_H__
33 #define __MLX5_EN_H__
34 
35 #include <linux/if_vlan.h>
36 #include <linux/etherdevice.h>
37 #include <linux/timecounter.h>
38 #include <linux/net_tstamp.h>
39 #include <linux/crash_dump.h>
40 #include <linux/mlx5/driver.h>
41 #include <linux/mlx5/qp.h>
42 #include <linux/mlx5/cq.h>
43 #include <linux/mlx5/port.h>
44 #include <linux/mlx5/vport.h>
45 #include <linux/mlx5/transobj.h>
46 #include <linux/mlx5/fs.h>
47 #include <linux/rhashtable.h>
48 #include <net/udp_tunnel.h>
49 #include <net/switchdev.h>
50 #include <net/xdp.h>
51 #include <linux/dim.h>
52 #include <linux/bits.h>
53 #include "wq.h"
54 #include "mlx5_core.h"
55 #include "en_stats.h"
56 #include "en/dcbnl.h"
57 #include "en/fs.h"
58 #include "en/qos.h"
59 #include "lib/hv_vhca.h"
60 #include "lib/clock.h"
61 #include "en/rx_res.h"
62 
63 extern const struct net_device_ops mlx5e_netdev_ops;
64 struct page_pool;
65 
66 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
67 #define MLX5E_METADATA_ETHER_LEN 8
68 
69 #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
70 
71 #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
72 #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
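/* Example (assuming params->hard_mtu is MLX5E_ETH_HARD_MTU, i.e.
 * ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN = 14 + 4 + 4 = 22 bytes):
 * MLX5E_SW2HW_MTU(params, 1500) yields a HW MTU of 1522, and
 * MLX5E_HW2SW_MTU(params, 1522) maps back to a SW MTU of 1500.
 */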
73 
74 #define MLX5E_MAX_NUM_TC	8
75 #define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE
76 
77 #define MLX5_RX_HEADROOM NET_SKB_PAD
78 #define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
79 				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
80 
81 #define MLX5E_RX_MAX_HEAD (256)
82 
83 #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
84 	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
85 #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
86 	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
87 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
88 	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
89 
90 #define MLX5_MPWRQ_LOG_WQE_SZ			18
91 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
92 				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
93 #define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
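/* Worked example: on a 4K-page system (PAGE_SHIFT = 12), a 2^18-byte WQE
 * spans pages of order 18 - 12 = 6, i.e. MLX5_MPWRQ_PAGES_PER_WQE = 64.
 */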
94 
95 #define MLX5_ALIGN_MTTS(mtts)		(ALIGN(mtts, 8))
96 #define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
97 #define MLX5_MTT_OCTW(mtts)		(MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
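/* MTT counts are padded to a multiple of 8, and each octword describes two
 * MTTs, e.g. MLX5_MTT_OCTW(65) = ALIGN(65, 8) / 2 = 72 / 2 = 36.
 */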
98 /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
 * WQEs. This page absorbs write overflow by the hardware when it
 * receives packets larger than the MTU. These oversized packets are
101  * dropped by the driver at a later stage.
102  */
103 #define MLX5E_REQUIRED_WQE_MTTS		(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes)	((wqes) * MLX5E_REQUIRED_WQE_MTTS)
105 #define MLX5E_MAX_RQ_NUM_MTTS	\
106 	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
107 #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
108 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
109 		(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
110 #define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
111 	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
112 	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
113 
114 #define MLX5E_MIN_SKB_FRAG_SZ		(MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
115 #define MLX5E_LOG_MAX_RX_WQE_BULK	\
116 	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
117 
118 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
119 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
120 #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
121 
122 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
123 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
124 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd,	\
125 					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
126 
127 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
128 
129 #define MLX5E_DEFAULT_LRO_TIMEOUT                       32
130 #define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
131 
132 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
133 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
134 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
135 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
136 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
137 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
138 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
139 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
140 
141 #define MLX5E_MIN_NUM_CHANNELS         0x1
142 #define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE / 2)
143 #define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
144 #define MLX5E_TX_CQ_POLL_BUDGET        128
145 #define MLX5E_TX_XSK_POLL_BUDGET       64
146 #define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */
147 
148 #define MLX5E_UMR_WQE_INLINE_SZ \
149 	(sizeof(struct mlx5e_umr_wqe) + \
150 	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
151 	       MLX5_UMR_MTT_ALIGNMENT))
152 #define MLX5E_UMR_WQEBBS \
153 	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
154 
155 #define MLX5E_MSG_LEVEL			NETIF_MSG_LINK
156 
157 #define mlx5e_dbg(mlevel, priv, format, ...)                    \
158 do {                                                            \
159 	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn((priv)->netdev, format,             \
161 			    ##__VA_ARGS__);                     \
162 } while (0)
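/* Illustrative use (hypothetical call site): print only when the LINK message
 * class is enabled in priv->msglevel (typically set through ethtool's msglvl):
 *
 *	mlx5e_dbg(LINK, priv, "link state changed\n");
 */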
163 
164 #define mlx5e_state_dereference(priv, p) \
165 	rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
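/* Illustrative use (hypothetical pointer): on the control path, with
 * priv->state_lock held, an __rcu pointer that is updated under that lock
 * can be read as
 *
 *	prog = mlx5e_state_dereference(priv, rq->xdp_prog);
 *
 * letting lockdep document the locking assumption.
 */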
166 
167 enum mlx5e_rq_group {
168 	MLX5E_RQ_GROUP_REGULAR,
169 	MLX5E_RQ_GROUP_XSK,
170 #define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
171 };
172 
173 static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
174 {
175 	if (mlx5_lag_is_lacp_owner(mdev))
176 		return 1;
177 
178 	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
179 }
180 
181 static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
182 {
183 	switch (wq_type) {
184 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
185 		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
186 			     wq_size / 2);
187 	default:
188 		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
189 			     wq_size / 2);
190 	}
191 }
192 
/* Use this function to get the max num channels (rxqs/txqs) only when creating the netdev */
194 static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
195 {
196 	return is_kdump_kernel() ?
197 		MLX5E_MIN_NUM_CHANNELS :
198 		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
199 }
200 
201 struct mlx5e_tx_wqe {
202 	struct mlx5_wqe_ctrl_seg ctrl;
203 	struct mlx5_wqe_eth_seg  eth;
204 	struct mlx5_wqe_data_seg data[0];
205 };
206 
207 struct mlx5e_rx_wqe_ll {
208 	struct mlx5_wqe_srq_next_seg  next;
209 	struct mlx5_wqe_data_seg      data[];
210 };
211 
212 struct mlx5e_rx_wqe_cyc {
213 	struct mlx5_wqe_data_seg      data[0];
214 };
215 
216 struct mlx5e_umr_wqe {
217 	struct mlx5_wqe_ctrl_seg       ctrl;
218 	struct mlx5_wqe_umr_ctrl_seg   uctrl;
219 	struct mlx5_mkey_seg           mkc;
220 	struct mlx5_mtt                inline_mtts[0];
221 };
222 
223 extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
224 
225 enum mlx5e_priv_flag {
226 	MLX5E_PFLAG_RX_CQE_BASED_MODER,
227 	MLX5E_PFLAG_TX_CQE_BASED_MODER,
228 	MLX5E_PFLAG_RX_CQE_COMPRESS,
229 	MLX5E_PFLAG_RX_STRIDING_RQ,
230 	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
231 	MLX5E_PFLAG_XDP_TX_MPWQE,
232 	MLX5E_PFLAG_SKB_TX_MPWQE,
233 	MLX5E_PFLAG_TX_PORT_TS,
234 	MLX5E_NUM_PFLAGS, /* Keep last */
235 };
236 
237 #define MLX5E_SET_PFLAG(params, pflag, enable)			\
238 	do {							\
239 		if (enable)					\
240 			(params)->pflags |= BIT(pflag);		\
241 		else						\
242 			(params)->pflags &= ~(BIT(pflag));	\
243 	} while (0)
244 
245 #define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
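/* Usage sketch (illustrative): toggle a private flag on a params struct and
 * read it back:
 *
 *	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		...
 */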
246 
247 struct mlx5e_params {
248 	u8  log_sq_size;
249 	u8  rq_wq_type;
250 	u8  log_rq_mtu_frames;
251 	u16 num_channels;
252 	struct {
253 		u16 mode;
254 		u8 num_tc;
255 	} mqprio;
256 	bool rx_cqe_compress_def;
257 	bool tunneled_offload_en;
258 	struct dim_cq_moder rx_cq_moderation;
259 	struct dim_cq_moder tx_cq_moderation;
260 	bool lro_en;
261 	u8  tx_min_inline_mode;
262 	bool vlan_strip_disable;
263 	bool scatter_fcs_en;
264 	bool rx_dim_enabled;
265 	bool tx_dim_enabled;
266 	u32 lro_timeout;
267 	u32 pflags;
268 	struct bpf_prog *xdp_prog;
269 	struct mlx5e_xsk *xsk;
270 	unsigned int sw_mtu;
271 	int hard_mtu;
272 	bool ptp_rx;
273 };
274 
275 static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
276 {
277 	return params->mqprio.mode == TC_MQPRIO_MODE_DCB ?
278 		params->mqprio.num_tc : 1;
279 }
280 
281 enum {
282 	MLX5E_RQ_STATE_ENABLED,
283 	MLX5E_RQ_STATE_RECOVERING,
284 	MLX5E_RQ_STATE_AM,
285 	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
286 	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
287 	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
289 };
290 
291 struct mlx5e_cq {
292 	/* data path - accessed per cqe */
293 	struct mlx5_cqwq           wq;
294 
295 	/* data path - accessed per napi poll */
296 	u16                        event_ctr;
297 	struct napi_struct        *napi;
298 	struct mlx5_core_cq        mcq;
299 	struct mlx5e_ch_stats     *ch_stats;
300 
301 	/* control */
302 	struct net_device         *netdev;
303 	struct mlx5_core_dev      *mdev;
304 	struct mlx5e_priv         *priv;
305 	struct mlx5_wq_ctrl        wq_ctrl;
306 } ____cacheline_aligned_in_smp;
307 
308 struct mlx5e_cq_decomp {
309 	/* cqe decompression */
310 	struct mlx5_cqe64          title;
311 	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
312 	u8                         mini_arr_idx;
313 	u16                        left;
314 	u16                        wqe_counter;
315 } ____cacheline_aligned_in_smp;
316 
317 enum mlx5e_dma_map_type {
318 	MLX5E_DMA_MAP_SINGLE,
319 	MLX5E_DMA_MAP_PAGE
320 };
321 
322 struct mlx5e_sq_dma {
323 	dma_addr_t              addr;
324 	u32                     size;
325 	enum mlx5e_dma_map_type type;
326 };
327 
328 enum {
329 	MLX5E_SQ_STATE_ENABLED,
330 	MLX5E_SQ_STATE_MPWQE,
331 	MLX5E_SQ_STATE_RECOVERING,
332 	MLX5E_SQ_STATE_IPSEC,
333 	MLX5E_SQ_STATE_AM,
334 	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
335 	MLX5E_SQ_STATE_PENDING_XSK_TX,
336 	MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
337 };
338 
339 struct mlx5e_tx_mpwqe {
340 	/* Current MPWQE session */
341 	struct mlx5e_tx_wqe *wqe;
342 	u32 bytes_count;
343 	u8 ds_count;
344 	u8 pkt_count;
345 	u8 inline_on;
346 };
347 
348 struct mlx5e_skb_fifo {
349 	struct sk_buff **fifo;
350 	u16 *pc;
351 	u16 *cc;
352 	u16 mask;
353 };
354 
355 struct mlx5e_ptpsq;
356 
357 struct mlx5e_txqsq {
358 	/* data path */
359 
360 	/* dirtied @completion */
361 	u16                        cc;
362 	u16                        skb_fifo_cc;
363 	u32                        dma_fifo_cc;
364 	struct dim                 dim; /* Adaptive Moderation */
365 
366 	/* dirtied @xmit */
367 	u16                        pc ____cacheline_aligned_in_smp;
368 	u16                        skb_fifo_pc;
369 	u32                        dma_fifo_pc;
370 	struct mlx5e_tx_mpwqe      mpwqe;
371 
372 	struct mlx5e_cq            cq;
373 
374 	/* read only */
375 	struct mlx5_wq_cyc         wq;
376 	u32                        dma_fifo_mask;
377 	struct mlx5e_sq_stats     *stats;
378 	struct {
379 		struct mlx5e_sq_dma       *dma_fifo;
380 		struct mlx5e_skb_fifo      skb_fifo;
381 		struct mlx5e_tx_wqe_info  *wqe_info;
382 	} db;
383 	void __iomem              *uar_map;
384 	struct netdev_queue       *txq;
385 	u32                        sqn;
386 	u16                        stop_room;
387 	u8                         min_inline_mode;
388 	struct device             *pdev;
389 	__be32                     mkey_be;
390 	unsigned long              state;
391 	unsigned int               hw_mtu;
392 	struct hwtstamp_config    *tstamp;
393 	struct mlx5_clock         *clock;
394 	struct net_device         *netdev;
395 	struct mlx5_core_dev      *mdev;
396 	struct mlx5e_priv         *priv;
397 
398 	/* control path */
399 	struct mlx5_wq_ctrl        wq_ctrl;
400 	int                        ch_ix;
401 	int                        txq_ix;
402 	u32                        rate_limit;
403 	struct work_struct         recover_work;
404 	struct mlx5e_ptpsq        *ptpsq;
405 	cqe_ts_to_ns               ptp_cyc2time;
406 } ____cacheline_aligned_in_smp;
407 
408 struct mlx5e_dma_info {
409 	dma_addr_t addr;
410 	union {
411 		struct page *page;
412 		struct xdp_buff *xsk;
413 	};
414 };
415 
416 /* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean things up properly.
418  */
419 enum mlx5e_xdp_xmit_mode {
420 	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
421 	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
422 	 * returned.
423 	 */
424 	MLX5E_XDP_XMIT_MODE_FRAME,
425 
426 	/* The xdp_frame was created in place as a result of XDP_TX from a
427 	 * regular RQ. No DMA remapping happened, and the page belongs to us.
428 	 */
429 	MLX5E_XDP_XMIT_MODE_PAGE,
430 
	/* No xdp_frame was created at all; the transmit happened from a UMEM
432 	 * page. The UMEM Completion Ring producer pointer has to be increased.
433 	 */
434 	MLX5E_XDP_XMIT_MODE_XSK,
435 };
436 
437 struct mlx5e_xdp_info {
438 	enum mlx5e_xdp_xmit_mode mode;
439 	union {
440 		struct {
441 			struct xdp_frame *xdpf;
442 			dma_addr_t dma_addr;
443 		} frame;
444 		struct {
445 			struct mlx5e_rq *rq;
446 			struct mlx5e_dma_info di;
447 		} page;
448 	};
449 };
450 
451 struct mlx5e_xmit_data {
452 	dma_addr_t  dma_addr;
453 	void       *data;
454 	u32         len;
455 };
456 
457 struct mlx5e_xdp_info_fifo {
458 	struct mlx5e_xdp_info *xi;
459 	u32 *cc;
460 	u32 *pc;
461 	u32 mask;
462 };
463 
464 struct mlx5e_xdpsq;
465 typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
466 typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
467 					struct mlx5e_xmit_data *,
468 					struct mlx5e_xdp_info *,
469 					int);
470 
471 struct mlx5e_xdpsq {
472 	/* data path */
473 
474 	/* dirtied @completion */
475 	u32                        xdpi_fifo_cc;
476 	u16                        cc;
477 
478 	/* dirtied @xmit */
479 	u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
480 	u16                        pc;
481 	struct mlx5_wqe_ctrl_seg   *doorbell_cseg;
482 	struct mlx5e_tx_mpwqe      mpwqe;
483 
484 	struct mlx5e_cq            cq;
485 
486 	/* read only */
487 	struct xsk_buff_pool      *xsk_pool;
488 	struct mlx5_wq_cyc         wq;
489 	struct mlx5e_xdpsq_stats  *stats;
490 	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
491 	mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
492 	struct {
493 		struct mlx5e_xdp_wqe_info *wqe_info;
494 		struct mlx5e_xdp_info_fifo xdpi_fifo;
495 	} db;
496 	void __iomem              *uar_map;
497 	u32                        sqn;
498 	struct device             *pdev;
499 	__be32                     mkey_be;
500 	u8                         min_inline_mode;
501 	unsigned long              state;
502 	unsigned int               hw_mtu;
503 
504 	/* control path */
505 	struct mlx5_wq_ctrl        wq_ctrl;
506 	struct mlx5e_channel      *channel;
507 } ____cacheline_aligned_in_smp;
508 
509 struct mlx5e_ktls_resync_resp;
510 
511 struct mlx5e_icosq {
512 	/* data path */
513 	u16                        cc;
514 	u16                        pc;
515 
516 	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
517 	struct mlx5e_cq            cq;
518 
519 	/* write@xmit, read@completion */
520 	struct {
521 		struct mlx5e_icosq_wqe_info *wqe_info;
522 	} db;
523 
524 	/* read only */
525 	struct mlx5_wq_cyc         wq;
526 	void __iomem              *uar_map;
527 	u32                        sqn;
528 	u16                        reserved_room;
529 	unsigned long              state;
530 	struct mlx5e_ktls_resync_resp *ktls_resync;
531 
532 	/* control path */
533 	struct mlx5_wq_ctrl        wq_ctrl;
534 	struct mlx5e_channel      *channel;
535 
536 	struct work_struct         recover_work;
537 } ____cacheline_aligned_in_smp;
538 
539 struct mlx5e_wqe_frag_info {
540 	struct mlx5e_dma_info *di;
541 	u32 offset;
542 	bool last_in_page;
543 };
544 
545 struct mlx5e_umr_dma_info {
546 	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
547 };
548 
549 struct mlx5e_mpw_info {
550 	struct mlx5e_umr_dma_info umr;
551 	u16 consumed_strides;
552 	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
553 };
554 
555 #define MLX5E_MAX_RX_FRAGS 4
556 
/* A single cache unit can serve one NAPI call (for a non-striding RQ)
 * or one MPWQE (for a striding RQ).
559  */
560 #define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
561 				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
562 #define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
563 struct mlx5e_page_cache {
564 	u32 head;
565 	u32 tail;
566 	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
567 };
568 
569 struct mlx5e_rq;
570 typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
571 typedef struct sk_buff *
572 (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
573 			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
574 typedef struct sk_buff *
575 (*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
576 			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
577 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
578 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
579 
580 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
581 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
582 
583 enum mlx5e_rq_flag {
584 	MLX5E_RQ_FLAG_XDP_XMIT,
585 	MLX5E_RQ_FLAG_XDP_REDIRECT,
586 };
587 
588 struct mlx5e_rq_frag_info {
589 	int frag_size;
590 	int frag_stride;
591 };
592 
593 struct mlx5e_rq_frags_info {
594 	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
595 	u8 num_frags;
596 	u8 log_num_frags;
597 	u8 wqe_bulk;
598 };
599 
600 struct mlx5e_rq {
601 	/* data path */
602 	union {
603 		struct {
604 			struct mlx5_wq_cyc          wq;
605 			struct mlx5e_wqe_frag_info *frags;
606 			struct mlx5e_dma_info      *di;
607 			struct mlx5e_rq_frags_info  info;
608 			mlx5e_fp_skb_from_cqe       skb_from_cqe;
609 		} wqe;
610 		struct {
611 			struct mlx5_wq_ll      wq;
612 			struct mlx5e_umr_wqe   umr_wqe;
613 			struct mlx5e_mpw_info *info;
614 			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
615 			u16                    num_strides;
616 			u16                    actual_wq_head;
617 			u8                     log_stride_sz;
618 			u8                     umr_in_progress;
619 			u8                     umr_last_bulk;
620 			u8                     umr_completed;
621 		} mpwqe;
622 	};
623 	struct {
624 		u16            headroom;
625 		u32            frame0_sz;
626 		u8             map_dir;   /* dma map direction */
627 	} buff;
628 
629 	struct device         *pdev;
630 	struct net_device     *netdev;
631 	struct mlx5e_rq_stats *stats;
632 	struct mlx5e_cq        cq;
633 	struct mlx5e_cq_decomp cqd;
634 	struct mlx5e_page_cache page_cache;
635 	struct hwtstamp_config *tstamp;
636 	struct mlx5_clock      *clock;
637 	struct mlx5e_icosq    *icosq;
638 	struct mlx5e_priv     *priv;
639 
640 	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
641 	mlx5e_fp_post_rx_wqes  post_wqes;
642 	mlx5e_fp_dealloc_wqe   dealloc_wqe;
643 
644 	unsigned long          state;
645 	int                    ix;
646 	unsigned int           hw_mtu;
647 
648 	struct dim         dim; /* Dynamic Interrupt Moderation */
649 
650 	/* XDP */
651 	struct bpf_prog __rcu *xdp_prog;
652 	struct mlx5e_xdpsq    *xdpsq;
653 	DECLARE_BITMAP(flags, 8);
654 	struct page_pool      *page_pool;
655 
656 	/* AF_XDP zero-copy */
657 	struct xsk_buff_pool  *xsk_pool;
658 
659 	struct work_struct     recover_work;
660 
661 	/* control */
662 	struct mlx5_wq_ctrl    wq_ctrl;
663 	__be32                 mkey_be;
664 	u8                     wq_type;
665 	u32                    rqn;
666 	struct mlx5_core_dev  *mdev;
667 	struct mlx5_core_mkey  umr_mkey;
668 	struct mlx5e_dma_info  wqe_overflow;
669 
670 	/* XDP read-mostly */
671 	struct xdp_rxq_info    xdp_rxq;
672 	cqe_ts_to_ns           ptp_cyc2time;
673 } ____cacheline_aligned_in_smp;
674 
675 enum mlx5e_channel_state {
676 	MLX5E_CHANNEL_STATE_XSK,
677 	MLX5E_CHANNEL_NUM_STATES
678 };
679 
680 struct mlx5e_channel {
681 	/* data path */
682 	struct mlx5e_rq            rq;
683 	struct mlx5e_xdpsq         rq_xdpsq;
684 	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
685 	struct mlx5e_icosq         icosq;   /* internal control operations */
686 	struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
687 	bool                       xdp;
688 	struct napi_struct         napi;
689 	struct device             *pdev;
690 	struct net_device         *netdev;
691 	__be32                     mkey_be;
692 	u16                        qos_sqs_size;
693 	u8                         num_tc;
694 	u8                         lag_port;
695 
696 	/* XDP_REDIRECT */
697 	struct mlx5e_xdpsq         xdpsq;
698 
699 	/* AF_XDP zero-copy */
700 	struct mlx5e_rq            xskrq;
701 	struct mlx5e_xdpsq         xsksq;
702 
703 	/* Async ICOSQ */
704 	struct mlx5e_icosq         async_icosq;
705 	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
706 	spinlock_t                 async_icosq_lock;
707 
708 	/* data path - accessed per napi poll */
709 	const struct cpumask	  *aff_mask;
710 	struct mlx5e_ch_stats     *stats;
711 
712 	/* control */
713 	struct mlx5e_priv         *priv;
714 	struct mlx5_core_dev      *mdev;
715 	struct hwtstamp_config    *tstamp;
716 	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
717 	int                        ix;
718 	int                        cpu;
719 };
720 
721 struct mlx5e_ptp;
722 
723 struct mlx5e_channels {
724 	struct mlx5e_channel **c;
725 	struct mlx5e_ptp      *ptp;
726 	unsigned int           num;
727 	struct mlx5e_params    params;
728 };
729 
730 struct mlx5e_channel_stats {
731 	struct mlx5e_ch_stats ch;
732 	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
733 	struct mlx5e_rq_stats rq;
734 	struct mlx5e_rq_stats xskrq;
735 	struct mlx5e_xdpsq_stats rq_xdpsq;
736 	struct mlx5e_xdpsq_stats xdpsq;
737 	struct mlx5e_xdpsq_stats xsksq;
738 } ____cacheline_aligned_in_smp;
739 
740 struct mlx5e_ptp_stats {
741 	struct mlx5e_ch_stats ch;
742 	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
743 	struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
744 	struct mlx5e_rq_stats rq;
745 } ____cacheline_aligned_in_smp;
746 
747 enum {
748 	MLX5E_STATE_OPENED,
749 	MLX5E_STATE_DESTROYING,
750 	MLX5E_STATE_XDP_TX_ENABLED,
751 	MLX5E_STATE_XDP_ACTIVE,
752 };
753 
754 enum {
755 	MLX5E_TC_PRIO = 0,
756 	MLX5E_NIC_PRIO
757 };
758 
759 struct mlx5e_modify_sq_param {
760 	int curr_state;
761 	int next_state;
762 	int rl_update;
763 	int rl_index;
764 	bool qos_update;
765 	u16 qos_queue_group_id;
766 };
767 
768 #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
769 struct mlx5e_hv_vhca_stats_agent {
770 	struct mlx5_hv_vhca_agent *agent;
771 	struct delayed_work        work;
772 	u16                        delay;
773 	void                      *buf;
774 };
775 #endif
776 
777 struct mlx5e_xsk {
778 	/* XSK buffer pools are stored separately from channels,
779 	 * because we don't want to lose them when channels are
	 * recreated. The kernel also stores buffer pools, but it doesn't
	 * distinguish between zero-copy and non-zero-copy UMEMs, so we
	 * rely on our own mechanism.
783 	 */
784 	struct xsk_buff_pool **pools;
785 	u16 refcnt;
786 	bool ever_used;
787 };
788 
789 /* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used where we can't allocate them because those functions
791  * must not fail. Use with care and make sure the same variable is not used
792  * simultaneously by multiple users.
793  */
794 struct mlx5e_scratchpad {
795 	cpumask_var_t cpumask;
796 };
797 
798 struct mlx5e_htb {
799 	DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
800 	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
801 	struct mlx5e_sq_stats **qos_sq_stats;
802 	u16 max_qos_sqs;
803 	u16 maj_id;
804 	u16 defcls;
805 };
806 
807 struct mlx5e_trap;
808 
809 struct mlx5e_priv {
810 	/* priv data path fields - start */
811 	/* +1 for port ptp ts */
812 	struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
813 				   MLX5E_QOS_MAX_LEAF_NODES];
814 	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
815 	int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
816 #ifdef CONFIG_MLX5_CORE_EN_DCB
817 	struct mlx5e_dcbx_dp       dcbx_dp;
818 #endif
819 	/* priv data path fields - end */
820 
821 	u32                        msglevel;
822 	unsigned long              state;
823 	struct mutex               state_lock; /* Protects Interface state */
824 	struct mlx5e_rq            drop_rq;
825 
826 	struct mlx5e_channels      channels;
827 	u32                        tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
828 	struct mlx5e_rx_res       *rx_res;
829 	u32                        tx_rates[MLX5E_MAX_NUM_SQS];
830 
831 	struct mlx5e_flow_steering fs;
832 
833 	struct workqueue_struct    *wq;
834 	struct work_struct         update_carrier_work;
835 	struct work_struct         set_rx_mode_work;
836 	struct work_struct         tx_timeout_work;
837 	struct work_struct         update_stats_work;
838 	struct work_struct         monitor_counters_work;
839 	struct mlx5_nb             monitor_counters_nb;
840 
841 	struct mlx5_core_dev      *mdev;
842 	struct net_device         *netdev;
843 	struct mlx5e_trap         *en_trap;
844 	struct mlx5e_stats         stats;
845 	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
846 	struct mlx5e_channel_stats trap_stats;
847 	struct mlx5e_ptp_stats     ptp_stats;
848 	u16                        max_nch;
849 	u8                         max_opened_tc;
850 	bool                       tx_ptp_opened;
851 	bool                       rx_ptp_opened;
852 	struct hwtstamp_config     tstamp;
853 	u16                        q_counter;
854 	u16                        drop_rq_q_counter;
855 	struct notifier_block      events_nb;
856 	struct notifier_block      blocking_events_nb;
857 	int                        num_tc_x_num_ch;
858 
859 	struct udp_tunnel_nic_info nic_info;
860 #ifdef CONFIG_MLX5_CORE_EN_DCB
861 	struct mlx5e_dcbx          dcbx;
862 #endif
863 
864 	const struct mlx5e_profile *profile;
865 	void                      *ppriv;
866 #ifdef CONFIG_MLX5_EN_IPSEC
867 	struct mlx5e_ipsec        *ipsec;
868 #endif
869 #ifdef CONFIG_MLX5_EN_TLS
870 	struct mlx5e_tls          *tls;
871 #endif
872 	struct devlink_health_reporter *tx_reporter;
873 	struct devlink_health_reporter *rx_reporter;
874 	struct mlx5e_xsk           xsk;
875 #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
876 	struct mlx5e_hv_vhca_stats_agent stats_agent;
877 #endif
878 	struct mlx5e_scratchpad    scratchpad;
879 	struct mlx5e_htb           htb;
880 };
881 
882 struct mlx5e_rx_handlers {
883 	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
884 	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
885 };
886 
887 extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
888 
889 struct mlx5e_profile {
890 	int	(*init)(struct mlx5_core_dev *mdev,
891 			struct net_device *netdev);
892 	void	(*cleanup)(struct mlx5e_priv *priv);
893 	int	(*init_rx)(struct mlx5e_priv *priv);
894 	void	(*cleanup_rx)(struct mlx5e_priv *priv);
895 	int	(*init_tx)(struct mlx5e_priv *priv);
896 	void	(*cleanup_tx)(struct mlx5e_priv *priv);
897 	void	(*enable)(struct mlx5e_priv *priv);
898 	void	(*disable)(struct mlx5e_priv *priv);
899 	int	(*update_rx)(struct mlx5e_priv *priv);
900 	void	(*update_stats)(struct mlx5e_priv *priv);
901 	void	(*update_carrier)(struct mlx5e_priv *priv);
902 	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
903 	mlx5e_stats_grp_t *stats_grps;
904 	const struct mlx5e_rx_handlers *rx_handlers;
905 	int	max_tc;
906 	u8	rq_groups;
907 	bool	rx_ptp_support;
908 };
909 
910 void mlx5e_build_ptys2ethtool_map(void);
911 
912 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
913 
914 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
915 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
916 
917 void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
918 int mlx5e_self_test_num(struct mlx5e_priv *priv);
919 void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
920 		     u64 *buf);
921 void mlx5e_set_rx_mode_work(struct work_struct *work);
922 
923 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
924 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
925 int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
926 
927 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
928 			  u16 vid);
929 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
930 			   u16 vid);
931 void mlx5e_timestamp_init(struct mlx5e_priv *priv);
932 
933 struct mlx5e_xsk_param;
934 
935 struct mlx5e_rq_param;
936 int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
937 		  struct mlx5e_xsk_param *xsk, int node,
938 		  struct mlx5e_rq *rq);
939 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
940 void mlx5e_close_rq(struct mlx5e_rq *rq);
941 int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
942 void mlx5e_destroy_rq(struct mlx5e_rq *rq);
943 
944 struct mlx5e_sq_param;
945 int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
946 		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
947 void mlx5e_close_icosq(struct mlx5e_icosq *sq);
948 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
949 		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
950 		     struct mlx5e_xdpsq *sq, bool is_redirect);
951 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
952 
953 struct mlx5e_create_cq_param {
954 	struct napi_struct *napi;
955 	struct mlx5e_ch_stats *ch_stats;
956 	int node;
957 	int ix;
958 };
959 
960 struct mlx5e_cq_param;
961 int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
962 		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
963 		  struct mlx5e_cq *cq);
964 void mlx5e_close_cq(struct mlx5e_cq *cq);
965 
966 int mlx5e_open_locked(struct net_device *netdev);
967 int mlx5e_close_locked(struct net_device *netdev);
968 
969 int mlx5e_open_channels(struct mlx5e_priv *priv,
970 			struct mlx5e_channels *chs);
971 void mlx5e_close_channels(struct mlx5e_channels *chs);
972 
973 /* Function pointer to be used to modify HW or kernel settings while
974  * switching channels
975  */
976 typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
977 #define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
978 int fn##_ctx(struct mlx5e_priv *priv, void *context) \
979 { \
980 	return fn(priv); \
981 }
982 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
983 int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
984 			     struct mlx5e_params *new_params,
985 			     mlx5e_fp_preactivate preactivate,
986 			     void *context, bool reset);
987 int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
988 int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
989 int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
990 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
991 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
992 int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
993 
994 int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
995 void mlx5e_activate_rq(struct mlx5e_rq *rq);
996 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
997 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
998 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
999 
1000 int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1001 		    struct mlx5e_modify_sq_param *p);
1002 int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
1003 		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
1004 		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
1005 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
1006 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
1007 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
1008 void mlx5e_tx_disable_queue(struct netdev_queue *txq);
1009 int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
1010 void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
1011 struct mlx5e_create_sq_param;
1012 int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1013 			struct mlx5e_sq_param *param,
1014 			struct mlx5e_create_sq_param *csp,
1015 			u16 qos_queue_group_id,
1016 			u32 *sqn);
1017 void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
1018 void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
1019 
1020 static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
1021 {
1022 	return MLX5_CAP_ETH(mdev, swp) &&
1023 		MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
1024 }
1025 
1026 extern const struct ethtool_ops mlx5e_ethtool_ops;
1027 
1028 int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
1029 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
1030 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
1031 		       bool enable_mc_lb);
1032 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
1033 
1034 /* common netdev helpers */
1035 void mlx5e_create_q_counters(struct mlx5e_priv *priv);
1036 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
1037 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
1038 		       struct mlx5e_rq *drop_rq);
1039 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
1040 int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
1041 void mlx5e_free_di_list(struct mlx5e_rq *rq);
1042 
1043 int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
1044 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
1045 
1046 int mlx5e_create_tises(struct mlx5e_priv *priv);
1047 void mlx5e_destroy_tises(struct mlx5e_priv *priv);
1048 int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
1049 void mlx5e_update_carrier(struct mlx5e_priv *priv);
1050 int mlx5e_close(struct net_device *netdev);
1051 int mlx5e_open(struct net_device *netdev);
1052 
1053 void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
1054 
1055 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
1056 int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
1057 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
1058 		     mlx5e_fp_preactivate preactivate);
1059 void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
1060 
1061 /* ethtool helpers */
1062 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
1063 			       struct ethtool_drvinfo *drvinfo);
1064 void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
1065 			       uint32_t stringset, uint8_t *data);
1066 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
1067 void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
1068 				     struct ethtool_stats *stats, u64 *data);
1069 void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
1070 				 struct ethtool_ringparam *param);
1071 int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1072 				struct ethtool_ringparam *param);
1073 void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
1074 				struct ethtool_channels *ch);
1075 int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
1076 			       struct ethtool_channels *ch);
1077 int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
1078 			       struct ethtool_coalesce *coal);
1079 int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
1080 			       struct ethtool_coalesce *coal);
1081 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
1082 				     struct ethtool_link_ksettings *link_ksettings);
1083 int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1084 				     const struct ethtool_link_ksettings *link_ksettings);
1085 int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
1086 int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
1087 		   const u8 hfunc);
1088 int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1089 		    u32 *rule_locs);
1090 int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
1091 u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
1092 u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
1093 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1094 			      struct ethtool_ts_info *info);
1095 int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1096 			       struct ethtool_flash *flash);
1097 void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
1098 				  struct ethtool_pauseparam *pauseparam);
1099 int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
1100 				 struct ethtool_pauseparam *pauseparam);
1101 
1102 /* mlx5e generic netdev management API */
1103 static inline unsigned int
1104 mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
1105 {
1106 	return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
1107 }
1108 
1109 static inline bool
1110 mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
1111 {
1112 	return !is_kdump_kernel() &&
1113 		MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
1114 }
1115 
1116 int mlx5e_priv_init(struct mlx5e_priv *priv,
1117 		    struct net_device *netdev,
1118 		    struct mlx5_core_dev *mdev);
1119 void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
1120 struct net_device *
1121 mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
1122 int mlx5e_attach_netdev(struct mlx5e_priv *priv);
1123 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
1124 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
1125 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
1126 				const struct mlx5e_profile *new_profile, void *new_ppriv);
1127 void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
1128 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
1129 void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
1130 void mlx5e_rx_dim_work(struct work_struct *work);
1131 void mlx5e_tx_dim_work(struct work_struct *work);
1132 
1133 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
1134 				       struct net_device *netdev,
1135 				       netdev_features_t features);
1136 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
1137 #ifdef CONFIG_MLX5_ESWITCH
1138 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
1139 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
1140 int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
1141 int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
1142 #endif
1143 #endif /* __MLX5_EN_H__ */
1144