/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
#include "en/qos.h"
#include "lib/hv_vhca.h"
#include "lib/clock.h"
#include "en/rx_res.h"
#include "en/selq.h"

extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
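/* Editorial note, not part of the upstream header: a quick worked example of
 * the two conversion macros above, assuming a profile whose params->hard_mtu
 * is MLX5E_ETH_HARD_MTU. With ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4),
 * the hard MTU overhead is 22 bytes, so for a software MTU of 1500:
 *
 *	MLX5E_SW2HW_MTU(params, 1500) == 1522	// SW MTU -> max wire frame size
 *	MLX5E_HW2SW_MTU(params, 1522) == 1500	// and back
 */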

#define MLX5E_MAX_NUM_TC	8
#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))

#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18

/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
 * These are theoretical maximums, which can be further restricted by
 * capabilities. These values are used for static resource allocations and
 * sanity checks.
 * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE
 * size actually used at runtime, but it's not a problem when calculating static
 * array sizes.
 */
#define MLX5_UMR_MAX_FLEX_SPACE \
	(ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
		    MLX5_UMR_FLEX_ALIGNMENT))
#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
	rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt))

#define MLX5E_MAX_RQ_NUM_MTTS \
	(ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
#define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))

#define MLX5E_MIN_SKB_FRAG_SZ		(MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK	\
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE	0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE	0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE	0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE	0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE	0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW	0x2

#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC	0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS	0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC	0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS	0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES	0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW	0x2

#define MLX5E_MIN_NUM_CHANNELS		0x1
#define MLX5E_MAX_NUM_CHANNELS		(MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_TX_CQ_POLL_BUDGET		128
#define MLX5E_TX_XSK_POLL_BUDGET	64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL	500 /* msecs */

#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
	(sizeof(struct mlx5e_umr_wqe) +\
	(sizeof(struct mlx5_klm) * (sgl_len)))

#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
	(DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))

#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
	(DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))

#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
	(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))

#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
	ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)

#define MLX5E_MAX_KLM_PER_WQE(mdev) \
	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))

#define mlx5e_state_dereference(priv, p) \
	rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))

static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return 1;

	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1,
		       MLX5_MAX_PORTS);
}

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

/* Use this function to get the max num channels (rxqs/txqs) only when
 * creating the netdev.
 */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS);
}

/* The maximum WQE size, in bytes, is reported by the max_wqe_sz_sq capability.
 * The driver hardens this limit to 1KB (16 WQEBBs), unless the firmware
 * capability is stricter.
 */
static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
{
	BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX);

	return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
			 MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}

static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev)
{
	/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
	 * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
	 * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64)
	 * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower
	 * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
	 * cache-aligned.
	 */
	u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev);

	wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
#if L1_CACHE_BYTES >= 128
	wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
	return wqebbs;
}

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data[];
};

struct mlx5e_rx_wqe_cyc {
	DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	union {
		DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
		DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
		DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
	};
};

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_PFLAG_SKB_TX_MPWQE,
	MLX5E_PFLAG_TX_PORT_TS,
	MLX5E_NUM_PFLAGS, /* Keep last */
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
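
/* Editorial note, not part of the upstream header: a minimal sketch of how the
 * private-flag accessors above are meant to be used, assuming the caller works
 * on a copy of the active channel parameters (fields as declared in
 * struct mlx5e_params and struct mlx5e_priv below):
 *
 *	struct mlx5e_params new_params = priv->channels.params;
 *
 *	MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		;	// the bit is now set in new_params.pflags
 */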

enum packet_merge {
	MLX5E_PACKET_MERGE_NONE,
	MLX5E_PACKET_MERGE_LRO,
	MLX5E_PACKET_MERGE_SHAMPO,
};

struct mlx5e_packet_merge_param {
	enum packet_merge type;
	u32 timeout;
	struct {
		u8 match_criteria_type;
		u8 alignment_granularity;
	} shampo;
};

struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;
	u8  log_rq_mtu_frames;
	u16 num_channels;
	struct {
		u16 mode;
		u8 num_tc;
		struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
		struct {
			u64 max_rate[TC_MAX_QUEUE];
			u32 hw_id[TC_MAX_QUEUE];
		} channel;
	} mqprio;
	bool rx_cqe_compress_def;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	struct mlx5e_packet_merge_param packet_merge;
	u8  tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	unsigned int sw_mtu;
	int hard_mtu;
	bool ptp_rx;
	__be32 terminate_lkey_be;
};

static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
{
	return params->mqprio.mode == TC_MQPRIO_MODE_DCB ?
		params->mqprio.num_tc : 1;
}

/* Keep this enum consistent with the corresponding strings array
 * declared in en/reporter_rx.c
 */
enum {
	MLX5E_RQ_STATE_ENABLED = 0,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_DIM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
	MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
	MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */
	MLX5E_RQ_STATE_XSK, /* set to indicate an xsk rq */
	MLX5E_NUM_RQ_STATES, /* Must be kept last */
};

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_ch_stats     *ch_stats;

	/* control */
	struct net_device         *netdev;
	struct mlx5_core_dev      *mdev;
	struct mlx5e_priv         *priv;
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64          title;
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        left;
	u16                        wqe_counter;
	bool                       last_cqe_title;
} ____cacheline_aligned_in_smp;

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};

/* Keep this enum consistent with the corresponding strings array
 * declared in en/reporter_tx.c
 */
enum {
	MLX5E_SQ_STATE_ENABLED = 0,
	MLX5E_SQ_STATE_MPWQE,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_DIM,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
	MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
	MLX5E_SQ_STATE_XDP_MULTIBUF,
	MLX5E_NUM_SQ_STATES, /* Must be kept last */
};

struct mlx5e_tx_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u32 bytes_count;
	u8  ds_count;
	u8  pkt_count;
	u8  inline_on;
};

struct mlx5e_skb_fifo {
	struct sk_buff **fifo;
	u16 *pc;
	u16 *cc;
	u16 mask;
};

struct mlx5e_ptpsq;

struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u16                        skb_fifo_cc;
	u32                        dma_fifo_cc;
	struct dim                 dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u16                        skb_fifo_pc;
	u32                        dma_fifo_pc;
	struct mlx5e_tx_mpwqe      mpwqe;

	struct mlx5e_cq            cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	struct mlx5e_sq_stats     *stats;
	struct {
		struct mlx5e_sq_dma       *dma_fifo;
		struct mlx5e_skb_fifo      skb_fifo;
		struct mlx5e_tx_wqe_info  *wqe_info;
	} db;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u16                        stop_room;
	u8                         max_sq_mpw_wqebbs;
	u8                         min_inline_mode;
	struct device             *pdev;
	__be32                     mkey_be;
	unsigned long              state;
	unsigned int               hw_mtu;
	struct mlx5_clock         *clock;
	struct net_device         *netdev;
	struct mlx5_core_dev      *mdev;
	struct mlx5e_channel      *channel;
	struct mlx5e_priv         *priv;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	int                        ch_ix;
	int                        txq_ix;
	u32                        rate_limit;
	struct work_struct         recover_work;
	struct mlx5e_ptpsq        *ptpsq;
	cqe_ts_to_ns               ptp_cyc2time;
} ____cacheline_aligned_in_smp;

struct mlx5e_xdp_info_fifo {
	union mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};

struct mlx5e_xdpsq;
struct mlx5e_xmit_data;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xmit_data *,
					int);

struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32                        xdpi_fifo_cc;
	u16                        cc;

	/* dirtied @xmit */
	u32                        xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16                        pc;
	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5e_tx_mpwqe      mpwqe;

	struct mlx5e_cq            cq;

	/* read only */
	struct xsk_buff_pool      *xsk_pool;
	struct mlx5_wq_cyc         wq;
	struct mlx5e_xdpsq_stats  *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame    xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem              *uar_map;
	u32                        sqn;
	struct device             *pdev;
	__be32                     mkey_be;
	u16                        stop_room;
	u8                         max_sq_mpw_wqebbs;
	u8                         min_inline_mode;
	unsigned long              state;
	unsigned int               hw_mtu;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_ktls_resync_resp;

struct mlx5e_icosq {
	/* data path */
	u16                        cc;
	u16                        pc;

	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	u16                        reserved_room;
	unsigned long              state;
	struct mlx5e_ktls_resync_resp *ktls_resync;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;

	struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_frag_page {
	struct page *page;
	u16 frags;
};

enum mlx5e_wqe_frag_flag {
	MLX5E_WQE_FRAG_LAST_IN_PAGE,
	MLX5E_WQE_FRAG_SKIP_RELEASE,
};

struct mlx5e_wqe_frag_info {
	union {
		struct mlx5e_frag_page *frag_page;
		struct xdp_buff **xskp;
	};
	u32 offset;
	u8 flags;
};

union mlx5e_alloc_units {
	DECLARE_FLEX_ARRAY(struct mlx5e_frag_page, frag_pages);
	DECLARE_FLEX_ARRAY(struct page *, pages);
	DECLARE_FLEX_ARRAY(struct xdp_buff *, xsk_buffs);
};

struct mlx5e_mpw_info {
	u16 consumed_strides;
	DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
	struct mlx5e_frag_page linear_page;
	union mlx5e_alloc_units alloc_units;
};

#define MLX5E_MAX_RX_FRAGS 4

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
			       u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
			 struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};

struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u16 wqe_bulk;
	u16 refill_unit;
	u8 wqe_index_mask;
};

struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct mlx5e_frag_page *frag_page;
		struct page *page;
	};
};

struct mlx5e_shampo_hd {
	u32 mkey;
	struct mlx5e_dma_info *info;
	struct mlx5e_frag_page *pages;
	u16 curr_page_index;
	u16 hd_per_wq;
	u16 hd_per_wqe;
	unsigned long *bitmap;
	u16 pi;
	u16 ci;
	__be32 key;
	u64 last_addr;
};

struct mlx5e_hw_gro_data {
	struct sk_buff *skb;
	struct flow_keys fk;
	int second_ip_id;
};

enum mlx5e_mpwrq_umr_mode {
	MLX5E_MPWRQ_UMR_MODE_ALIGNED,
	MLX5E_MPWRQ_UMR_MODE_UNALIGNED,
	MLX5E_MPWRQ_UMR_MODE_OVERSIZED,
	MLX5E_MPWRQ_UMR_MODE_TRIPLE,
};

struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc          wq;
			struct mlx5e_wqe_frag_info *frags;
			union mlx5e_alloc_units    *alloc_units;
			struct mlx5e_rq_frags_info  info;
			mlx5e_fp_skb_from_cqe       skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll      wq;
			struct mlx5e_umr_wqe   umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			__be32                 umr_mkey_be;
			u16                    num_strides;
			u16                    actual_wq_head;
			u8                     log_stride_sz;
			u8                     umr_in_progress;
			u8                     umr_last_bulk;
			u8                     umr_completed;
			u8                     min_wqe_bulk;
			u8                     page_shift;
			u8                     pages_per_wqe;
			u8                     umr_wqebbs;
			u8                     mtts_per_wqe;
			u8                     umr_mode;
			struct mlx5e_shampo_hd *shampo;
		} mpwqe;
	};
	struct {
		u16            headroom;
		u32            frame0_sz;
		u8             map_dir; /* dma map direction */
	} buff;

	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq        cq;
	struct mlx5e_cq_decomp cqd;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock      *clock;
	struct mlx5e_icosq    *icosq;
	struct mlx5e_priv     *priv;

	struct mlx5e_hw_gro_data *hw_gro_data;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;
	int                    ix;
	unsigned int           hw_mtu;

	struct dim             dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog __rcu *xdp_prog;
	struct mlx5e_xdpsq    *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool      *page_pool;

	/* AF_XDP zero-copy */
	struct xsk_buff_pool  *xsk_pool;

	struct work_struct     recover_work;

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	__be32                 mkey_be;
	u8                     wq_type;
	u32                    rqn;
	struct mlx5_core_dev  *mdev;
	struct mlx5e_channel  *channel;
	struct mlx5e_dma_info  wqe_overflow;

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
	cqe_ts_to_ns           ptp_cyc2time;
} ____cacheline_aligned_in_smp;

enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_xdpsq         rq_xdpsq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq;   /* internal control operations */
	struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u16                        qos_sqs_size;
	u8                         num_tc;
	u8                         lag_port;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq         xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq            xskrq;
	struct mlx5e_xdpsq         xsksq;

	/* Async ICOSQ */
	struct mlx5e_icosq         async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t                 async_icosq_lock;

	/* data path - accessed per napi poll */
	const struct cpumask      *aff_mask;
	struct mlx5e_ch_stats     *stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int                        ix;
	int                        cpu;
	/* Sync between icosq recovery and XSK enable/disable. */
	struct mutex               icosq_recovery_lock;
};

struct mlx5e_ptp;

struct mlx5e_channels {
	struct mlx5e_channel **c;
	struct mlx5e_ptp      *ptp;
	unsigned int           num;
	struct mlx5e_params    params;
};

struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

struct mlx5e_ptp_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;

enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
	bool qos_update;
	u16 qos_queue_group_id;
};

#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work        work;
	u16                        delay;
	void                      *buf;
};
#endif

struct mlx5e_xsk {
	/* XSK buffer pools are stored separately from channels,
	 * because we don't want to lose them when channels are
	 * recreated. The kernel also stores the buffer pool, but it
	 * doesn't distinguish between zero-copy and non-zero-copy UMEMs,
	 * so rely on our mechanism.
	 */
	struct xsk_buff_pool **pools;
	u16 refcnt;
	bool ever_used;
};

/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used where we can't allocate them because those functions
 * must not fail. Use with care and make sure the same variable is not used
 * simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};

struct mlx5e_trap;
struct mlx5e_htb;

struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_selq selq;
	struct mlx5e_txqsq **txq2sq;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rx_res       *rx_res;
	u32                       *tx_rates;

	struct mlx5e_flow_steering *fs;

	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct work_struct         update_stats_work;
	struct work_struct         monitor_counters_work;
	struct mlx5_nb             monitor_counters_nb;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_trap         *en_trap;
	struct mlx5e_stats         stats;
	struct mlx5e_channel_stats **channel_stats;
	struct mlx5e_channel_stats trap_stats;
	struct mlx5e_ptp_stats     ptp_stats;
	struct mlx5e_sq_stats      **htb_qos_sq_stats;
	u16                        htb_max_qos_sqs;
	u16                        stats_nch;
	u16                        max_nch;
	u8                         max_opened_tc;
	bool                       tx_ptp_opened;
	bool                       rx_ptp_opened;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
	struct notifier_block      events_nb;
	struct notifier_block      blocking_events_nb;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
	void                      *ppriv;
#ifdef CONFIG_MLX5_MACSEC
	struct mlx5e_macsec       *macsec;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls          *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct mlx5e_xsk           xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad    scratchpad;
	struct mlx5e_htb          *htb;
	struct mlx5e_mqprio_rl    *mqprio_rl;
	struct dentry             *dfs_root;
};

struct mlx5e_dev {
	struct mlx5e_priv *priv;
	struct devlink_port dl_port;
};

struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo;
};

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;

enum mlx5e_profile_feature {
	MLX5E_PROFILE_FEATURE_PTP_RX,
	MLX5E_PROFILE_FEATURE_PTP_TX,
	MLX5E_PROFILE_FEATURE_QOS_HTB,
	MLX5E_PROFILE_FEATURE_FS_VLAN,
	MLX5E_PROFILE_FEATURE_FS_TC,
};

struct mlx5e_profile {
	int	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	int	(*update_rx)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	int	(*max_nch_limit)(struct mlx5_core_dev *mdev);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int	max_tc;
	u32	features;
};

#define mlx5e_profile_feature_cap(profile, feature)	\
	((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))

void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
					    enum mlx5e_mpwrq_umr_mode umr_mode);

void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
		  struct mlx5e_xsk_param *xsk, int node,
		  struct mlx5e_rq *rq);
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);

struct mlx5e_create_cq_param {
	struct napi_struct *napi;
	struct mlx5e_ch_stats *ch_stats;
	int node;
	int ix;
};

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
void mlx5e_trigger_napi_sched(struct napi_struct *napi);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW or kernel settings while
 * switching channels
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
	int fn##_ctx(struct mlx5e_priv *priv, void *context) \
	{ \
		return fn(priv); \
	}
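/* Editorial note, not part of the upstream header: the wrapper macro above
 * adapts a function that takes only (struct mlx5e_priv *) to the
 * mlx5e_fp_preactivate signature, which additionally receives an opaque
 * context. For instance, a *_ctx variant such as mlx5e_set_dev_port_mtu_ctx()
 * (declared further below) can be generated like this:
 *
 *	MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu)
 *
 * which expands, roughly, to:
 *
 *	int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context)
 *	{
 *		return mlx5e_set_dev_port_mtu(priv);
 *	}
 *
 * The resulting function can then be passed to mlx5e_safe_switch_params() as
 * its preactivate callback.
 */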
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
			     struct mlx5e_params *new_params,
			     mlx5e_fp_preactivate preactivate,
			     void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);

int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
		     struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u16 qos_queue_group_id,
			u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);

static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
		MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;

int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param,
				 struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal,
			       struct kernel_ethtool_coalesce *kernel_coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);

/* mlx5e generic netdev management API */
static inline bool
mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
{
	return !is_kdump_kernel() &&
		MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
}

int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev);
int mlx5e_priv_init(struct mlx5e_priv *priv,
		    const struct mlx5e_profile *profile,
		    struct net_device *netdev,
		    struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
				const struct mlx5e_profile *new_profile, void *new_ppriv);
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

void mlx5e_set_xdp_feature(struct net_device *netdev);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
#endif /* __MLX5_EN_H__ */