1 /* 2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
31 */ 32 #ifndef __MLX5_EN_H__ 33 #define __MLX5_EN_H__ 34 35 #include <linux/if_vlan.h> 36 #include <linux/etherdevice.h> 37 #include <linux/timecounter.h> 38 #include <linux/net_tstamp.h> 39 #include <linux/ptp_clock_kernel.h> 40 #include <linux/crash_dump.h> 41 #include <linux/mlx5/driver.h> 42 #include <linux/mlx5/qp.h> 43 #include <linux/mlx5/cq.h> 44 #include <linux/mlx5/port.h> 45 #include <linux/mlx5/vport.h> 46 #include <linux/mlx5/transobj.h> 47 #include <linux/mlx5/fs.h> 48 #include <linux/rhashtable.h> 49 #include <net/switchdev.h> 50 #include <net/xdp.h> 51 #include <linux/net_dim.h> 52 #include <linux/bits.h> 53 #include "wq.h" 54 #include "mlx5_core.h" 55 #include "en_stats.h" 56 #include "en/fs.h" 57 58 extern const struct net_device_ops mlx5e_netdev_ops; 59 struct page_pool; 60 61 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) 62 #define MLX5E_METADATA_ETHER_LEN 8 63 64 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) 65 66 #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) 67 68 #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) 69 #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) 70 71 #define MLX5E_MAX_PRIORITY 8 72 #define MLX5E_MAX_DSCP 64 73 #define MLX5E_MAX_NUM_TC 8 74 75 #define MLX5_RX_HEADROOM NET_SKB_PAD 76 #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ 77 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 78 79 #define MLX5E_RX_MAX_HEAD (256) 80 81 #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ 82 (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ 83 #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \ 84 max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) 85 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \ 86 MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD)) 87 88 #define MLX5_MPWRQ_LOG_WQE_SZ 18 89 #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? 
\ 90 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) 91 #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) 92 93 #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) 94 #define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) 95 #define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) 96 #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) 97 #define MLX5E_MAX_RQ_NUM_MTTS \ 98 ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ 99 #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) 100 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \ 101 (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS)) 102 #define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \ 103 (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \ 104 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU)) 105 106 #define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM)) 107 #define MLX5E_LOG_MAX_RX_WQE_BULK \ 108 (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ))) 109 110 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 111 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa 112 #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd 113 114 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK) 115 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa 116 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \ 117 MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW) 118 119 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 120 121 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) 122 #define MLX5E_DEFAULT_LRO_TIMEOUT 32 123 #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 124 125 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 126 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 127 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 128 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 129 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10 130 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 131 #define 
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2

/* Indirection (RSS) table size and the channel limits derived from it */
#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */

/* Byte size of a UMR WQE with its MTT list placed inline, and the number
 * of send WQE basic blocks (WQEBBs) such a WQE occupies.
 */
#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

/* Emit a netdev warning only when the NETIF_MSG_##mlevel class is
 * enabled in priv->msglevel.
 */
#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)


/* Minimum number of RX WQEs to post before the RQ is considered usable:
 * the per-wq-type default, capped at half the work queue size.
 */
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	/* a kdump kernel runs with minimal resources: use a single channel */
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}

/* Use this function to get max num channels after netdev was created */
static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
{
	return min_t(unsigned int, netdev->num_rx_queues,
		     netdev->num_tx_queues);
}

/* TX WQE layout: ctrl + eth segments followed by inline data segments */
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
	struct mlx5_wqe_data_seg data[0];
};

/* RX WQE for the linked-list (striding) RQ type */
struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data[0];
};

/* RX WQE for the cyclic RQ type: data segments only */
struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg      data[0];
};

/* UMR WQE used to (re)map MPWQE pages; MTT entries follow inline */
struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_mtt                inline_mtts[0];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

/* Bit indices into mlx5e_params.pflags (ethtool private flags) */
enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_NUM_PFLAGS, /* Keep last */
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif

/* Software configuration of a channel set; presumably most of these
 * require recreating the channels to take effect — verify at call sites.
 */
struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;
	u8  log_rq_mtu_frames;
	u16 num_channels;
	u8  num_tc;
	bool rx_cqe_compress_def;
	struct net_dim_cq_moder rx_cq_moderation;
	struct net_dim_cq_moder tx_cq_moderation;
	bool tunneled_offload_en;
	bool lro_en;
	u8  tx_min_inline_mode;
	bool vlan_strip_disable;
	bool
scatter_fcs_en; 248 bool rx_dim_enabled; 249 bool tx_dim_enabled; 250 u32 lro_timeout; 251 u32 pflags; 252 struct bpf_prog *xdp_prog; 253 unsigned int sw_mtu; 254 int hard_mtu; 255 }; 256 257 #ifdef CONFIG_MLX5_CORE_EN_DCB 258 struct mlx5e_cee_config { 259 /* bw pct for priority group */ 260 u8 pg_bw_pct[CEE_DCBX_MAX_PGS]; 261 u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO]; 262 bool pfc_setting[CEE_DCBX_MAX_PRIO]; 263 bool pfc_enable; 264 }; 265 266 enum { 267 MLX5_DCB_CHG_RESET, 268 MLX5_DCB_NO_CHG, 269 MLX5_DCB_CHG_NO_RESET, 270 }; 271 272 struct mlx5e_dcbx { 273 enum mlx5_dcbx_oper_mode mode; 274 struct mlx5e_cee_config cee_cfg; /* pending configuration */ 275 u8 dscp_app_cnt; 276 277 /* The only setting that cannot be read from FW */ 278 u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; 279 u8 cap; 280 281 /* Buffer configuration */ 282 bool manual_buffer; 283 u32 cable_len; 284 u32 xoff; 285 }; 286 287 struct mlx5e_dcbx_dp { 288 u8 dscp2prio[MLX5E_MAX_DSCP]; 289 u8 trust_state; 290 }; 291 #endif 292 293 enum { 294 MLX5E_RQ_STATE_ENABLED, 295 MLX5E_RQ_STATE_AM, 296 MLX5E_RQ_STATE_NO_CSUM_COMPLETE, 297 }; 298 299 struct mlx5e_cq { 300 /* data path - accessed per cqe */ 301 struct mlx5_cqwq wq; 302 303 /* data path - accessed per napi poll */ 304 u16 event_ctr; 305 struct napi_struct *napi; 306 struct mlx5_core_cq mcq; 307 struct mlx5e_channel *channel; 308 309 /* control */ 310 struct mlx5_core_dev *mdev; 311 struct mlx5_wq_ctrl wq_ctrl; 312 } ____cacheline_aligned_in_smp; 313 314 struct mlx5e_cq_decomp { 315 /* cqe decompression */ 316 struct mlx5_cqe64 title; 317 struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE]; 318 u8 mini_arr_idx; 319 u16 left; 320 u16 wqe_counter; 321 } ____cacheline_aligned_in_smp; 322 323 struct mlx5e_tx_wqe_info { 324 struct sk_buff *skb; 325 u32 num_bytes; 326 u8 num_wqebbs; 327 u8 num_dma; 328 }; 329 330 enum mlx5e_dma_map_type { 331 MLX5E_DMA_MAP_SINGLE, 332 MLX5E_DMA_MAP_PAGE 333 }; 334 335 struct mlx5e_sq_dma { 336 dma_addr_t addr; 337 u32 size; 338 
enum mlx5e_dma_map_type type; 339 }; 340 341 enum { 342 MLX5E_SQ_STATE_ENABLED, 343 MLX5E_SQ_STATE_RECOVERING, 344 MLX5E_SQ_STATE_IPSEC, 345 MLX5E_SQ_STATE_AM, 346 MLX5E_SQ_STATE_TLS, 347 }; 348 349 struct mlx5e_sq_wqe_info { 350 u8 opcode; 351 }; 352 353 struct mlx5e_txqsq { 354 /* data path */ 355 356 /* dirtied @completion */ 357 u16 cc; 358 u32 dma_fifo_cc; 359 struct net_dim dim; /* Adaptive Moderation */ 360 361 /* dirtied @xmit */ 362 u16 pc ____cacheline_aligned_in_smp; 363 u32 dma_fifo_pc; 364 365 struct mlx5e_cq cq; 366 367 /* read only */ 368 struct mlx5_wq_cyc wq; 369 u32 dma_fifo_mask; 370 struct mlx5e_sq_stats *stats; 371 struct { 372 struct mlx5e_sq_dma *dma_fifo; 373 struct mlx5e_tx_wqe_info *wqe_info; 374 } db; 375 void __iomem *uar_map; 376 struct netdev_queue *txq; 377 u32 sqn; 378 u8 min_inline_mode; 379 struct device *pdev; 380 __be32 mkey_be; 381 unsigned long state; 382 struct hwtstamp_config *tstamp; 383 struct mlx5_clock *clock; 384 385 /* control path */ 386 struct mlx5_wq_ctrl wq_ctrl; 387 struct mlx5e_channel *channel; 388 int txq_ix; 389 u32 rate_limit; 390 struct work_struct recover_work; 391 } ____cacheline_aligned_in_smp; 392 393 struct mlx5e_dma_info { 394 struct page *page; 395 dma_addr_t addr; 396 }; 397 398 struct mlx5e_xdp_info { 399 struct xdp_frame *xdpf; 400 dma_addr_t dma_addr; 401 struct mlx5e_dma_info di; 402 }; 403 404 struct mlx5e_xdp_info_fifo { 405 struct mlx5e_xdp_info *xi; 406 u32 *cc; 407 u32 *pc; 408 u32 mask; 409 }; 410 411 struct mlx5e_xdp_wqe_info { 412 u8 num_wqebbs; 413 u8 num_pkts; 414 }; 415 416 struct mlx5e_xdp_mpwqe { 417 /* Current MPWQE session */ 418 struct mlx5e_tx_wqe *wqe; 419 u8 ds_count; 420 u8 pkt_count; 421 u8 max_ds_count; 422 u8 complete; 423 u8 inline_on; 424 }; 425 426 struct mlx5e_xdpsq; 427 typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq*, 428 struct mlx5e_xdp_info*); 429 struct mlx5e_xdpsq { 430 /* data path */ 431 432 /* dirtied @completion */ 433 u32 xdpi_fifo_cc; 434 u16 cc; 
435 436 /* dirtied @xmit */ 437 u32 xdpi_fifo_pc ____cacheline_aligned_in_smp; 438 u16 pc; 439 struct mlx5_wqe_ctrl_seg *doorbell_cseg; 440 struct mlx5e_xdp_mpwqe mpwqe; 441 442 struct mlx5e_cq cq; 443 444 /* read only */ 445 struct mlx5_wq_cyc wq; 446 struct mlx5e_xdpsq_stats *stats; 447 mlx5e_fp_xmit_xdp_frame xmit_xdp_frame; 448 struct { 449 struct mlx5e_xdp_wqe_info *wqe_info; 450 struct mlx5e_xdp_info_fifo xdpi_fifo; 451 } db; 452 void __iomem *uar_map; 453 u32 sqn; 454 struct device *pdev; 455 __be32 mkey_be; 456 u8 min_inline_mode; 457 unsigned long state; 458 unsigned int hw_mtu; 459 460 /* control path */ 461 struct mlx5_wq_ctrl wq_ctrl; 462 struct mlx5e_channel *channel; 463 } ____cacheline_aligned_in_smp; 464 465 struct mlx5e_icosq { 466 /* data path */ 467 u16 cc; 468 u16 pc; 469 470 struct mlx5_wqe_ctrl_seg *doorbell_cseg; 471 struct mlx5e_cq cq; 472 473 /* write@xmit, read@completion */ 474 struct { 475 struct mlx5e_sq_wqe_info *ico_wqe; 476 } db; 477 478 /* read only */ 479 struct mlx5_wq_cyc wq; 480 void __iomem *uar_map; 481 u32 sqn; 482 unsigned long state; 483 484 /* control path */ 485 struct mlx5_wq_ctrl wq_ctrl; 486 struct mlx5e_channel *channel; 487 } ____cacheline_aligned_in_smp; 488 489 static inline bool 490 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) 491 { 492 return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); 493 } 494 495 struct mlx5e_wqe_frag_info { 496 struct mlx5e_dma_info *di; 497 u32 offset; 498 bool last_in_page; 499 }; 500 501 struct mlx5e_umr_dma_info { 502 struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE]; 503 }; 504 505 struct mlx5e_mpw_info { 506 struct mlx5e_umr_dma_info umr; 507 u16 consumed_strides; 508 DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); 509 }; 510 511 #define MLX5E_MAX_RX_FRAGS 4 512 513 /* a single cache unit is capable to serve one napi call (for non-striding rq) 514 * or a MPWQE (for striding rq). 
515 */ 516 #define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \ 517 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT) 518 #define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT)) 519 struct mlx5e_page_cache { 520 u32 head; 521 u32 tail; 522 struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE]; 523 }; 524 525 struct mlx5e_rq; 526 typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); 527 typedef struct sk_buff * 528 (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 529 u16 cqe_bcnt, u32 head_offset, u32 page_idx); 530 typedef struct sk_buff * 531 (*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 532 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); 533 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); 534 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); 535 536 enum mlx5e_rq_flag { 537 MLX5E_RQ_FLAG_XDP_XMIT, 538 MLX5E_RQ_FLAG_XDP_REDIRECT, 539 }; 540 541 struct mlx5e_rq_frag_info { 542 int frag_size; 543 int frag_stride; 544 }; 545 546 struct mlx5e_rq_frags_info { 547 struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS]; 548 u8 num_frags; 549 u8 log_num_frags; 550 u8 wqe_bulk; 551 }; 552 553 struct mlx5e_rq { 554 /* data path */ 555 union { 556 struct { 557 struct mlx5_wq_cyc wq; 558 struct mlx5e_wqe_frag_info *frags; 559 struct mlx5e_dma_info *di; 560 struct mlx5e_rq_frags_info info; 561 mlx5e_fp_skb_from_cqe skb_from_cqe; 562 } wqe; 563 struct { 564 struct mlx5_wq_ll wq; 565 struct mlx5e_umr_wqe umr_wqe; 566 struct mlx5e_mpw_info *info; 567 mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; 568 u16 num_strides; 569 u16 actual_wq_head; 570 u8 log_stride_sz; 571 u8 umr_in_progress; 572 u8 umr_last_bulk; 573 } mpwqe; 574 }; 575 struct { 576 u16 headroom; 577 u8 map_dir; /* dma map direction */ 578 } buff; 579 580 struct mlx5e_channel *channel; 581 struct device *pdev; 582 struct net_device *netdev; 583 struct mlx5e_rq_stats *stats; 584 struct mlx5e_cq cq; 
585 struct mlx5e_cq_decomp cqd; 586 struct mlx5e_page_cache page_cache; 587 struct hwtstamp_config *tstamp; 588 struct mlx5_clock *clock; 589 590 mlx5e_fp_handle_rx_cqe handle_rx_cqe; 591 mlx5e_fp_post_rx_wqes post_wqes; 592 mlx5e_fp_dealloc_wqe dealloc_wqe; 593 594 unsigned long state; 595 int ix; 596 unsigned int hw_mtu; 597 598 struct net_dim dim; /* Dynamic Interrupt Moderation */ 599 600 /* XDP */ 601 struct bpf_prog *xdp_prog; 602 struct mlx5e_xdpsq xdpsq; 603 DECLARE_BITMAP(flags, 8); 604 struct page_pool *page_pool; 605 606 /* control */ 607 struct mlx5_wq_ctrl wq_ctrl; 608 __be32 mkey_be; 609 u8 wq_type; 610 u32 rqn; 611 struct mlx5_core_dev *mdev; 612 struct mlx5_core_mkey umr_mkey; 613 614 /* XDP read-mostly */ 615 struct xdp_rxq_info xdp_rxq; 616 } ____cacheline_aligned_in_smp; 617 618 struct mlx5e_channel { 619 /* data path */ 620 struct mlx5e_rq rq; 621 struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC]; 622 struct mlx5e_icosq icosq; /* internal control operations */ 623 bool xdp; 624 struct napi_struct napi; 625 struct device *pdev; 626 struct net_device *netdev; 627 __be32 mkey_be; 628 u8 num_tc; 629 630 /* XDP_REDIRECT */ 631 struct mlx5e_xdpsq xdpsq; 632 633 /* data path - accessed per napi poll */ 634 struct irq_desc *irq_desc; 635 struct mlx5e_ch_stats *stats; 636 637 /* control */ 638 struct mlx5e_priv *priv; 639 struct mlx5_core_dev *mdev; 640 struct hwtstamp_config *tstamp; 641 int ix; 642 int cpu; 643 cpumask_var_t xps_cpumask; 644 }; 645 646 struct mlx5e_channels { 647 struct mlx5e_channel **c; 648 unsigned int num; 649 struct mlx5e_params params; 650 }; 651 652 struct mlx5e_channel_stats { 653 struct mlx5e_ch_stats ch; 654 struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; 655 struct mlx5e_rq_stats rq; 656 struct mlx5e_xdpsq_stats rq_xdpsq; 657 struct mlx5e_xdpsq_stats xdpsq; 658 } ____cacheline_aligned_in_smp; 659 660 enum { 661 MLX5E_STATE_OPENED, 662 MLX5E_STATE_DESTROYING, 663 MLX5E_STATE_XDP_TX_ENABLED, 664 }; 665 666 struct mlx5e_rqt { 667 u32 rqtn; 
668 bool enabled; 669 }; 670 671 struct mlx5e_tir { 672 u32 tirn; 673 struct mlx5e_rqt rqt; 674 struct list_head list; 675 }; 676 677 enum { 678 MLX5E_TC_PRIO = 0, 679 MLX5E_NIC_PRIO 680 }; 681 682 struct mlx5e_rss_params { 683 u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; 684 u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS]; 685 u8 toeplitz_hash_key[40]; 686 u8 hfunc; 687 }; 688 689 struct mlx5e_modify_sq_param { 690 int curr_state; 691 int next_state; 692 int rl_update; 693 int rl_index; 694 }; 695 696 struct mlx5e_priv { 697 /* priv data path fields - start */ 698 struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; 699 int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; 700 #ifdef CONFIG_MLX5_CORE_EN_DCB 701 struct mlx5e_dcbx_dp dcbx_dp; 702 #endif 703 /* priv data path fields - end */ 704 705 u32 msglevel; 706 unsigned long state; 707 struct mutex state_lock; /* Protects Interface state */ 708 struct mlx5e_rq drop_rq; 709 710 struct mlx5e_channels channels; 711 u32 tisn[MLX5E_MAX_NUM_TC]; 712 struct mlx5e_rqt indir_rqt; 713 struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; 714 struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; 715 struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; 716 struct mlx5e_rss_params rss_params; 717 u32 tx_rates[MLX5E_MAX_NUM_SQS]; 718 719 struct mlx5e_flow_steering fs; 720 721 struct workqueue_struct *wq; 722 struct work_struct update_carrier_work; 723 struct work_struct set_rx_mode_work; 724 struct work_struct tx_timeout_work; 725 struct work_struct update_stats_work; 726 struct work_struct monitor_counters_work; 727 struct mlx5_nb monitor_counters_nb; 728 729 struct mlx5_core_dev *mdev; 730 struct net_device *netdev; 731 struct mlx5e_stats stats; 732 struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; 733 u8 max_opened_tc; 734 struct hwtstamp_config tstamp; 735 u16 q_counter; 736 u16 drop_rq_q_counter; 737 struct notifier_block events_nb; 738 739 #ifdef CONFIG_MLX5_CORE_EN_DCB 740 struct 
mlx5e_dcbx dcbx; 741 #endif 742 743 const struct mlx5e_profile *profile; 744 void *ppriv; 745 #ifdef CONFIG_MLX5_EN_IPSEC 746 struct mlx5e_ipsec *ipsec; 747 #endif 748 #ifdef CONFIG_MLX5_EN_TLS 749 struct mlx5e_tls *tls; 750 #endif 751 struct devlink_health_reporter *tx_reporter; 752 }; 753 754 struct mlx5e_profile { 755 int (*init)(struct mlx5_core_dev *mdev, 756 struct net_device *netdev, 757 const struct mlx5e_profile *profile, void *ppriv); 758 void (*cleanup)(struct mlx5e_priv *priv); 759 int (*init_rx)(struct mlx5e_priv *priv); 760 void (*cleanup_rx)(struct mlx5e_priv *priv); 761 int (*init_tx)(struct mlx5e_priv *priv); 762 void (*cleanup_tx)(struct mlx5e_priv *priv); 763 void (*enable)(struct mlx5e_priv *priv); 764 void (*disable)(struct mlx5e_priv *priv); 765 void (*update_stats)(struct mlx5e_priv *priv); 766 void (*update_carrier)(struct mlx5e_priv *priv); 767 struct { 768 mlx5e_fp_handle_rx_cqe handle_rx_cqe; 769 mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; 770 } rx_handlers; 771 int max_tc; 772 }; 773 774 void mlx5e_build_ptys2ethtool_map(void); 775 776 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, 777 struct net_device *sb_dev); 778 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); 779 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, 780 struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more); 781 782 void mlx5e_trigger_irq(struct mlx5e_icosq *sq); 783 void mlx5e_completion_event(struct mlx5_core_cq *mcq); 784 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); 785 int mlx5e_napi_poll(struct napi_struct *napi, int budget); 786 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); 787 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 788 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); 789 790 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); 791 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, 792 struct mlx5e_params 
*params); 793 794 void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); 795 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, 796 bool recycle); 797 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 798 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 799 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); 800 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); 801 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); 802 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); 803 struct sk_buff * 804 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 805 u16 cqe_bcnt, u32 head_offset, u32 page_idx); 806 struct sk_buff * 807 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 808 u16 cqe_bcnt, u32 head_offset, u32 page_idx); 809 struct sk_buff * 810 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 811 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); 812 struct sk_buff * 813 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 814 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); 815 816 void mlx5e_update_stats(struct mlx5e_priv *priv); 817 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); 818 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); 819 820 void mlx5e_init_l2_addr(struct mlx5e_priv *priv); 821 int mlx5e_self_test_num(struct mlx5e_priv *priv); 822 void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, 823 u64 *buf); 824 void mlx5e_set_rx_mode_work(struct work_struct *work); 825 826 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); 827 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); 828 int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); 829 830 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, 
831 u16 vid); 832 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, 833 u16 vid); 834 void mlx5e_timestamp_init(struct mlx5e_priv *priv); 835 836 struct mlx5e_redirect_rqt_param { 837 bool is_rss; 838 union { 839 u32 rqn; /* Direct RQN (Non-RSS) */ 840 struct { 841 u8 hfunc; 842 struct mlx5e_channels *channels; 843 } rss; /* RSS data */ 844 }; 845 }; 846 847 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, 848 struct mlx5e_redirect_rqt_param rrp); 849 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, 850 const struct mlx5e_tirc_config *ttconfig, 851 void *tirc, bool inner); 852 void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen); 853 struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt); 854 855 int mlx5e_open_locked(struct net_device *netdev); 856 int mlx5e_close_locked(struct net_device *netdev); 857 858 int mlx5e_open_channels(struct mlx5e_priv *priv, 859 struct mlx5e_channels *chs); 860 void mlx5e_close_channels(struct mlx5e_channels *chs); 861 862 /* Function pointer to be used to modify WH settings while 863 * switching channels 864 */ 865 typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); 866 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); 867 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, 868 struct mlx5e_channels *new_chs, 869 mlx5e_fp_hw_modify hw_modify); 870 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); 871 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); 872 873 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, 874 int num_channels); 875 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, 876 u8 cq_period_mode); 877 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, 878 u8 cq_period_mode); 879 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 880 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, 881 
			       struct mlx5e_params *params);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);

/* True when the device supports stateless GRE offload and its RX flow
 * tables can match on the inner IP version.
 */
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}

/* True when the TX software parser (SWP) is supported, including
 * checksum and LSO over SWP-described headers.
 */
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
		MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

/* Protocol layout description used to fill the software parser (SWP)
 * fields of a TX eth segment, see mlx5e_set_eseg_swp().
 */
struct mlx5e_swp_spec {
	__be16 l3_proto;	/* outer L3 ethertype */
	u8 l4_proto;		/* outer L4 IP protocol, 0 if none */
	u8 is_tun;		/* non-zero for tunneled packets */
	__be16 tun_l3_proto;	/* inner L3 ethertype */
	u8 tun_l4_proto;	/* inner L4 IP protocol */
};

/* Fill the SWP offsets and flags of a TX eth segment from @swp_spec and
 * the skb's header offsets.
 */
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset =
			skb_inner_transport_offset(skb) / 2;
		break;
	}
}

/* Return in @wqe the (zeroed) WQE at the current producer position and
 * in @pi its work queue index. Does not advance the producer counter.
 */
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
				      struct mlx5e_tx_wqe **wqe,
				      u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(*wqe, 0, sizeof(**wqe));
}

/* Build a NOP WQE (one data segment) at the producer position, advance
 * the producer counter and return the WQE.
 */
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	/* opcode/index in the ctrl segment; 0x01 is the DS (segment) count */
	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

/* Publish @pc to the SQ doorbell record, then ring the doorbell by
 * writing @ctrl to the UAR. The two barriers order the WQE write, the
 * doorbell-record update and the UAR write as the device requires.
 */
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

/* Re-arm the CQ: request an event for the next completion */
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif

int mlx5e_create_tir(struct mlx5_core_dev *mdev,
		     struct mlx5e_tir *tir,
u32 *in, int inlen); 1007 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, 1008 struct mlx5e_tir *tir); 1009 int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); 1010 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); 1011 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb); 1012 1013 /* common netdev helpers */ 1014 void mlx5e_create_q_counters(struct mlx5e_priv *priv); 1015 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv); 1016 int mlx5e_open_drop_rq(struct mlx5e_priv *priv, 1017 struct mlx5e_rq *drop_rq); 1018 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); 1019 1020 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); 1021 1022 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); 1023 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); 1024 1025 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); 1026 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv); 1027 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); 1028 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); 1029 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); 1030 1031 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, 1032 u32 underlay_qpn, u32 *tisn); 1033 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); 1034 1035 int mlx5e_create_tises(struct mlx5e_priv *priv); 1036 void mlx5e_update_carrier(struct mlx5e_priv *priv); 1037 int mlx5e_close(struct net_device *netdev); 1038 int mlx5e_open(struct net_device *netdev); 1039 void mlx5e_update_ndo_stats(struct mlx5e_priv *priv); 1040 1041 void mlx5e_queue_update_stats(struct mlx5e_priv *priv); 1042 int mlx5e_bits_invert(unsigned long a, int size); 1043 1044 typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv); 1045 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv); 1046 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, 1047 change_hw_mtu_cb set_mtu_cb); 1048 1049 /* ethtool helpers */ 1050 void 
mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, 1051 struct ethtool_drvinfo *drvinfo); 1052 void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, 1053 uint32_t stringset, uint8_t *data); 1054 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset); 1055 void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, 1056 struct ethtool_stats *stats, u64 *data); 1057 void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, 1058 struct ethtool_ringparam *param); 1059 int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, 1060 struct ethtool_ringparam *param); 1061 void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, 1062 struct ethtool_channels *ch); 1063 int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, 1064 struct ethtool_channels *ch); 1065 int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, 1066 struct ethtool_coalesce *coal); 1067 int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, 1068 struct ethtool_coalesce *coal); 1069 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, 1070 struct ethtool_link_ksettings *link_ksettings); 1071 int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, 1072 const struct ethtool_link_ksettings *link_ksettings); 1073 u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); 1074 u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); 1075 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, 1076 struct ethtool_ts_info *info); 1077 int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, 1078 struct ethtool_flash *flash); 1079 void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, 1080 struct ethtool_pauseparam *pauseparam); 1081 int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, 1082 struct ethtool_pauseparam *pauseparam); 1083 1084 /* mlx5e generic netdev management API */ 1085 int mlx5e_netdev_init(struct net_device *netdev, 1086 struct mlx5e_priv *priv, 1087 struct mlx5_core_dev *mdev, 1088 const struct mlx5e_profile *profile, 1089 void *ppriv); 1090 
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv); 1091 struct net_device* 1092 mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, 1093 int nch, void *ppriv); 1094 int mlx5e_attach_netdev(struct mlx5e_priv *priv); 1095 void mlx5e_detach_netdev(struct mlx5e_priv *priv); 1096 void mlx5e_destroy_netdev(struct mlx5e_priv *priv); 1097 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); 1098 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, 1099 struct mlx5e_rss_params *rss_params, 1100 struct mlx5e_params *params, 1101 u16 max_channels, u16 mtu); 1102 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, 1103 struct mlx5e_params *params); 1104 void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, 1105 u16 num_channels); 1106 u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); 1107 void mlx5e_rx_dim_work(struct work_struct *work); 1108 void mlx5e_tx_dim_work(struct work_struct *work); 1109 1110 void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); 1111 void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); 1112 netdev_features_t mlx5e_features_check(struct sk_buff *skb, 1113 struct net_device *netdev, 1114 netdev_features_t features); 1115 #ifdef CONFIG_MLX5_ESWITCH 1116 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac); 1117 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate); 1118 int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); 1119 int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats); 1120 #endif 1121 #endif /* __MLX5_EN_H__ */ 1122