1 /* 2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

/* L2 overhead the HW adds on top of the SW (IP) MTU */
#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))

#define MLX5E_MAX_DSCP          64
#define MLX5E_MAX_NUM_TC	8

/* log2 bounds/defaults for SQ and RQ ring sizes */
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
	(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
	 MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))

/* Multi-packet WQE (striding RQ) geometry: one WQE spans 2^18 bytes */
#define MLX5_MPWRQ_LOG_WQE_SZ			18
#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE		(MLX5_MPWRQ_NUM_STRIDES >> \
						 MLX5_MPWRQ_WQE_PAGE_ORDER)

/* MTTs are posted to HW in octword (16B) units, 8 MTT entries each */
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_MTTS(wqes) \
	(wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)

#define MLX5_UMR_ALIGN				(2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4

/* default CQ interrupt moderation profiles */
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2

#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */

#define MLX5E_ICOSQ_MAX_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT \
	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)

#define MLX5E_NUM_MAIN_GROUPS 9

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

/* Conditional debug print, gated on the netif message-level bit in
 * priv->msglevel.
 */
#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)


/* Minimum number of posted RX WQEs before the RQ is considered ready,
 * capped at half the ring so small rings still make progress.
 */
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

/* Smallest allowed log2 RQ size for the given WQ type */
static inline int mlx5_min_log_rq_size(int wq_type)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
	default:
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
	}
}

/* Largest allowed log2 RQ size for the given WQ type */
static inline int mlx5_max_log_rq_size(int wq_type)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
	default:
		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
	}
}

/* Channels are bounded by the completion vectors available on the device;
 * a kdump kernel is kept to the minimum to save memory.
 */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mdev->priv.eq_table.num_comp_vectors,
		      MLX5E_MAX_NUM_CHANNELS);
}

/* Basic TX WQE: control segment + Ethernet segment */
struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
};

struct mlx5e_rx_wqe {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data;
};

/* UMR WQE used to (re)map MPWQE pages via the device MTT */
struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_wqe_data_seg       data;
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

/* ethtool --show-priv-flags strings; order must match enum mlx5e_priv_flag */
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
	"rx_cqe_moder",
	"tx_cqe_moder",
	"rx_cqe_compress",
};

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= (pflag);		\
		else						\
			(params)->pflags &= ~(pflag);		\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif

/* Software configuration of a channel set; swapped atomically on reconfig */
struct mlx5e_params {
	u8  log_sq_size;
	u8  rq_wq_type;
	u16 rq_headroom;
	u8  mpwqe_log_stride_sz;
	u8  mpwqe_log_num_strides;
	u8  log_rq_size;
	u16 num_channels;
	u8  num_tc;
	bool rx_cqe_compress_def;
	struct net_dim_cq_moder rx_cq_moderation;
	struct net_dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u32 lro_wqe_sz;
	u16 tx_max_inline;
	u8  tx_min_inline_mode;
	u8  rss_hfunc;
	u8  toeplitz_hash_key[40];
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
};

#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_cee_config {
	/* bw pct for priority group */
	u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
	u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
	bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
	bool                       pfc_enable;
};

enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

struct mlx5e_dcbx {
	enum mlx5_dcbx_oper_mode   mode;
	struct mlx5e_cee_config    cee_cfg; /* pending configuration */
	u8                         dscp_app_cnt;

	/* The only setting that cannot be read from FW */
	u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
	u8                         cap;
};

/* DCB state consulted on the data path (kept separate from mlx5e_dcbx) */
struct mlx5e_dcbx_dp {
	u8                         dscp2prio[MLX5E_MAX_DSCP];
	u8                         trust_state;
};
#endif

enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_AM,
};

#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_channel      *channel;

	/* cqe decompression */
	struct mlx5_cqe64          title;
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        decmprs_left;
	u16                        decmprs_wqe_counter;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_frag_wq_ctrl   wq_ctrl;
} ____cacheline_aligned_in_smp;

/* Per-WQE bookkeeping for an in-flight TX descriptor */
struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8  num_wqebbs;
	u8  num_dma;
};

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_IPSEC,
};

struct mlx5e_sq_wqe_info {
	u8  opcode;
};

/* Regular (netdev-queue backed) send queue.
 * Fields are grouped by which path dirties them to limit cacheline
 * bouncing between the xmit and completion CPUs.
 */
struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u32                        dma_fifo_cc;

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u32                        dma_fifo_pc;
	struct mlx5e_sq_stats      stats;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_dma       *dma_fifo;
		struct mlx5e_tx_wqe_info  *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u16                        max_inline;
	u8                         min_inline_mode;
	u16                        edge;
	struct device             *pdev;
	__be32                     mkey_be;
	unsigned long              state;
	struct hwtstamp_config    *tstamp;
	struct mlx5_clock         *clock;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	int                        txq_ix;
	u32                        rate_limit;
} ____cacheline_aligned_in_smp;

/* Send queue dedicated to XDP_TX / XDP_REDIRECT transmissions */
struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @rx completion */
	u16                        cc;
	u16                        pc;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_dma_info     *di;
		bool                       doorbell;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	struct device             *pdev;
	__be32                     mkey_be;
	u8                         min_inline_mode;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

/* Internal control-operations SQ (e.g. UMR posting for striding RQ) */
struct mlx5e_icosq {
	/* data path */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_wqe_info *ico_wqe;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	u16                        edge;
	__be32                     mkey_be;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

/* True when the cyclic WQ has room for n more WQEBBs given producer pc
 * and consumer cc (both free-running; masking handles wrap-around).
 */
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
}

struct mlx5e_dma_info {
	struct page	*page;
	dma_addr_t	addr;
};

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info di;
	u32 offset;
};

struct mlx5e_umr_dma_info {
	__be64                *mtt;
	dma_addr_t             mtt_addr;
	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
	struct mlx5e_umr_wqe   wqe;
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};

/* a single cache unit is capable to serve one napi call (for non-striding rq)
 * or a MPWQE (for striding rq)
 */
#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

/* Receive queue; the union selects per-WQE state for the legacy (wqe)
 * vs striding (mpwqe) RQ types, chosen by wq_type.
 */
struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll      wq;

	union {
		struct {
			struct mlx5e_wqe_frag_info *frag_info;
			u32 frag_sz;	/* max possible skb frag_sz */
			union {
				bool page_reuse;
				bool xdp_xmit;
			};
		} wqe;
		struct {
			struct mlx5e_mpw_info *info;
			void                  *mtt_no_align;
			u16                    num_strides;
			u8                     log_stride_sz;
			bool                   umr_in_progress;
		} mpwqe;
	};
	struct {
		u16            headroom;
		u8             page_order;
		u8             map_dir;   /* dma map direction */
	} buff;

	struct mlx5e_channel  *channel;
	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_rq_stats  stats;
	struct mlx5e_cq        cq;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock      *clock;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;
	int                    ix;

	struct net_dim         dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog       *xdp_prog;
	struct mlx5e_xdpsq     xdpsq;

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	__be32                 mkey_be;
	u8                     wq_type;
	u32                    rqn;
	struct mlx5_core_dev  *mdev;
	struct mlx5_core_mkey  umr_mkey;

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
} ____cacheline_aligned_in_smp;

/* One channel = one RQ + per-TC SQs + ICOSQ, served by one NAPI context */
struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq;   /* internal control operations */
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;

	/* data path - accessed per napi poll */
	struct irq_desc *irq_desc;
	struct mlx5e_ch_stats      stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	int                        ix;
	int                        cpu;
};

struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int           num;
	struct mlx5e_params    params;
};

/* L3/L4 traffic types used to index the TTC steering rules and TIRs */
enum mlx5e_traffic_types {
	MLX5E_TT_IPV4_TCP,
	MLX5E_TT_IPV6_TCP,
	MLX5E_TT_IPV4_UDP,
	MLX5E_TT_IPV6_UDP,
	MLX5E_TT_IPV4_IPSEC_AH,
	MLX5E_TT_IPV6_IPSEC_AH,
	MLX5E_TT_IPV4_IPSEC_ESP,
	MLX5E_TT_IPV6_IPSEC_ESP,
	MLX5E_TT_IPV4,
	MLX5E_TT_IPV6,
	MLX5E_TT_ANY,
	MLX5E_NUM_TT,
	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};

enum mlx5e_tunnel_types {
	MLX5E_TT_IPV4_GRE,
	MLX5E_TT_IPV6_GRE,
	MLX5E_NUM_TUNNEL_TT,
};

enum {
	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
};

struct mlx5e_vxlan_db {
	spinlock_t			lock; /* protect vxlan table */
	struct radix_tree_root		tree;
};

struct mlx5e_l2_rule {
	u8  addr[ETH_ALEN + 2];
	struct mlx5_flow_handle *rule;
};

struct mlx5e_flow_table {
	int num_groups;
	struct mlx5_flow_table		*t;
	struct mlx5_flow_group		**g;
};

#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)

struct mlx5e_tc_table {
	struct mlx5_flow_table		*t;

	struct rhashtable_params        ht_params;
	struct rhashtable               ht;

	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
	DECLARE_HASHTABLE(hairpin_tbl, 8);
};

struct mlx5e_vlan_table {
	struct mlx5e_flow_table		ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
	struct mlx5_flow_handle	*active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle	*active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle	*untagged_rule;
	struct mlx5_flow_handle	*any_cvlan_rule;
	struct mlx5_flow_handle	*any_svlan_rule;
	bool			cvlan_filter_disabled;
};

struct mlx5e_l2_table {
	struct mlx5e_flow_table    ft;
	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
	struct mlx5e_l2_rule	   broadcast;
	struct mlx5e_l2_rule	   allmulti;
	struct mlx5e_l2_rule	   promisc;
	bool                       broadcast_enabled;
	bool                       allmulti_enabled;
	bool                       promisc_enabled;
};

/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle	 *rules[MLX5E_NUM_TT];
	struct mlx5_flow_handle  *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
};

#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle	 *default_rule;
	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
};

enum  arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t                     arfs_lock;
	struct list_head               rules;
	int                            last_filter_id;
	struct workqueue_struct        *wq;
};

/* NIC prio FTS */
enum {
	MLX5E_VLAN_FT_LEVEL = 0,
	MLX5E_L2_FT_LEVEL,
	MLX5E_TTC_FT_LEVEL,
	MLX5E_INNER_TTC_FT_LEVEL,
	MLX5E_ARFS_FT_LEVEL
};

enum {
	MLX5E_TC_FT_LEVEL = 0,
	MLX5E_TC_TTC_FT_LEVEL,
};

struct mlx5e_ethtool_table {
	struct mlx5_flow_table *ft;
	int                    num_rules;
};

#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4

struct mlx5e_ethtool_steering {
	struct mlx5e_ethtool_table      l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
	struct mlx5e_ethtool_table      l2_ft[ETHTOOL_NUM_L2_FTS];
	struct list_head                rules;
	int                             tot_num_rules;
};

struct mlx5e_flow_steering {
	struct mlx5_flow_namespace      *ns;
	struct mlx5e_ethtool_steering   ethtool;
	struct mlx5e_tc_table           tc;
	struct mlx5e_vlan_table         vlan;
	struct mlx5e_l2_table           l2;
	struct mlx5e_ttc_table          ttc;
	struct mlx5e_ttc_table          inner_ttc;
	struct mlx5e_arfs_tables        arfs;
};

struct mlx5e_rqt {
	u32              rqtn;
	bool		 enabled;
};

struct mlx5e_tir {
	u32		  tirn;
	struct mlx5e_rqt  rqt;
	struct list_head  list;
};

enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};

/* Per-netdev driver private state */
struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];
	int                        hard_mtu;

	struct mlx5e_flow_steering fs;
	struct mlx5e_vxlan_db      vxlan;

	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct delayed_work        update_stats_work;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
	void                      *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
};

/* Ops vtable that specializes the generic netdev lifecycle for a
 * particular flavor (native NIC, representor, IPoIB, ...).
 */
struct mlx5e_profile {
	void	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	int	(*max_nch)(struct mlx5_core_dev *mdev);
	struct {
		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	} rx_handlers;
	void	(*netdev_registered_init)(struct mlx5e_priv *priv);
	void	(*netdev_registered_remove)(struct mlx5e_priv *priv);
	int	max_tc;
};

void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);

/* RX buffer and CQE handling (legacy and striding/MPWQE RQ variants) */
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
			bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);

void mlx5e_update_stats(struct mlx5e_priv *priv);

/* Flow steering setup/teardown and ethtool flow classification */
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
			   int location);
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
				struct ethtool_rxnfc *info, u32 *rule_locs);
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			       struct ethtool_rx_flow_spec *fs);
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location);
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

/* Target description for an RQT redirect: either a single direct RQN
 * or RSS spreading over a channel set with the given hash function.
 */
struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params,
			       u8 rq_type);

/* Inner-header classification requires both stateless GRE offload and
 * flow-table matching on the inner IP version.
 */
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}

/* Post a NOP WQE at the current producer position and advance *pc.
 * Used to pad the ring so a real WQE never wraps mid-descriptor.
 */
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = *pc & wq->sz_m1;
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

/* Ring the SQ doorbell for the WQE whose ctrl segment is @ctrl.
 * The two barriers enforce WQE -> doorbell record -> doorbell ordering
 * as seen by the device; do not reorder or weaken them.
 */
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}

/* Re-arm the CQ so the next completion raises an event */
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

/* MTT offset of a striding-RQ WQE; matches MLX5E_REQUIRED_MTTS spacing */
static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif

#ifndef CONFIG_RFS_ACCEL
/* aRFS stubs when accelerated RFS is compiled out: table setup is a
 * no-op success, runtime enable/disable reports unsupported.
 */
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	return 0;
}

static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}

static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	return -EOPNOTSUPP;
}

static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	return -EOPNOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id);
#endif

u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
		     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

/* common netdev helpers */
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

/* Parameters for building a TTC (traffic type classifier) flow table */
struct ttc_params {
	struct mlx5_flow_table_attr ft_attr;
	u32 any_tt_tirn;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table *inner_ttc;
};

void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);

int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc);

int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
				 struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
				   struct mlx5e_ttc_table *ttc);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);

int mlx5e_bits_invert(unsigned long a, int size);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */