/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
#include "en/qos.h"
#include "lib/hv_vhca.h"
#include "lib/clock.h"

extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))

#define MLX5E_MAX_NUM_TC 8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5E_RX_MAX_HEAD (256)

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))

#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
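/* A worked example for the macros above, assuming the common case of 4 KB
 * pages (PAGE_SHIFT = 12): MLX5_MPWRQ_LOG_WQE_SZ = 18 describes a 256 KB
 * multi-packet WQE, so MLX5_MPWRQ_WQE_PAGE_ORDER = 18 - 12 = 6 and
 * MLX5_MPWRQ_PAGES_PER_WQE = 64 pages per WQE.
 */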
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
 * WQEs. This page will absorb write overflow by the hardware when
 * receiving packets larger than MTU. These oversize packets are
 * dropped by the driver at a later stage.
 */
#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2

#define MLX5E_LOG_INDIR_RQT_SIZE 0x8
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */

#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
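/* A rough sizing sketch for the UMR WQE above, assuming 4 KB pages (64 pages
 * per multi-packet WQE): the inline MTT array is 64 * sizeof(struct mlx5_mtt)
 * = 512 bytes, and MLX5E_UMR_WQEBBS rounds the whole WQE (control, UMR
 * control and mkey segments plus the MTTs) up to a whole number of 64-byte
 * WQE basic blocks (MLX5_SEND_WQE_BB).
 */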
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)			\
do {								\
	if (NETIF_MSG_##mlevel & (priv)->msglevel)		\
		netdev_warn(priv->netdev, format,		\
			    ##__VA_ARGS__);			\
} while (0)

#define mlx5e_state_dereference(priv, p) \
	rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))

enum mlx5e_rq_group {
	MLX5E_RQ_GROUP_REGULAR,
	MLX5E_RQ_GROUP_XSK,
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};

static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
	if (mlx5_lag_is_lacp_owner(mdev))
		return 1;

	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
}

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

/* Use this function to get the max num channels (rxqs/txqs) only when creating
 * the netdev.
 */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg eth;
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg data[];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
	struct mlx5_mkey_seg mkc;
	struct mlx5_mtt inline_mtts[0];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER,
	MLX5E_PFLAG_TX_CQE_BASED_MODER,
	MLX5E_PFLAG_RX_CQE_COMPRESS,
	MLX5E_PFLAG_RX_STRIDING_RQ,
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
	MLX5E_PFLAG_XDP_TX_MPWQE,
	MLX5E_PFLAG_SKB_TX_MPWQE,
	MLX5E_PFLAG_TX_PORT_TS,
	MLX5E_NUM_PFLAGS, /* Keep last */
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= BIT(pflag);		\
		else						\
			(params)->pflags &= ~(BIT(pflag));	\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
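/* A usage sketch for the pflag helpers above, assuming the usual ethtool
 * priv-flags flow (the real call sites live in the ethtool code, not in this
 * header): a private flag is flipped on a copy of the channel parameters and
 * then applied by switching channels, e.g.
 *
 *	struct mlx5e_params new_params = priv->channels.params;
 *
 *	MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
 *	if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		...
 */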
struct mlx5e_params {
	u8 log_sq_size;
	u8 rq_wq_type;
	u8 log_rq_mtu_frames;
	u16 num_channels;
	u8 num_tc;
	bool rx_cqe_compress_def;
	bool tunneled_offload_en;
	struct dim_cq_moder rx_cq_moderation;
	struct dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u8 tx_min_inline_mode;
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	struct mlx5e_xsk *xsk;
	unsigned int sw_mtu;
	int hard_mtu;
};

enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_RECOVERING,
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */
};

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	u16 event_ctr;
	struct napi_struct *napi;
	struct mlx5_core_cq mcq;
	struct mlx5e_ch_stats *ch_stats;

	/* control */
	struct net_device *netdev;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_cq_decomp {
	/* cqe decompression */
	struct mlx5_cqe64 title;
	struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8 mini_arr_idx;
	u16 left;
	u16 wqe_counter;
} ____cacheline_aligned_in_smp;

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t addr;
	u32 size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_MPWQE,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_AM,
	MLX5E_SQ_STATE_TLS,
	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
	MLX5E_SQ_STATE_PENDING_XSK_TX,
};

struct mlx5e_tx_mpwqe {
	/* Current MPWQE session */
	struct mlx5e_tx_wqe *wqe;
	u32 bytes_count;
	u8 ds_count;
	u8 pkt_count;
	u8 inline_on;
};

struct mlx5e_skb_fifo {
	struct sk_buff **fifo;
	u16 *pc;
	u16 *cc;
	u16 mask;
};

struct mlx5e_ptpsq;

struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16 cc;
	u16 skb_fifo_cc;
	u32 dma_fifo_cc;
	struct dim dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16 pc ____cacheline_aligned_in_smp;
	u16 skb_fifo_pc;
	u32 dma_fifo_pc;
	struct mlx5e_tx_mpwqe mpwqe;

	struct mlx5e_cq cq;

	/* read only */
	struct mlx5_wq_cyc wq;
	u32 dma_fifo_mask;
	struct mlx5e_sq_stats *stats;
	struct {
		struct mlx5e_sq_dma *dma_fifo;
		struct mlx5e_skb_fifo skb_fifo;
		struct mlx5e_tx_wqe_info *wqe_info;
	} db;
	void __iomem *uar_map;
	struct netdev_queue *txq;
	u32 sqn;
	u16 stop_room;
	u8 min_inline_mode;
	struct device *pdev;
	__be32 mkey_be;
	unsigned long state;
	unsigned int hw_mtu;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;
	struct net_device *netdev;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	int ch_ix;
	int txq_ix;
	u32 rate_limit;
	struct work_struct recover_work;
	struct mlx5e_ptpsq *ptpsq;
	cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {
	dma_addr_t addr;
	union {
		struct page *page;
		struct xdp_buff *xsk;
	};
};

/* XDP packets can be transmitted in different ways. On completion, we need to
 * distinguish between them to clean up things in a proper way.
 */
enum mlx5e_xdp_xmit_mode {
	/* An xdp_frame was transmitted due to either XDP_REDIRECT from another
	 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
	 * returned.
	 */
	MLX5E_XDP_XMIT_MODE_FRAME,

	/* The xdp_frame was created in place as a result of XDP_TX from a
	 * regular RQ. No DMA remapping happened, and the page belongs to us.
	 */
	MLX5E_XDP_XMIT_MODE_PAGE,

	/* No xdp_frame was created at all, the transmit happened from a UMEM
	 * page. The UMEM Completion Ring producer pointer has to be increased.
	 */
	MLX5E_XDP_XMIT_MODE_XSK,
};
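/* How the union in mlx5e_xdp_info below relates to the modes above (an
 * inference from the declarations, not a statement of the exact completion
 * code): MODE_FRAME keeps the xdp_frame and its DMA address so the frame can
 * be unmapped and returned, MODE_PAGE keeps the owning RQ and page info so
 * the page can be released back to it, and MODE_XSK needs no extra
 * per-packet state.
 */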
struct mlx5e_xdp_info {
	enum mlx5e_xdp_xmit_mode mode;
	union {
		struct {
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;
		} frame;
		struct {
			struct mlx5e_rq *rq;
			struct mlx5e_dma_info di;
		} page;
	};
};

struct mlx5e_xmit_data {
	dma_addr_t dma_addr;
	void *data;
	u32 len;
};

struct mlx5e_xdp_info_fifo {
	struct mlx5e_xdp_info *xi;
	u32 *cc;
	u32 *pc;
	u32 mask;
};

struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xmit_data *,
					struct mlx5e_xdp_info *,
					int);

struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u32 xdpi_fifo_cc;
	u16 cc;

	/* dirtied @xmit */
	u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
	u16 pc;
	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_tx_mpwqe mpwqe;

	struct mlx5e_cq cq;

	/* read only */
	struct xsk_buff_pool *xsk_pool;
	struct mlx5_wq_cyc wq;
	struct mlx5e_xdpsq_stats *stats;
	mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
	mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
	struct {
		struct mlx5e_xdp_wqe_info *wqe_info;
		struct mlx5e_xdp_info_fifo xdpi_fifo;
	} db;
	void __iomem *uar_map;
	u32 sqn;
	struct device *pdev;
	__be32 mkey_be;
	u8 min_inline_mode;
	unsigned long state;
	unsigned int hw_mtu;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_icosq {
	/* data path */
	u16 cc;
	u16 pc;

	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5e_cq cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_icosq_wqe_info *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc wq;
	void __iomem *uar_map;
	u32 sqn;
	unsigned long state;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;

	struct work_struct recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

#define MLX5E_MAX_RX_FRAGS 4

/* A single cache unit is capable of serving one napi call (for non-striding rq)
 * or a MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
			  MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
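/* With 4 KB pages this works out to
 * MLX5E_CACHE_UNIT = max(64 pages per MPWQE, NAPI_POLL_WEIGHT = 64) = 64 and
 * MLX5E_CACHE_SIZE = 4 * 64 = 256 cached pages per RQ.
 */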
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT,
	MLX5E_RQ_FLAG_XDP_REDIRECT,
};

struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};

struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info *di;
			struct mlx5e_rq_frags_info info;
			mlx5e_fp_skb_from_cqe skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll wq;
			struct mlx5e_umr_wqe umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16 num_strides;
			u16 actual_wq_head;
			u8 log_stride_sz;
			u8 umr_in_progress;
			u8 umr_last_bulk;
			u8 umr_completed;
		} mpwqe;
	};
	struct {
		u16 headroom;
		u32 frame0_sz;
		u8 map_dir; /* dma map direction */
	} buff;

	struct device *pdev;
	struct net_device *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq cq;
	struct mlx5e_cq_decomp cqd;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;
	struct mlx5e_icosq *icosq;
	struct mlx5e_priv *priv;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes post_wqes;
	mlx5e_fp_dealloc_wqe dealloc_wqe;

	unsigned long state;
	int ix;
	unsigned int hw_mtu;

	struct dim dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog __rcu *xdp_prog;
	struct mlx5e_xdpsq *xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool *page_pool;

	/* AF_XDP zero-copy */
	struct xsk_buff_pool *xsk_pool;

	struct work_struct recover_work;

	/* control */
	struct mlx5_wq_ctrl wq_ctrl;
	__be32 mkey_be;
	u8 wq_type;
	u32 rqn;
	struct mlx5_core_dev *mdev;
	struct mlx5_core_mkey umr_mkey;
	struct mlx5e_dma_info wqe_overflow;

	/* XDP read-mostly */
	struct xdp_rxq_info xdp_rxq;
	cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;

enum mlx5e_channel_state {
	MLX5E_CHANNEL_STATE_XSK,
	MLX5E_CHANNEL_NUM_STATES
};
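/* A summary of the structure below, derived from its fields rather than from
 * the datapath code: each channel bundles one regular RQ, one TXQ SQ per TC,
 * an ICOSQ for internal control operations, a dedicated XDP SQ for XDP_TX
 * and another for XDP_REDIRECT, plus separate XSK RQ/SQ instances and an
 * async ICOSQ for AF_XDP zero-copy.
 */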
struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq rq;
	struct mlx5e_xdpsq rq_xdpsq;
	struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq icosq; /* internal control operations */
	struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
	bool xdp;
	struct napi_struct napi;
	struct device *pdev;
	struct net_device *netdev;
	__be32 mkey_be;
	u16 qos_sqs_size;
	u8 num_tc;
	u8 lag_port;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq xdpsq;

	/* AF_XDP zero-copy */
	struct mlx5e_rq xskrq;
	struct mlx5e_xdpsq xsksq;

	/* Async ICOSQ */
	struct mlx5e_icosq async_icosq;
	/* async_icosq can be accessed from any CPU - the spinlock protects it. */
	spinlock_t async_icosq_lock;

	/* data path - accessed per napi poll */
	const struct cpumask *aff_mask;
	struct mlx5e_ch_stats *stats;

	/* control */
	struct mlx5e_priv *priv;
	struct mlx5_core_dev *mdev;
	struct hwtstamp_config *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int ix;
	int cpu;
};

struct mlx5e_port_ptp;

struct mlx5e_channels {
	struct mlx5e_channel **c;
	struct mlx5e_port_ptp *port_ptp;
	unsigned int num;
	struct mlx5e_params params;
};

struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_rq_stats xskrq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
	struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;

struct mlx5e_port_ptp_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
} ____cacheline_aligned_in_smp;

enum {
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
	MLX5E_STATE_XDP_TX_ENABLED,
	MLX5E_STATE_XDP_ACTIVE,
};

struct mlx5e_rqt {
	u32 rqtn;
	bool enabled;
};

struct mlx5e_tir {
	u32 tirn;
	struct mlx5e_rqt rqt;
	struct list_head list;
};

enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};

struct mlx5e_rss_params {
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
	u8 toeplitz_hash_key[40];
	u8 hfunc;
};

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	int rl_update;
	int rl_index;
	bool qos_update;
	u16 qos_queue_group_id;
};

#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent {
	struct mlx5_hv_vhca_agent *agent;
	struct delayed_work work;
	u16 delay;
	void *buf;
};
#endif

struct mlx5e_xsk {
	/* XSK buffer pools are stored separately from channels,
	 * because we don't want to lose them when channels are
	 * recreated. The kernel also stores the buffer pool, but it doesn't
	 * distinguish between zero-copy and non-zero-copy UMEMs, so we
	 * rely on our own mechanism.
	 */
	struct xsk_buff_pool **pools;
	u16 refcnt;
	bool ever_used;
};
/* Temporary storage for variables that are allocated when struct mlx5e_priv is
 * initialized, and used where we can't allocate them because those functions
 * must not fail. Use with care and make sure the same variable is not used
 * simultaneously by multiple users.
 */
struct mlx5e_scratchpad {
	cpumask_var_t cpumask;
};

struct mlx5e_htb {
	DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
	struct mlx5e_sq_stats **qos_sq_stats;
	u16 max_qos_sqs;
	u16 maj_id;
	u16 defcls;
};

struct mlx5e_trap;

struct mlx5e_priv {
	/* priv data path fields - start */
	/* +1 for port ptp ts */
	struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
				   MLX5E_QOS_MAX_LEAF_NODES];
	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
	int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp dcbx_dp;
#endif
	/* priv data path fields - end */

	u32 msglevel;
	unsigned long state;
	struct mutex state_lock; /* Protects Interface state */
	struct mlx5e_rq drop_rq;

	struct mlx5e_channels channels;
	u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_rss_params rss_params;
	u32 tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct *wq;
	struct work_struct update_carrier_work;
	struct work_struct set_rx_mode_work;
	struct work_struct tx_timeout_work;
	struct work_struct update_stats_work;
	struct work_struct monitor_counters_work;
	struct mlx5_nb monitor_counters_nb;

	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_trap *en_trap;
	struct mlx5e_stats stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	struct mlx5e_channel_stats trap_stats;
	struct mlx5e_port_ptp_stats port_ptp_stats;
	u16 max_nch;
	u8 max_opened_tc;
	bool port_ptp_opened;
	struct hwtstamp_config tstamp;
	u16 q_counter;
	u16 drop_rq_q_counter;
	struct notifier_block events_nb;
	struct notifier_block blocking_events_nb;
	int num_tc_x_num_ch;

	struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx dcbx;
#endif

	const struct mlx5e_profile *profile;
	void *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls *tls;
#endif
	struct devlink_health_reporter *tx_reporter;
	struct devlink_health_reporter *rx_reporter;
	struct devlink_port dl_port;
	struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
	struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
	struct mlx5e_scratchpad scratchpad;
	struct mlx5e_htb htb;
};

struct mlx5e_rx_handlers {
	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
};

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
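/* Note on the profile below: rq_groups is expressed with the
 * MLX5E_NUM_RQ_GROUPS() macro defined earlier, i.e. 1 (REGULAR only) or
 * 2 (REGULAR + XSK), and mlx5e_calc_max_nch() near the end of this header
 * divides the netdev's rx queue budget by it.
 */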
struct mlx5e_profile {
	int (*init)(struct mlx5_core_dev *mdev,
		    struct net_device *netdev);
	void (*cleanup)(struct mlx5e_priv *priv);
	int (*init_rx)(struct mlx5e_priv *priv);
	void (*cleanup_rx)(struct mlx5e_priv *priv);
	int (*init_tx)(struct mlx5e_priv *priv);
	void (*cleanup_tx)(struct mlx5e_priv *priv);
	void (*enable)(struct mlx5e_priv *priv);
	void (*disable)(struct mlx5e_priv *priv);
	int (*update_rx)(struct mlx5e_priv *priv);
	void (*update_stats)(struct mlx5e_priv *priv);
	void (*update_carrier)(struct mlx5e_priv *priv);
	unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
	mlx5e_stats_grp_t *stats_grps;
	const struct mlx5e_rx_handlers *rx_handlers;
	int max_tc;
	u8 rq_groups;
};

void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);

struct mlx5e_xsk_param;

struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);

struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);

struct mlx5e_create_cq_param {
	struct napi_struct *napi;
	struct mlx5e_ch_stats *ch_stats;
	int node;
	int ix;
};

struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
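/* A hedged sketch of the intended call pattern (the real call sites are in
 * the channel open code, not in this header): the caller fills a
 * struct mlx5e_create_cq_param from the channel (napi, ch_stats, NUMA node,
 * channel index) and passes it to mlx5e_open_cq() together with the
 * dim_cq_moder settings taken from struct mlx5e_params.
 */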
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW or kernel settings while
 * switching channels
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
	int fn##_ctx(struct mlx5e_priv *priv, void *context) \
	{ \
		return fn(priv); \
	}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
			       void *context);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u16 qos_queue_group_id,
			u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);

static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ETH(mdev, swp) &&
	       MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;

int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
		     u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
void mlx5e_free_di_list(struct mlx5e_rq *rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);

/* mlx5e generic netdev management API */
static inline unsigned int
mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
{
	return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
}

int mlx5e_priv_init(struct mlx5e_priv *priv,
		    struct net_device *netdev,
		    struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
				const struct mlx5e_profile *new_profile, void *new_ppriv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
#endif /* __MLX5_EN_H__ */