// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
#include <net/xdp_sock_drv.h>

static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
	u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);

	return min_page_shift ? : 12;
}

u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
	u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
	u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);

	/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
	if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
		req_page_shift = min_page_shift;

	return max(req_page_shift, min_page_shift);
}

enum mlx5e_mpwrq_umr_mode
mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
	/* Different memory management schemes use different mechanisms to map
	 * user-mode memory. The stricter the guarantees we have, the faster
	 * the mechanism we can use:
	 * 1. MTT - direct mapping in page granularity.
	 * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
	 *    all mappings have the same size.
	 */
	bool unaligned = xsk ? xsk->unaligned : false;

	/* XSK frames can start at arbitrary unaligned locations, but they all
	 * have the same size, which is a power of two. This allows optimizing
	 * to one KSM per frame.
	 */
	if (unaligned)
		return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;

	/* XSK: frames are naturally aligned, MTT can be used.
	 * Non-XSK: Allocations happen in units of CPU pages, therefore, the
	 * mappings are naturally aligned.
	 */
	return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
}

u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
{
	switch (mode) {
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		return sizeof(struct mlx5_mtt);
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		return sizeof(struct mlx5_ksm);
	}
	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
	return 0;
}

u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
	u8 max_pages_per_wqe, max_log_mpwqe_size;
	u16 max_wqe_size;

	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
				       MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
	max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;

	WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);

	return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
}
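
/* Illustrative numbers only, not taken from real device caps: suppose
 * mlx5e_get_max_sq_aligned_wqebbs() returns 4, so max_wqe_size = 4 * 64 =
 * 256 bytes. After subtracting the fixed UMR WQE header and aligning down
 * to MLX5_UMR_MTT_ALIGNMENT, an 8-byte MTT entry (aligned mode) packs
 * roughly twice as many pages per WQE as a 16-byte KSM entry (unaligned
 * mode). If 16 pages fit and page_shift = 12, ilog2(16) + 12 = 16, i.e. a
 * 64 KB MPWQE.
 */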
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			     enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
	u8 pages_per_wqe;

	pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;

	/* Two MTTs are needed to form an octword. The number of MTTs is encoded
	 * in octwords in a UMR WQE, so we need at least two to avoid mapping
	 * garbage addresses.
	 */
	if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
		pages_per_wqe = 2;

	/* Sanity check for further calculations to succeed. */
	BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
	if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
		return MLX5_MPWRQ_MAX_PAGES_PER_WQE;

	return pages_per_wqe;
}

u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
			   enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
	u16 umr_wqe_sz;

	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
		ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);

	WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);

	return umr_wqe_sz;
}

u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
			  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
			    MLX5_SEND_WQE_BB);
}

u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
			    enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);

	/* Add another page as a buffer between WQEs. This page will absorb
	 * write overflow by the hardware, when receiving packets larger than
	 * MTU. These oversize packets are dropped by the driver at a later
	 * stage.
	 */
	return ALIGN(pages_per_wqe + 1,
		     MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
}

u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
				enum mlx5e_mpwrq_umr_mode umr_mode)
{
	if (umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)
		return min(MLX5E_MAX_RQ_NUM_KSMS,
			   1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));

	return MLX5E_MAX_RQ_NUM_MTTS;
}

static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
				      enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
	u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);

	return ilog2(max_entries / mtts_per_wqe);
}

u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
			       enum mlx5e_mpwrq_umr_mode umr_mode)
{
	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
		MLX5E_ORDER2_MAX_PACKET_MTU;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (params->xdp_prog)
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}
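
/* For illustration (exact values depend on the architecture and kconfig):
 * with an XDP program attached, the headroom is NET_IP_ALIGN +
 * XDP_PACKET_HEADROOM, e.g. 2 + 256 = 258 bytes on platforms where
 * NET_IP_ALIGN is 2; without XDP it is NET_IP_ALIGN + MLX5_RX_HEADROOM.
 * XSK RQs use whatever headroom the userspace socket requested instead.
 */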
static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

	return xsk->headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
{
	/* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
	u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

	return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}

static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params,
					 struct mlx5e_xsk_param *xsk,
					 bool mpwqe)
{
	/* XSK frames are mapped as individual pages, because frames may come in
	 * an arbitrary order from random locations in the UMEM.
	 */
	if (xsk)
		return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;

	/* XDP in mlx5e doesn't support multiple packets per page. */
	if (params->xdp_prog)
		return PAGE_SIZE;

	return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
}

static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
				       struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
		order_base_2(linear_stride_sz);
}

bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
		return false;

	/* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
	 * must fit into a CPU page.
	 */
	if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
		return false;

	/* XSK frames must be big enough to hold the packet data. */
	if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
		return false;

	return true;
}

static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  u8 log_stride_sz, u8 log_num_strides,
					  u8 page_shift,
					  enum mlx5e_mpwrq_umr_mode umr_mode)
{
	if (log_stride_sz + log_num_strides !=
	    mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 log_num_strides;
	u8 log_stride_sz;
	u8 log_wqe_sz;

	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);

	if (log_wqe_sz < log_stride_sz)
		return false;

	log_num_strides = log_wqe_sz - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
					     log_num_strides, page_shift,
					     umr_mode);
}
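
/* Worked example with assumed values (a sketch, not derived from real caps):
 * for a 1500-byte MTU without XDP, mlx5e_rx_get_linear_sz_skb() yields
 * headroom + hw_mtu plus the skb_shared_info overhead, which rounds up to a
 * 2048-byte stride. If mlx5e_mpwrq_log_wqe_sz() returns 18 (a 256 KB WQE),
 * then log_stride_sz = 11 and log_num_strides = 18 - 11 = 7, i.e. 128
 * packets per MPWQE, which mlx5e_verify_rx_mpwqe_strides() then checks
 * against the device limits.
 */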
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 log_pkts_per_wqe, page_shift, max_log_rq_size;

	log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
	page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	/* Ethtool's rx_max_pending is calculated for regular RQ, that uses
	 * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
	 * frame size not equal to PAGE_SIZE.
	 * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
	 * unexpected failure.
	 */
	if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
		return max_log_rq_size;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params)
{
	return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params)
{
	return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params)
{
	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
			 PAGE_SIZE;

	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
		return linear_headroom;

	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return linear_headroom;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		return linear_headroom;

	return 0;
}
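
/* Worked example with assumed inputs (illustrative only): if
 * log_pkts_per_wqe = 7 (128 packets per MPWQE) and the user requests
 * log_rq_mtu_frames = 13 (8192 frames), the RQ size is 13 - 7 = 6, i.e. 64
 * MPWQEs. A request below log_pkts_per_wqe +
 * MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW is clamped to the minimum instead of
 * underflowing.
 */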
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room  = mlx5e_ktls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
	if (is_mpwqe)
		/* A MPWQE can take up to the maximum cacheline-aligned WQE +
		 * all the normal stop room can be taken if a new packet breaks
		 * the active MPWQE session and allocates its WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_mpwqe(mdev);

	return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
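
/* Numerical sketch with assumed values (not real stop-room numbers): if the
 * kTLS stop room is 0 and mlx5e_stop_room_for_max_wqe() returns 16 WQEBBs,
 * an SQ with log_sq_size = 4 (16 WQEBBs) fails mlx5e_validate_params(),
 * because the stop room alone would consume the whole queue; log_sq_size of
 * 5 or more passes.
 */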
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);

	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
		return -EOPNOTSUPP;

	if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
		return -EINVAL;

	return 0;
}

int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	bool unaligned = xsk ? xsk->unaligned : false;
	u16 max_mtu_pkts;

	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
		return -EOPNOTSUPP;

	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return -EINVAL;

	/* Current RQ length is too big for the given frame size, the
	 * needed number of WQEs exceeds the maximum.
	 */
	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned));
	if (params->log_rq_mtu_frames > max_mtu_pkts) {
		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
		return -EINVAL;
	}

	return 0;
}
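
/* Illustration with assumed numbers: if mlx5e_mpwrq_max_log_rq_pkts()
 * evaluates to 11 for a small XSK frame size, a ring configured for
 * log_rq_mtu_frames = 13 (8192 frames) is rejected with -EINVAL, because it
 * would need more MPWQEs than the device can address; shrinking the ring or
 * enlarging the XSK chunk size makes the check pass.
 */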
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
			       BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
			       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
	if (xdp)
		/* XDP requires all fragments to be of the same size. */
		return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;

	/* Optimization for small packets: the last fragment is bigger than the others. */
	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}
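
/* Example arithmetic (assuming 4 KB pages and MLX5E_MAX_RX_FRAGS == 4):
 * with 2048-byte fragments and a first fragment of roughly 1700 usable
 * bytes after headroom and skb_shared_info overhead, the non-XDP maximum
 * is about 1700 + 2 * 2048 + 4096 bytes, while XDP, which needs equal
 * fragments, allows about 1700 + 3 * 2048 bytes.
 */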
#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_xsk_param *xsk,
				     struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	int first_frag_size_max;
	u32 buf_size = 0;
	u16 headroom;
	int max_mtu;
	int i;

	if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;

		/* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The
		 * first WQE in the page is responsible for allocation of this
		 * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
		 * still not completed, the allocation must stop before k*N.
		 */
		info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;

		goto out;
	}

	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
					  params->xdp_prog);
	if (byte_count > max_mtu || params->xdp_prog) {
		frag_size_max = PAGE_SIZE;
		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
						  params->xdp_prog);
		if (byte_count > max_mtu) {
			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
				      params->sw_mtu, max_mtu);
			return -EINVAL;
		}
	}

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i == 0)
			frag_size = min(frag_size, first_frag_size_max);
		else if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		buf_size += frag_size;

		if (params->xdp_prog) {
			/* XDP multi buffer expects fragments of the same size. */
			info->arr[i].frag_stride = frag_size_max;
		} else {
			if (i == 0) {
				/* Ensure that headroom and tailroom are included. */
				frag_size += headroom;
				frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			}
			info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
		}

		i++;
	}
	info->num_frags = i;

	/* The last fragment of WQE with index 2*N may share the page with the
	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
	 * is not completed yet, WQE 2*N must not be allocated, as it's
	 * responsible for allocating a new page.
	 */
	if (frag_size_max == PAGE_SIZE) {
		/* No WQE can start in the middle of a page. */
		info->wqe_index_mask = 0;
	} else {
		/* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
		 * because there would be more than MLX5E_MAX_RX_FRAGS of them.
		 */
		WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);

		/* An odd number of fragments allows packing the last fragment
		 * of the previous WQE and the first fragment of the next WQE
		 * into the same page.
		 * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
		 * is 4, the last fragment can be bigger than the rest only if
		 * it's the fourth one, so WQEs consisting of 3 fragments will
		 * always share a page.
		 * When a page is shared, WQE bulk size is 2, otherwise just 1.
		 */
		info->wqe_index_mask = info->num_frags % 2;
	}

out:
	/* Bulking optimization to skip allocation until at least 8 WQEs can be
	 * allocated in a row. At the same time, never start allocation when
	 * the page is still used by older WQEs.
	 */
	info->wqe_bulk = max_t(u8, info->wqe_index_mask + 1, 8);

	info->log_num_frags = order_base_2(info->num_frags);

	return 0;
}
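
/* Layout example under the defaults above (4 KB pages, 2048-byte
 * DEFAULT_FRAG_SIZE): a linear RQ with a 2048-byte stride packs two WQEs
 * per page, so wqe_index_mask = 1 and wqe_bulk = max(1 + 1, 8) = 8. A
 * non-linear RQ with three 2048-byte fragments has an odd num_frags, so
 * consecutive WQEs share a page and wqe_index_mask is likewise 3 % 2 = 1.
 */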
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
	int wqe_size = BIT(log_stride_sz) * num_strides;

	/* +1 is for the case that the pkt_per_rsrv packets don't consume the
	 * whole reservation, so we get a filler cqe for the rest of the
	 * reservation.
	 */
	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
		else
			log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
		MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}
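
/* Numerical sketch with assumed values: suppose wqe_size = 256 KB,
 * rsrv_size = 64 KB, wq_size = 16 and pkt_per_rsrv = 4. Then a WQE spans
 * 4 reservations, and the CQ must hold up to 4 * 16 * (4 + 1) = 320
 * entries (the +1 absorbs a possible filler CQE per reservation), which
 * order_base_2() rounds up to 512.
 */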
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;
	int err;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
		u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides,
						   page_shift, umr_mode)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
				      log_wqe_stride_size, log_wqe_num_of_strides,
				      umr_mode);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			MLX5_SET(wq, wq, shampo_enable, true);
			MLX5_SET(wq, wq, log_reservation_size,
				 mlx5e_shampo_get_log_rsrv_size(mdev, params));
			MLX5_SET(wq, wq,
				 log_max_num_of_packets_per_reservation,
				 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
			MLX5_SET(wq, wq, log_headers_entry_size,
				 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
			MLX5_SET(rqc, rqc, reservation_timeout,
				 params->packet_merge.timeout);
			MLX5_SET(rqc, rqc, shampo_match_criteria_type,
				 params->packet_merge.shampo.match_criteria_type);
			MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
				 params->packet_merge.shampo.alignment_granularity);
		}
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		if (err)
			return err;
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       u16 q_counter,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp =
		mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

/* This function calculates the maximum number of header entries that are
 * needed per WQE. The formula is based on the size of the reservations and
 * the restriction that the max number of packets per reservation equals the
 * max number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_rq_param *rq_param)
{
	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
	int wqe_size = BIT(log_stride_sz) * num_strides;
	u32 hd_per_wqe;

	/* Assumption: hd_per_wqe % 8 == 0. */
	hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
		      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
	return hd_per_wqe;
}

/* This function calculates the maximum number of header entries that are
 * needed for the WQ. This value is used to allocate the header buffer in HW,
 * thus it must be a power of 2.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 hd_per_wqe, hd_per_wq;

	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
	return hd_per_wq;
}

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{
	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 wqebbs;

	max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
	rest = max_hd_per_wqe % max_klm_per_umr;
	wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
	if (rest)
		wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
	wqebbs *= wq_size;
	return wqebbs;
}
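
/* Worked example with assumed numbers: if max_hd_per_wqe = 200 and
 * MLX5E_MAX_KLM_PER_WQE() allows 64 KLM entries per UMR WQE, each RQ WQE
 * needs 200 / 64 = 3 full UMR WQEs plus one more for the remaining 8
 * entries. The WQEBB count of those UMRs, multiplied by the RQ size, is
 * the ICOSQ space reserved for SHAMPO headers.
 */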
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	u8 umr_wqebbs;

	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);

	return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	u32 wqebbs, total_pages, useful_space;

	/* MLX5_WQ_TYPE_CYCLIC */
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	/* UMR WQEs for the regular RQ. */
	wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);

	/* If XDP program is attached, XSK may be turned on at any time without
	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
	 * both regular RQ and XSK RQ.
	 *
	 * XSK uses different values of page_shift, and the total number of UMR
	 * WQEBBs depends on it. This dependency is complex and not monotonic,
	 * especially taking into consideration that some of the parameters come
	 * from capabilities. Hence, we have to try all valid values of XSK
	 * frame size (and page_shift) to find the maximum.
	 */
	if (params->xdp_prog) {
		u32 max_xsk_wqebbs = 0;
		u8 frame_shift;

		for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
		     frame_shift <= PAGE_SHIFT; frame_shift++) {
			/* The headroom doesn't affect the calculation. */
			struct mlx5e_xsk_param xsk = {
				.chunk_size = 1 << frame_shift,
				.unaligned = false,
			};

			/* XSK aligned mode. */
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is a power of two. */
			xsk.unaligned = true;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
		}

		wqebbs += max_xsk_wqebbs;
	}

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

	/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
	 * This padding is always smaller than the max WQE size. That gives us
	 * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
	 * per page. The number of pages is estimated as the total size of WQEs
	 * divided by the useful space in page, rounding up. If some WQEs don't
	 * fully fit into the useful space, they can occupy part of the padding,
	 * which proves this estimation to be correct (reserve enough space).
	 */
	useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
	total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
	wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);

	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
	param->is_tls = mlx5e_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}
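
/* Size estimate with assumed numbers: if the regular RQ needs 512 UMR
 * WQEBBs and the worst XSK configuration found by the loop in
 * mlx5e_build_icosq_log_wq_sz() needs another 1024, the page-boundary
 * padding estimate inflates the sum slightly, and order_base_2() rounds
 * the result up to the next power of two, e.g. 2048 WQEBBs -> log size 11.
 */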
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}
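
/* Ordering note (derived from the code above, not an additional API
 * contract): mlx5e_build_rq_param() must run first, because
 * mlx5e_build_icosq_log_wq_sz() reads the resulting RQ parameters (e.g. the
 * SHAMPO reservation fields in cparam->rq) to size the ICOSQ that will post
 * UMR WQEs on behalf of that RQ.
 */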