1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* Copyright (c) 2019 Mellanox Technologies. */ 3 4 #include "en/params.h" 5 #include "en/txrx.h" 6 #include "en/port.h" 7 #include "en_accel/en_accel.h" 8 #include "en_accel/ipsec.h" 9 #include <net/xdp_sock_drv.h> 10 11 static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev) 12 { 13 u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size); 14 15 return min_page_shift ? : 12; 16 } 17 18 u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk) 19 { 20 u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT; 21 u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev); 22 23 /* Regular RQ uses order-0 pages, the NIC must be able to map them. */ 24 if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift)) 25 min_page_shift = req_page_shift; 26 27 return max(req_page_shift, min_page_shift); 28 } 29 30 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) 31 { 32 u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt); 33 u8 max_pages_per_wqe, max_log_mpwqe_size; 34 u16 max_wqe_size; 35 36 /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */ 37 max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB; 38 max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe), 39 MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size; 40 max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift; 41 42 WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU); 43 44 return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ); 45 } 46 47 u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) 48 { 49 u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned); 50 u8 pages_per_wqe; 51 52 pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1; 53 54 /* Sanity check for further calculations to succeed. 
*/ 55 BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64); 56 if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE)) 57 return MLX5_MPWRQ_MAX_PAGES_PER_WQE; 58 59 return pages_per_wqe; 60 } 61 62 u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) 63 { 64 u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt); 65 u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned); 66 u16 umr_wqe_sz; 67 68 umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) + 69 ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT); 70 71 WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK); 72 73 return umr_wqe_sz; 74 } 75 76 u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) 77 { 78 return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, unaligned), 79 MLX5_SEND_WQE_BB); 80 } 81 82 u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) 83 { 84 /* Add another page as a buffer between WQEs. This page will absorb 85 * write overflow by the hardware, when receiving packets larger than 86 * MTU. These oversize packets are dropped by the driver at a later 87 * stage. 
88 */ 89 return MLX5_ALIGN_MTTS(mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned) + 1); 90 } 91 92 u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned) 93 { 94 if (unaligned) 95 return min(MLX5E_MAX_RQ_NUM_KSMS, 96 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size)); 97 98 return MLX5E_MAX_RQ_NUM_MTTS; 99 } 100 101 static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift, 102 bool unaligned) 103 { 104 u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, unaligned); 105 u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, unaligned); 106 107 return ilog2(max_entries / mtts_per_wqe); 108 } 109 110 u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned) 111 { 112 return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned) + 113 mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) - 114 MLX5E_ORDER2_MAX_PACKET_MTU; 115 } 116 117 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, 118 struct mlx5e_xsk_param *xsk) 119 { 120 u16 headroom; 121 122 if (xsk) 123 return xsk->headroom; 124 125 headroom = NET_IP_ALIGN; 126 if (params->xdp_prog) 127 headroom += XDP_PACKET_HEADROOM; 128 else 129 headroom += MLX5_RX_HEADROOM; 130 131 return headroom; 132 } 133 134 static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params, 135 struct mlx5e_xsk_param *xsk) 136 { 137 u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 138 139 return xsk->headroom + hw_mtu; 140 } 141 142 static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk) 143 { 144 /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */ 145 u16 headroom = xsk ? 
0 : mlx5e_get_linear_rq_headroom(params, NULL); 146 u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 147 148 return MLX5_SKB_FRAG_SZ(headroom + hw_mtu); 149 } 150 151 static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev, 152 struct mlx5e_params *params, 153 struct mlx5e_xsk_param *xsk, 154 bool mpwqe) 155 { 156 /* XSK frames are mapped as individual pages, because frames may come in 157 * an arbitrary order from random locations in the UMEM. 158 */ 159 if (xsk) 160 return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE; 161 162 /* XDP in mlx5e doesn't support multiple packets per page. */ 163 if (params->xdp_prog) 164 return PAGE_SIZE; 165 166 return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false)); 167 } 168 169 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev, 170 struct mlx5e_params *params, 171 struct mlx5e_xsk_param *xsk) 172 { 173 u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true); 174 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 175 bool unaligned = xsk ? xsk->unaligned : false; 176 177 return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) - 178 order_base_2(linear_stride_sz); 179 } 180 181 bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, 182 struct mlx5e_params *params, 183 struct mlx5e_xsk_param *xsk) 184 { 185 if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) 186 return false; 187 188 /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data 189 * must fit into a CPU page. 190 */ 191 if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE) 192 return false; 193 194 /* XSK frames must be big enough to hold the packet data. 
*/ 195 if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size) 196 return false; 197 198 return true; 199 } 200 201 static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev, 202 u8 log_stride_sz, u8 log_num_strides, 203 u8 page_shift, bool unaligned) 204 { 205 if (log_stride_sz + log_num_strides != 206 mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned)) 207 return false; 208 209 if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE || 210 log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX) 211 return false; 212 213 if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX) 214 return false; 215 216 if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) 217 return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE; 218 219 return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE; 220 } 221 222 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, 223 struct mlx5e_params *params, 224 struct mlx5e_xsk_param *xsk) 225 { 226 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 227 bool unaligned = xsk ? xsk->unaligned : false; 228 u8 log_num_strides; 229 u8 log_stride_sz; 230 u8 log_wqe_sz; 231 232 if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) 233 return false; 234 235 log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true)); 236 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned); 237 238 if (log_wqe_sz < log_stride_sz) 239 return false; 240 241 log_num_strides = log_wqe_sz - log_stride_sz; 242 243 return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, 244 log_num_strides, page_shift, 245 unaligned); 246 } 247 248 u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev, 249 struct mlx5e_params *params, 250 struct mlx5e_xsk_param *xsk) 251 { 252 u8 log_pkts_per_wqe, page_shift, max_log_rq_size; 253 bool unaligned = xsk ? 
xsk->unaligned : false; 254 255 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk); 256 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 257 max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned); 258 259 /* Numbers are unsigned, don't subtract to avoid underflow. */ 260 if (params->log_rq_mtu_frames < 261 log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW) 262 return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; 263 264 /* Ethtool's rx_max_pending is calculated for regular RQ, that uses 265 * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a 266 * frame size not equal to PAGE_SIZE. 267 * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on 268 * unexpected failure. 269 */ 270 if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size)) 271 return max_log_rq_size; 272 273 return params->log_rq_mtu_frames - log_pkts_per_wqe; 274 } 275 276 u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev, 277 struct mlx5e_params *params) 278 { 279 return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE)); 280 } 281 282 u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev, 283 struct mlx5e_params *params) 284 { 285 return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE); 286 } 287 288 u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, 289 struct mlx5e_params *params) 290 { 291 u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * 292 PAGE_SIZE; 293 294 return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu)); 295 } 296 297 u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, 298 struct mlx5e_params *params, 299 struct mlx5e_xsk_param *xsk) 300 { 301 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) 302 return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true)); 303 304 return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev); 305 } 306 307 u8 
mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, 308 struct mlx5e_params *params, 309 struct mlx5e_xsk_param *xsk) 310 { 311 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 312 bool unaligned = xsk ? xsk->unaligned : false; 313 314 return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) - 315 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); 316 } 317 318 u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz) 319 { 320 #define UMR_WQE_BULK (2) 321 return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1); 322 } 323 324 u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, 325 struct mlx5e_params *params, 326 struct mlx5e_xsk_param *xsk) 327 { 328 u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk); 329 330 if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) 331 return linear_headroom; 332 333 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) 334 return linear_headroom; 335 336 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) 337 return linear_headroom; 338 339 return 0; 340 } 341 342 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 343 { 344 bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); 345 u16 stop_room; 346 347 stop_room = mlx5e_ktls_get_stop_room(mdev, params); 348 stop_room += mlx5e_stop_room_for_max_wqe(mdev); 349 if (is_mpwqe) 350 /* A MPWQE can take up to the maximum cacheline-aligned WQE + 351 * all the normal stop room can be taken if a new packet breaks 352 * the active MPWQE session and allocates its WQEs right away. 
353 */ 354 stop_room += mlx5e_stop_room_for_mpwqe(mdev); 355 356 return stop_room; 357 } 358 359 int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 360 { 361 size_t sq_size = 1 << params->log_sq_size; 362 u16 stop_room; 363 364 stop_room = mlx5e_calc_sq_stop_room(mdev, params); 365 if (stop_room >= sq_size) { 366 mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n", 367 stop_room, sq_size); 368 return -EINVAL; 369 } 370 371 return 0; 372 } 373 374 static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) 375 { 376 struct dim_cq_moder moder = {}; 377 378 moder.cq_period_mode = cq_period_mode; 379 moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 380 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 381 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) 382 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; 383 384 return moder; 385 } 386 387 static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) 388 { 389 struct dim_cq_moder moder = {}; 390 391 moder.cq_period_mode = cq_period_mode; 392 moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 393 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 394 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) 395 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; 396 397 return moder; 398 } 399 400 static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) 401 { 402 return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
403 DIM_CQ_PERIOD_MODE_START_FROM_CQE : 404 DIM_CQ_PERIOD_MODE_START_FROM_EQE; 405 } 406 407 void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 408 { 409 if (params->tx_dim_enabled) { 410 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); 411 412 params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); 413 } else { 414 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); 415 } 416 } 417 418 void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 419 { 420 if (params->rx_dim_enabled) { 421 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); 422 423 params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); 424 } else { 425 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); 426 } 427 } 428 429 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 430 { 431 mlx5e_reset_tx_moderation(params, cq_period_mode); 432 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 433 params->tx_cq_moderation.cq_period_mode == 434 MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 435 } 436 437 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 438 { 439 mlx5e_reset_rx_moderation(params, cq_period_mode); 440 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 441 params->rx_cq_moderation.cq_period_mode == 442 MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 443 } 444 445 bool slow_pci_heuristic(struct mlx5_core_dev *mdev) 446 { 447 u32 link_speed = 0; 448 u32 pci_bw = 0; 449 450 mlx5e_port_max_linkspeed(mdev, &link_speed); 451 pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); 452 mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", 453 link_speed, pci_bw); 454 455 #define MLX5E_SLOW_PCI_RATIO (2) 456 457 return link_speed && pci_bw && 458 link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; 459 } 460 461 int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct 
mlx5e_params *params) 462 { 463 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL); 464 465 if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, false)) 466 return -EOPNOTSUPP; 467 468 if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) 469 return -EINVAL; 470 471 return 0; 472 } 473 474 int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params, 475 struct mlx5e_xsk_param *xsk) 476 { 477 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 478 bool unaligned = xsk ? xsk->unaligned : false; 479 u16 max_mtu_pkts; 480 481 if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, xsk->unaligned)) 482 return -EOPNOTSUPP; 483 484 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) 485 return -EINVAL; 486 487 /* Current RQ length is too big for the given frame size, the 488 * needed number of WQEs exceeds the maximum. 489 */ 490 max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE, 491 mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned)); 492 if (params->log_rq_mtu_frames > max_mtu_pkts) { 493 mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n", 494 1 << params->log_rq_mtu_frames, xsk->chunk_size); 495 return -EINVAL; 496 } 497 498 return 0; 499 } 500 501 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, 502 struct mlx5e_params *params) 503 { 504 params->log_rq_mtu_frames = is_kdump_kernel() ? 505 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : 506 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; 507 508 mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", 509 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, 510 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? 
511 BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) : 512 BIT(params->log_rq_mtu_frames), 513 BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)), 514 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); 515 } 516 517 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 518 { 519 params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? 520 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : 521 MLX5_WQ_TYPE_CYCLIC; 522 } 523 524 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, 525 struct mlx5e_params *params) 526 { 527 /* Prefer Striding RQ, unless any of the following holds: 528 * - Striding RQ configuration is not possible/supported. 529 * - CQE compression is ON, and stride_index mini_cqe layout is not supported. 530 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 531 * 532 * No XSK params: checking the availability of striding RQ in general. 533 */ 534 if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) || 535 MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) && 536 !mlx5e_mpwrq_validate_regular(mdev, params) && 537 (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) || 538 !mlx5e_rx_is_linear_skb(mdev, params, NULL))) 539 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); 540 mlx5e_set_rq_type(mdev, params); 541 mlx5e_init_rq_type_params(mdev, params); 542 } 543 544 /* Build queue parameters */ 545 546 void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c) 547 { 548 *ccp = (struct mlx5e_create_cq_param) { 549 .napi = &c->napi, 550 .ch_stats = c->stats, 551 .node = cpu_to_node(c->cpu), 552 .ix = c->ix, 553 }; 554 } 555 556 static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp) 557 { 558 if (xdp) 559 /* XDP requires all fragments to be of the same size. */ 560 return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size; 561 562 /* Optimization for small packets: the last fragment is bigger than the others. 
*/ 563 return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE; 564 } 565 566 #define DEFAULT_FRAG_SIZE (2048) 567 568 static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, 569 struct mlx5e_params *params, 570 struct mlx5e_xsk_param *xsk, 571 struct mlx5e_rq_frags_info *info) 572 { 573 u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); 574 int frag_size_max = DEFAULT_FRAG_SIZE; 575 int first_frag_size_max; 576 u32 buf_size = 0; 577 u16 headroom; 578 int max_mtu; 579 int i; 580 581 if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) { 582 int frag_stride; 583 584 frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false); 585 586 info->arr[0].frag_size = byte_count; 587 info->arr[0].frag_stride = frag_stride; 588 info->num_frags = 1; 589 info->wqe_bulk = PAGE_SIZE / frag_stride; 590 goto out; 591 } 592 593 headroom = mlx5e_get_linear_rq_headroom(params, xsk); 594 first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom); 595 596 max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max, 597 params->xdp_prog); 598 if (byte_count > max_mtu || params->xdp_prog) { 599 frag_size_max = PAGE_SIZE; 600 first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom); 601 602 max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max, 603 params->xdp_prog); 604 if (byte_count > max_mtu) { 605 mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n", 606 params->sw_mtu, max_mtu); 607 return -EINVAL; 608 } 609 } 610 611 i = 0; 612 while (buf_size < byte_count) { 613 int frag_size = byte_count - buf_size; 614 615 if (i == 0) 616 frag_size = min(frag_size, first_frag_size_max); 617 else if (i < MLX5E_MAX_RX_FRAGS - 1) 618 frag_size = min(frag_size, frag_size_max); 619 620 info->arr[i].frag_size = frag_size; 621 buf_size += frag_size; 622 623 if (params->xdp_prog) { 624 /* XDP multi buffer expects fragments of the same size. 
*/ 625 info->arr[i].frag_stride = frag_size_max; 626 } else { 627 if (i == 0) { 628 /* Ensure that headroom and tailroom are included. */ 629 frag_size += headroom; 630 frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 631 } 632 info->arr[i].frag_stride = roundup_pow_of_two(frag_size); 633 } 634 635 i++; 636 } 637 info->num_frags = i; 638 /* number of different wqes sharing a page */ 639 info->wqe_bulk = 1 + (info->num_frags % 2); 640 641 out: 642 info->wqe_bulk = max_t(u8, info->wqe_bulk, 8); 643 info->log_num_frags = order_base_2(info->num_frags); 644 645 return 0; 646 } 647 648 static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) 649 { 650 int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs; 651 652 switch (wq_type) { 653 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: 654 sz += sizeof(struct mlx5e_rx_wqe_ll); 655 break; 656 default: /* MLX5_WQ_TYPE_CYCLIC */ 657 sz += sizeof(struct mlx5e_rx_wqe_cyc); 658 } 659 660 return order_base_2(sz); 661 } 662 663 static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev, 664 struct mlx5e_cq_param *param) 665 { 666 void *cqc = param->cqc; 667 668 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); 669 if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128) 670 MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); 671 } 672 673 static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev, 674 struct mlx5e_params *params, 675 struct mlx5e_xsk_param *xsk) 676 { 677 int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; 678 u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); 679 int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); 680 u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); 681 int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk)); 682 int wqe_size = BIT(log_stride_sz) * num_strides; 683 684 /* +1 is for the case that the pkt_per_rsrv dont consume the reservation 685 * so we get a 
filler cqe for the rest of the reservation. 686 */ 687 return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1)); 688 } 689 690 static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, 691 struct mlx5e_params *params, 692 struct mlx5e_xsk_param *xsk, 693 struct mlx5e_cq_param *param) 694 { 695 bool hw_stridx = false; 696 void *cqc = param->cqc; 697 u8 log_cq_size; 698 699 switch (params->rq_wq_type) { 700 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: 701 hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index); 702 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) 703 log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk); 704 else 705 log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) + 706 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); 707 break; 708 default: /* MLX5_WQ_TYPE_CYCLIC */ 709 log_cq_size = params->log_rq_mtu_frames; 710 } 711 712 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); 713 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { 714 MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ? 715 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM); 716 MLX5_SET(cqc, cqc, cqe_comp_en, 1); 717 } 718 719 mlx5e_build_common_cq_param(mdev, param); 720 param->cq_period_mode = params->rx_cq_moderation.cq_period_mode; 721 } 722 723 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 724 { 725 bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; 726 bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && 727 MLX5_CAP_GEN(mdev, relaxed_ordering_write); 728 729 return ro && lro_en ? 
730 MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; 731 } 732 733 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, 734 struct mlx5e_params *params, 735 struct mlx5e_xsk_param *xsk, 736 u16 q_counter, 737 struct mlx5e_rq_param *param) 738 { 739 void *rqc = param->rqc; 740 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 741 int ndsegs = 1; 742 int err; 743 744 switch (params->rq_wq_type) { 745 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: { 746 u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk); 747 u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); 748 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 749 bool unaligned = xsk ? xsk->unaligned : false; 750 751 if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size, 752 log_wqe_num_of_strides, 753 page_shift, unaligned)) { 754 mlx5_core_err(mdev, 755 "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, unaligned %d\n", 756 log_wqe_stride_size, log_wqe_num_of_strides, 757 unaligned); 758 return -EINVAL; 759 } 760 761 MLX5_SET(wq, wq, log_wqe_num_of_strides, 762 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE); 763 MLX5_SET(wq, wq, log_wqe_stride_size, 764 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); 765 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk)); 766 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { 767 MLX5_SET(wq, wq, shampo_enable, true); 768 MLX5_SET(wq, wq, log_reservation_size, 769 mlx5e_shampo_get_log_rsrv_size(mdev, params)); 770 MLX5_SET(wq, wq, 771 log_max_num_of_packets_per_reservation, 772 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); 773 MLX5_SET(wq, wq, log_headers_entry_size, 774 mlx5e_shampo_get_log_hd_entry_size(mdev, params)); 775 MLX5_SET(rqc, rqc, reservation_timeout, 776 params->packet_merge.timeout); 777 MLX5_SET(rqc, rqc, shampo_match_criteria_type, 778 params->packet_merge.shampo.match_criteria_type); 779 MLX5_SET(rqc, rqc, 
shampo_no_match_alignment_granularity, 780 params->packet_merge.shampo.alignment_granularity); 781 } 782 break; 783 } 784 default: /* MLX5_WQ_TYPE_CYCLIC */ 785 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); 786 err = mlx5e_build_rq_frags_info(mdev, params, xsk, ¶m->frags_info); 787 if (err) 788 return err; 789 ndsegs = param->frags_info.num_frags; 790 } 791 792 MLX5_SET(wq, wq, wq_type, params->rq_wq_type); 793 MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params)); 794 MLX5_SET(wq, wq, log_wq_stride, 795 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); 796 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); 797 MLX5_SET(rqc, rqc, counter_set_id, q_counter); 798 MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); 799 MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); 800 801 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); 802 mlx5e_build_rx_cq_param(mdev, params, xsk, ¶m->cqp); 803 804 return 0; 805 } 806 807 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, 808 u16 q_counter, 809 struct mlx5e_rq_param *param) 810 { 811 void *rqc = param->rqc; 812 void *wq = MLX5_ADDR_OF(rqc, rqc, wq); 813 814 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 815 MLX5_SET(wq, wq, log_wq_stride, 816 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); 817 MLX5_SET(rqc, rqc, counter_set_id, q_counter); 818 819 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); 820 } 821 822 void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev, 823 struct mlx5e_params *params, 824 struct mlx5e_cq_param *param) 825 { 826 void *cqc = param->cqc; 827 828 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size); 829 830 mlx5e_build_common_cq_param(mdev, param); 831 param->cq_period_mode = params->tx_cq_moderation.cq_period_mode; 832 } 833 834 void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev, 835 struct mlx5e_sq_param *param) 836 { 837 void *sqc = param->sqc; 838 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 839 840 MLX5_SET(wq, wq, 
log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 841 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); 842 843 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); 844 } 845 846 void mlx5e_build_sq_param(struct mlx5_core_dev *mdev, 847 struct mlx5e_params *params, 848 struct mlx5e_sq_param *param) 849 { 850 void *sqc = param->sqc; 851 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 852 bool allow_swp; 853 854 allow_swp = 855 mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev); 856 mlx5e_build_sq_param_common(mdev, param); 857 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); 858 MLX5_SET(sqc, sqc, allow_swp, allow_swp); 859 param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); 860 param->stop_room = mlx5e_calc_sq_stop_room(mdev, params); 861 mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp); 862 } 863 864 static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev, 865 u8 log_wq_size, 866 struct mlx5e_cq_param *param) 867 { 868 void *cqc = param->cqc; 869 870 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); 871 872 mlx5e_build_common_cq_param(mdev, param); 873 874 param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 875 } 876 877 /* This function calculates the maximum number of headers entries that are needed 878 * per WQE, the formula is based on the size of the reservations and the 879 * restriction we have about max packets for reservation that is equal to max 880 * headers per reservation. 
881 */ 882 u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, 883 struct mlx5e_params *params, 884 struct mlx5e_rq_param *rq_param) 885 { 886 int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; 887 u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL)); 888 int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); 889 u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL); 890 int wqe_size = BIT(log_stride_sz) * num_strides; 891 u32 hd_per_wqe; 892 893 /* Assumption: hd_per_wqe % 8 == 0. */ 894 hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv; 895 mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n", 896 __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv); 897 return hd_per_wqe; 898 } 899 900 /* This function calculates the maximum number of headers entries that are needed 901 * for the WQ, this value is uesed to allocate the header buffer in HW, thus 902 * must be a pow of 2. 
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 hd_per_wqe, hd_per_wq;

	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	/* Headers for all WQEs in the WQ, rounded up to a power of two as
	 * required for the HW header buffer allocation (see comment above).
	 */
	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
	return hd_per_wq;
}

/* ICOSQ WQEBBs needed to post the SHAMPO KLM UMRs for one RQ: full UMRs of
 * max_klm_per_umr entries each, plus one shorter UMR for the remainder,
 * scaled by the number of WQEs in the RQ.
 */
static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{
	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 wqebbs;

	max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
	rest = max_hd_per_wqe % max_klm_per_umr;
	wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
	if (rest)
		wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
	/* One such batch of UMRs is needed per RQ WQE. */
	wqebbs *= wq_size;
	return wqebbs;
}

/* Total UMR WQEBBs needed to map the whole MPWRQ: the per-WQE UMR size times
 * the number of MPWQEs in the RQ. The page_shift/unaligned pair is taken from
 * the XSK params when present, otherwise the regular-RQ defaults are used.
 */
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	bool unaligned = xsk ? xsk->unaligned : false;
	u8 umr_wqebbs;

	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned);

	return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
}

/* Compute the log size of the ICOSQ so it can hold every UMR WQE the RQ may
 * need: regular-RQ UMRs, the worst case over all valid XSK configurations
 * (when an XDP program may enable XSK), and SHAMPO KLM UMRs when HW GRO is on.
 */
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	u32 wqebbs;

	/* MLX5_WQ_TYPE_CYCLIC: no UMRs are posted, the minimum SQ suffices. */
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	/* UMR WQEs for the regular RQ. */
	wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);

	/* If XDP program is attached, XSK may be turned on at any time without
	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
	 * both regular RQ and XSK RQ.
	 *
	 * XSK uses different values of page_shift, and the total number of UMR
	 * WQEBBs depends on it. This dependency is complex and not monotonic,
	 * especially taking into consideration that some of the parameters come
	 * from capabilities. Hence, we have to try all valid values of XSK
	 * frame size (and page_shift) to find the maximum.
	 */
	if (params->xdp_prog) {
		u32 max_xsk_wqebbs = 0;
		u8 frame_shift;

		for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
		     frame_shift <= PAGE_SHIFT; frame_shift++) {
			/* The headroom doesn't affect the calculation. */
			struct mlx5e_xsk_param xsk = {
				.chunk_size = 1 << frame_shift,
				.unaligned = false,
			};

			/* XSK aligned mode. */
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));

			/* XSK unaligned mode, frame size is a power of two. */
			xsk.unaligned = true;
			max_xsk_wqebbs = max(max_xsk_wqebbs,
					     mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
		}

		wqebbs += max_xsk_wqebbs;
	}

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}

/* Async ICOSQ size: use the default (larger) SQ size when kTLS RX offload is
 * active, otherwise the minimum.
 */
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

/* Fill the SQ context for the ICOSQ: common SQ params, the requested log WQ
 * size, the reg_umr capability bit, and the matching CQ params.
 */
static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

/* Like mlx5e_build_icosq_param, but for the async ICOSQ: additionally
 * reserves stop room for an XSK NOP and, when kTLS RX is active, for a TLS RX
 * resync NOP.
 */
static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
	param->is_tls = mlx5e_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	/* Multi-buffer XDP is needed whenever RX can't fit the packet into a
	 * linear SKB — presumably the same condition bounds XDP frags; verify
	 * against the datapath.
	 */
	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

/* Build the parameters for all queues of a channel (RQ, TXQ SQ, XDP SQ,
 * ICOSQ, async ICOSQ). The RQ params are built first because the ICOSQ size
 * is derived from them. Returns 0 on success or the error from
 * mlx5e_build_rq_param.
 */
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}