// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (mlx5e_rx_is_xdp(params, xsk))
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}

u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case. It can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it lets us treat XSK frames like pages
	 * by redirecting alloc and free operations to XSK rings and by relying
	 * on the fact that a "page" (which is a frame) never holds more than
	 * one packet. The latter is important, because frames may come in a
	 * random order, and we would have trouble assembling a real page of
	 * multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}

u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}
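
/* Worked example for the fragment-size math above (illustrative only; the
 * numbers assume a 4K PAGE_SIZE, NET_IP_ALIGN == 2 and the usual
 * MLX5E_SW2HW_MTU() overhead of ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN == 22):
 *
 *   sw_mtu = 1500, no XDP, no XSK:
 *     headroom    = NET_IP_ALIGN + MLX5_RX_HEADROOM
 *     min frag sz = headroom + 1500 + 22
 *     linear frag = MLX5_SKB_FRAG_SZ(min frag sz), i.e. the buffer plus the
 *                   skb_shared_info tail, roughly 1.9K, so
 *                   order_base_2(linear frag) == 11 (2K strides).
 *
 *   With MLX5_MPWRQ_LOG_WQE_SZ == 18 (a 256K WQE), that yields
 *     log_pkts_per_wqe = 18 - 11 = 7, i.e. 128 packets per MPWQE.
 */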

bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
		linear_frag_sz <= PAGE_SIZE;
}

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides)
{
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	s8 log_num_strides;
	u8 log_stride_sz;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params)
{
	return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params)
{
	return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params)
{
	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
			 PAGE_SIZE;

	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}
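
/* Illustrative note on the checks above: for the linear MPWQE layout with a
 * 2K stride (log_stride_sz == 11) and MLX5_MPWRQ_LOG_WQE_SZ == 18,
 * log_num_strides == 7. Devices without the ext_stride_num_range capability
 * typically require at least 2^MLX5_MPWQE_LOG_NUM_STRIDES_BASE strides per
 * WQE, so such a layout passes mlx5e_verify_rx_mpwqe_strides() only when the
 * extended range is supported; otherwise the driver falls back to the
 * non-linear default stride size.
 */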

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
		return linear_headroom;

	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return linear_headroom;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		return linear_headroom;

	return 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_tls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
	if (is_mpwqe)
		/* An MPWQE can take up to the maximum-sized WQE on top of the
		 * normal stop room, if a new packet breaks the active MPWQE
		 * session and allocates its WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_max_wqe(mdev);

	return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}
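
/* Stop-room budget in numbers (illustrative; assumes a max WQE of
 * MLX5_SEND_WQE_MAX_WQEBBS == 16 basic blocks and no TLS offload):
 *
 *   non-MPWQE SQ: stop_room = 16 WQEBBs
 *   MPWQE SQ:     stop_room = 2 * 16 WQEBBs, since a packet that breaks the
 *                 active MPWQE session may need a second max-sized WQE at
 *                 once.
 *
 * mlx5e_validate_params() then only requires 2^log_sq_size to be strictly
 * larger than this budget, which even the minimum SQ size satisfies easily.
 */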

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (mlx5_fpga_is_ipsec_device(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
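
/* Example of the heuristic above (illustrative): both values are reported in
 * Mb/s, so a 100G port (link_speed == 100000) behind a PCIe link that can
 * deliver only ~32 Gb/s (pci_bw == 32000) is considered "slow", because
 * 100000 > 2 * 32000. Callers use this to pick defaults (e.g. favoring CQE
 * compression) when the PCI bus, not the wire, is the bottleneck.
 */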

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
	if (xdp)
		/* XDP requires all fragments to be of the same size. */
		return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;

	/* Optimization for small packets: the last fragment is bigger than the others. */
	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}
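
/* Worked example (illustrative; assumes MLX5E_MAX_RX_FRAGS == 4, a 4K
 * PAGE_SIZE and the 2048-byte DEFAULT_FRAG_SIZE below):
 *
 *   XDP:     max_mtu = first_frag_size + 3 * 2048
 *   non-XDP: max_mtu = first_frag_size + 2 * 2048 + 4096
 *
 * The non-XDP budget is larger because only XDP requires every fragment to
 * have the same size; the SKB path can make the last fragment a full page.
 */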

#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_xsk_param *xsk,
				     struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	int first_frag_size_max;
	u32 buf_size = 0;
	u16 headroom;
	int max_mtu;
	int i;

	if (mlx5_fpga_is_ipsec_device(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
					  params->xdp_prog);
	if (byte_count > max_mtu || params->xdp_prog) {
		frag_size_max = PAGE_SIZE;
		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
						  params->xdp_prog);
		if (byte_count > max_mtu) {
			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
				      params->sw_mtu, max_mtu);
			return -EINVAL;
		}
	}

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i == 0)
			frag_size = min(frag_size, first_frag_size_max);
		else if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		buf_size += frag_size;

		if (params->xdp_prog) {
			/* XDP multi buffer expects fragments of the same size. */
			info->arr[i].frag_stride = frag_size_max;
		} else {
			if (i == 0) {
				/* Ensure that headroom and tailroom are included. */
				frag_size += headroom;
				frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			}
			info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
		}

		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);

	return 0;
}

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
	int wqe_size = BIT(log_stride_sz) * num_strides;

	/* The +1 covers the case when the packets don't consume the whole
	 * reservation, so a filler CQE is posted for the rest of it.
	 */
	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}
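
/* Worked example for the SHAMPO CQ sizing above (illustrative; assumes 4K
 * pages, a 64K reservation, a 256K WQE and sw_mtu == 1500):
 *
 *   pkt_per_rsrv = 2^order_base_2(DIV_ROUND_UP(65536, 1500)) = 2^6 = 64
 *   CQEs per WQE = (262144 / 65536) * (64 + 1) = 260
 *
 * so the CQ is sized to 260 * wq_size entries, rounded up to a power of 2.
 */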

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
		else
			log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
		MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;
	int err;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
				      log_wqe_stride_size, log_wqe_num_of_strides);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			MLX5_SET(wq, wq, shampo_enable, true);
			MLX5_SET(wq, wq, log_reservation_size,
				 mlx5e_shampo_get_log_rsrv_size(mdev, params));
			MLX5_SET(wq, wq,
				 log_max_num_of_packets_per_reservation,
				 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
			MLX5_SET(wq, wq, log_headers_entry_size,
				 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
			MLX5_SET(rqc, rqc, reservation_timeout,
				 params->packet_merge.timeout);
			MLX5_SET(rqc, rqc, shampo_match_criteria_type,
				 params->packet_merge.shampo.match_criteria_type);
			MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
				 params->packet_merge.shampo.alignment_granularity);
		}
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		if (err)
			return err;
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}
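
/* Note on the stride fields above (illustrative): the WQ context stores the
 * stride parameters relative to their hardware base values. With a 2K linear
 * stride (log_wqe_stride_size == 11) and MLX5_MPWQE_LOG_STRIDE_SZ_BASE == 6,
 * the value written is 11 - 6 = 5; log_wqe_num_of_strides is offset by
 * MLX5_MPWQE_LOG_NUM_STRIDES_BASE in the same way.
 */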

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       u16 q_counter,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
		    !!MLX5_IPSEC_DEV(mdev);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}

/* This function calculates the maximum number of header entries that are
 * needed per WQE. The formula is based on the size of the reservations and
 * on the restriction that the maximum number of packets per reservation
 * equals the maximum number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_rq_param *rq_param)
{
	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
	int wqe_size = BIT(log_stride_sz) * num_strides;
	u32 hd_per_wqe;

	/* Assumption: hd_per_wqe % 8 == 0. */
	hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
		      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
	return hd_per_wqe;
}
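
/* Worked example for mlx5e_shampo_hd_per_wqe() (illustrative; same
 * assumptions as the SHAMPO CQ sizing example: 4K pages, a 64K reservation,
 * a 256K WQE and sw_mtu == 1500):
 *
 *   hd_per_wqe = (262144 / 65536) * 64 = 256 header entries per WQE,
 *
 * which also satisfies the "hd_per_wqe % 8 == 0" assumption noted above.
 */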

/* This function calculates the maximum number of header entries that are
 * needed for the WQ. This value is used to allocate the header buffer in HW,
 * thus it must be a power of 2.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 hd_per_wqe, hd_per_wq;

	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
	return hd_per_wq;
}

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{
	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 wqebbs;

	max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
	rest = max_hd_per_wqe % max_klm_per_umr;
	wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
	if (rest)
		wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
	wqebbs *= wq_size;
	return wqebbs;
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	u32 wqebbs;

	/* MLX5_WQ_TYPE_CYCLIC */
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_accel_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
	param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
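
/* ICOSQ sizing in numbers (illustrative): the ICOSQ must hold one UMR WQE
 * per outstanding MPWQE, so for a striding RQ of 2^8 WQEs it needs at least
 * MLX5E_UMR_WQEBBS * 256 basic blocks, plus the SHAMPO KLM UMRs when
 * header/data split is enabled; order_base_2() then rounds the total up to
 * the next power-of-2 log size.
 */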

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}
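
/* Typical usage (illustrative sketch, simplified from the channel-open path):
 *
 *	struct mlx5e_channel_param *cparam;
 *
 *	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
 *	if (!cparam)
 *		return -ENOMEM;
 *	err = mlx5e_build_channel_param(mdev, params, q_counter, cparam);
 *
 * The same cparam is then reused to create every channel of the current
 * configuration, so the parameters are computed once per reconfiguration
 * rather than once per channel.
 */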