/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (MLX5_IPSEC_DEV(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
	if (!priv->profile->update_stats)
		return;

	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
		return;

	queue_work(priv->wq, &priv->update_stats_work);
}

static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
	struct mlx5_eqe *eqe = data;

	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	priv->events_nb.notifier_call = async_event;
	mlx5_notifier_register(priv->mdev, &priv->events_nb);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
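
/* Build the template UMR WQE that an RQ posts on its ICOSQ to remap RX pages;
 * the control and UMR-control segments below are constant for the lifetime of
 * the RQ, while the inline MTT entries are filled in when each MPWQE is posted.
 */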
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   ds_cnt);
	cseg->umr_mkey = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
						  sizeof(*rq->mpwqe.info)),
				       GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey,
				 dma_addr_t filler_addr)
{
	struct mlx5_mtt *mtt;
	int inlen;
	void *mkc;
	u32 *in;
	int err;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 MLX5_MTT_OCTW(npages));

	/* Initialize the mkey with all MTTs pointing to a default
	 * page (filler_addr). When the channels are activated, UMR
	 * WQEs will redirect the RX WQEs to the actual memory from
	 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
	 * to the default page.
	 */
	mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	for (i = 0; i < npages; i++)
		mtt[i].ptag = cpu_to_be64(filler_addr);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
				     rq->wqe_overflow.addr);
}

static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}

static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag = {};
	struct mlx5e_wqe_frag_info *prev = NULL;
	int i;

	next_frag.di = &rq->wqe.di[0];

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				next_frag.di++;
				next_frag.offset = 0;
				if (prev)
					prev->last_in_page = true;
			}
			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->last_in_page = true;
}

static int mlx5e_init_di_list(struct mlx5e_rq *rq,
			      int wq_sz, int cpu)
{
	int len = wq_sz << rq->wqe.info.log_num_frags;

	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!rq->wqe.di)
		return -ENOMEM;

	mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.di);
}

static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);

	mlx5e_reporter_rq_cqe_err(rq);
}

static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
	rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
	if (!rq->wqe_overflow.page)
		return -ENOMEM;

	rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
					     PAGE_SIZE, rq->buff.map_dir);
	if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
		__free_page(rq->wqe_overflow.page);
		return -ENOMEM;
	}
	return 0;
}

static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
	dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
		       rq->buff.map_dir);
	__free_page(rq->wqe_overflow.page);
}

static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
			  struct xsk_buff_pool *xsk_pool,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 rq_xdp_ix;
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->priv    = c->priv;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->icosq   = &c->icosq;
	rq->ix      = c->ix;
	rq->mdev    = mdev;
	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq   = &c->rq_xdpsq;
	rq->xsk_pool = xsk_pool;

	if (rq->xsk_pool)
		rq->stats = &c->priv->channel_stats[c->ix].xskrq;
	else
		rq->stats = &c->priv->channel_stats[c->ix].rq;
	INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);

	if (params->xdp_prog)
		bpf_prog_inc(params->xdp_prog);
	RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);

	rq_xdp_ix = rq->ix;
	if (xsk)
		rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
	if (err < 0)
		goto err_rq_xdp_prog;

	rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			goto err_rq_xdp;

		err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
		if (err)
			goto err_rq_wq_destroy;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
			mlx5e_mpwqe_get_log_rq_size(params, xsk);

		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		rq->mpwqe.num_strides =
			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));

		rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_drop_page;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_rq_mkey;
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			goto err_rq_xdp;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		rq->wqe.info = rqp->frags_info;
		rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;

		rq->wqe.frags =
			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
						 (wq_sz << rq->wqe.info.log_num_frags)),
				      GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frags) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}

		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
		if (err)
			goto err_rq_frags;

		rq->mkey_be = c->mkey_be;
	}

	err = mlx5e_rq_set_handlers(rq, params, xsk);
	if (err)
		goto err_free_by_rq_type;

	if (xsk) {
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_XSK_BUFF_POOL, NULL);
		xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
	} else {
		/* Create a page_pool and register it with rxq */
		pp_params.order     = 0;
		pp_params.flags     = 0; /* No-internal DMA mapping in page_pool */
		pp_params.pool_size = pool_size;
		pp_params.nid       = cpu_to_node(c->cpu);
		pp_params.dev       = c->pdev;
		pp_params.dma_dir   = rq->buff.map_dir;

		/* page_pool can be used even when there is no rq->xdp_prog,
		 * given page_pool does not handle DMA mapping there is no
		 * required state to clear. And page_pool gracefully handle
		 * elevated refcnt.
		 */
		rq->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->page_pool)) {
			err = PTR_ERR(rq->page_pool);
			rq->page_pool = NULL;
			goto err_free_by_rq_type;
		}
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_POOL, rq->page_pool);
	}
	if (err)
		goto err_free_by_rq_type;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* check if num_frags is not a pow of two */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_free_by_rq_type:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
err_rq_mkey:
		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
err_rq_drop_page:
		mlx5e_free_mpwqe_rq_drop_page(rq);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		mlx5e_free_di_list(rq);
err_rq_frags:
		kvfree(rq->wqe.frags);
	}
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_rq_xdp:
	xdp_rxq_info_unreg(&rq->xdp_rxq);
err_rq_xdp_prog:
	if (params->xdp_prog)
		bpf_prog_put(params->xdp_prog);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	struct bpf_prog *old_prog;
	int i;

	old_prog = rcu_dereference_protected(rq->xdp_prog,
					     lockdep_is_held(&rq->priv->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		mlx5e_free_mpwqe_rq_drop_page(rq);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		/* With AF_XDP, page_cache is not used, so this loop is not
		 * entered, and it's safe to call mlx5e_page_release_dynamic
		 * directly.
		 */
		mlx5e_page_release_dynamic(rq, dma_info, false);
	}

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
				       MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
		mlx5e_rqwq_reset(rq);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5_core_dev *mdev = rq->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
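
/* Poll for up to wait_time msecs until the RQ has been filled with at least
 * the minimum number of RX WQEs; on timeout, report to the RX health reporter
 * and fail.
 */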
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	mlx5e_reporter_rx_timeout(rq);
	return -ETIMEDOUT;
}

void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq;
	u16 head;
	int i;

	if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return;

	wq = &rq->mpwqe.wq;
	head = wq->head;

	/* Outstanding UMR WQEs (in progress) start at wq->head */
	for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
		rq->dealloc_wqe(rq, head);
		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
	}

	rq->mpwqe.actual_wq_head = wq->head;
	rq->mpwqe.umr_in_progress = 0;
	rq->mpwqe.umr_completed = 0;
}

void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	__be16 wqe_ix_be;
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

		mlx5e_free_rx_in_progress_descs(rq);

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;

			wqe_ix_be = *wq->tail_next;
			wqe_ix = be16_to_cpu(wqe_ix_be);
			wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
	}
}

int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev))
		__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */

	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	/* We disable csum_complete when XDP is enabled since
	 * XDP programs might manipulate packets which will render
	 * skb->checksum incorrect.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	/* For CQE compression on striding RQ, use stride index provided by
	 * HW if capability is supported.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
	    MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
		__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	mlx5e_trigger_irq(rq->icosq);
}

void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
}

void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	cancel_work_sync(&rq->icosq->recover_work);
	cancel_work_sync(&rq->recover_work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi_fifo.xi);
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
				      GFP_KERNEL, numa);
	if (!xdpi_fifo->xi)
		return -ENOMEM;

	xdpi_fifo->pc   = &sq->xdpi_fifo_pc;
	xdpi_fifo->cc   = &sq->xdpi_fifo_cc;
	xdpi_fifo->mask = dsegs_per_wq - 1;

	return 0;
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;

	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
					GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
	if (err) {
		mlx5e_free_xdpsq_db(sq);
		return err;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xsk_buff_pool *xsk_pool,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev            = c->pdev;
	sq->mkey_be         = c->mkey_be;
	sq->channel         = c;
	sq->uar_map         = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu          = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->xsk_pool        = xsk_pool;

	sq->stats = sq->xsk_pool ?
		&c->priv->channel_stats[c->ix].xsksq :
		is_redirect ?
		&c->priv->channel_stats[c->ix].xdpsq :
		&c->priv->channel_stats[c->ix].rq_xdpsq;
	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	size_t size;

	size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
	sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	return 0;
}

static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
					      recover_work);

	mlx5e_reporter_icosq_cqe_err(sq);
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->channel = c;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.skb_fifo.fifo);
	kvfree(sq->db.dma_fifo);
}

int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
							sizeof(*sq->db.skb_fifo.fifo)),
					     GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	sq->db.skb_fifo.pc   = &sq->skb_fifo_pc;
	sq->db.skb_fifo.cc   = &sq->skb_fifo_cc;
	sq->db.skb_fifo.mask = df_sz - 1;

	return 0;
}
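
/* Set up the SW state of a TXQ SQ: per-queue pointers and stats, state bits
 * derived from device caps (VLAN insertion, IPsec, TLS, MPWQE), the cyclic
 * WQ with its doorbell record, and the DB arrays, all allocated on the
 * channel's NUMA node.
 */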
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->mdev      = c->mdev;
	sq->priv      = c->priv;
	sq->ch_ix     = c->ix;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
	if (mlx5_accel_is_tls_device(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
	if (param->is_mpw)
		set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
	sq->stop_room = param->stop_room;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc, sqc, cqn, csp->cqn);
	MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
				       MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq,
			    int tc)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(sq);

	return err;
}

void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */

	mlx5e_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
			.num_wqebbs = 1,
		};

		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5_core_dev *mdev = sq->mdev;
	struct mlx5_rate_limit rl = {0};

	cancel_work_sync(&sq->dim.work);
	cancel_work_sync(&sq->recover_work);
	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
					      recover_work);

	mlx5e_reporter_tx_err_cqe(sq);
}

int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	mlx5e_free_icosq(sq);

	return err;
}

void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
	set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
}

void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
	clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
	synchronize_rcu(); /* Sync with NAPI. */
}

void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq_descs(sq);
	mlx5e_free_icosq(sq);
}

int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	mlx5e_set_xmit_fp(sq, param->is_mpw);

	if (!param->is_mpw) {
		unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
		unsigned int inline_hdr_sz = 0;
		int i;

		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
			ds_cnt++;
		}

		/* Pre initialize fixed WQE fields */
		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
			struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
			struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
			struct mlx5_wqe_data_seg *dseg;

			sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
				.num_wqebbs = 1,
				.num_pkts   = 1,
			};

			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

			dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
			dseg->lkey = sq->mkey_be;
		}
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	synchronize_rcu(); /* Sync with NAPI. */
	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}

static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
	if (err)
		return err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;
	cq->netdev = priv->netdev;
	cq->priv = priv;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_create_cq_param *ccp,
			  struct mlx5e_cq *cq)
{
	int err;

	param->wq.buf_numa_node = ccp->node;
	param->wq.db_numa_node = ccp->node;
	param->eq_ix = ccp->ix;

	err = mlx5e_alloc_cq_common(priv, param, cq);

	cq->napi = ccp->napi;
	cq->ch_stats = ccp->ch_stats;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
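
/* Allocate a CQ, create it in HW, and apply the requested DIM moderation
 * when the device supports cq_moderation.
 */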
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_alloc_cq(priv, param, ccp, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_create_cq_param *ccp,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
				    ccp, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err, tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
				       params, &cparam->txq_sq, &c->sq[tc], tc);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}
	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

static int mlx5e_open_queues(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	struct dim_cq_moder icocq_moder = {0, 0};
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
			    &c->async_icosq.cq);
	if (err)
		return err;

	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
			    &c->icosq.cq);
	if (err)
		goto err_close_async_icosq_cq;

	err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
			    &c->xdpsq.cq);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
			    &c->rq.cq);
	if (err)
		goto err_close_xdp_tx_cqs;

	err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
				     &ccp, &c->rq_xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;
	napi_enable(&c->napi);

	spin_lock_init(&c->async_icosq_lock);

	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_close_async_icosq;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	if (c->xdp) {
		err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
				       &c->rq_xdpsq, false);
		if (err)
			goto err_close_sqs;
	}

	err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
	if (err)
		goto err_close_rq;

	return 0;

err_close_rq:
	mlx5e_close_rq(&c->rq);

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_close_async_icosq:
	mlx5e_close_icosq(&c->async_icosq);

err_disable_napi:
	napi_disable(&c->napi);

	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_xdp_tx_cqs:
	mlx5e_close_cq(&c->xdpsq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_close_async_icosq_cq:
	mlx5e_close_cq(&c->async_icosq.cq);

	return err;
}

static void mlx5e_close_queues(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	mlx5e_close_icosq(&c->async_icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	mlx5e_close_cq(&c->async_icosq.cq);
}
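
/* Distribute channels across the device's LAG ports. Non-PF functions bias
 * the mapping by their vhca_id so that channels of different functions do
 * not all start on the same port.
 */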
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
{
	u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);

	return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct xsk_buff_pool *xsk_pool,
			      struct mlx5e_channel **cp)
{
	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
	struct net_device *netdev = priv->netdev;
	struct mlx5e_xsk_param xsk;
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	if (err)
		return err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = mlx5_core_dma_dev(priv->mdev);
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;
	c->stats    = &priv->channel_stats[ix].ch;
	c->aff_mask = irq_get_effective_affinity_mask(irq);
	c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_queues(c, params, cparam);
	if (unlikely(err))
		goto err_napi_del;

	if (xsk_pool) {
		mlx5e_build_xsk_param(xsk_pool, &xsk);
		err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
		if (unlikely(err))
			goto err_close_queues;
	}

	*cp = c;

	return 0;

err_close_queues:
	mlx5e_close_queues(c);

err_napi_del:
	netif_napi_del(&c->napi);

	kvfree(c);

	return err;
}

static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_icosq(&c->icosq);
	mlx5e_activate_icosq(&c->async_icosq);
	mlx5e_activate_rq(&c->rq);

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_activate_xsk(c);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_deactivate_xsk(c);

	mlx5e_deactivate_rq(&c->rq);
	mlx5e_deactivate_icosq(&c->async_icosq);
	mlx5e_deactivate_icosq(&c->icosq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_close_xsk(c);
	mlx5e_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}

#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (MLX5_IPSEC_DEV(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;
#endif

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);
info->arr[0].frag_size = byte_count;
2083 info->arr[0].frag_stride = frag_stride;
2084 info->num_frags = 1;
2085 info->wqe_bulk = PAGE_SIZE / frag_stride;
2086 goto out;
2087 }
2088
2089 if (byte_count > PAGE_SIZE +
2090 (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
2091 frag_size_max = PAGE_SIZE;
2092
2093 i = 0;
2094 while (buf_size < byte_count) {
2095 int frag_size = byte_count - buf_size;
2096
2097 if (i < MLX5E_MAX_RX_FRAGS - 1)
2098 frag_size = min(frag_size, frag_size_max);
2099
2100 info->arr[i].frag_size = frag_size;
2101 info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
2102
2103 buf_size += frag_size;
2104 i++;
2105 }
2106 info->num_frags = i;
2107 /* number of different wqes sharing a page */
2108 info->wqe_bulk = 1 + (info->num_frags % 2);
2109
2110 out:
2111 info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
2112 info->log_num_frags = order_base_2(info->num_frags);
2113 }
2114
2115 static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
2116 {
2117 int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
2118
2119 switch (wq_type) {
2120 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2121 sz += sizeof(struct mlx5e_rx_wqe_ll);
2122 break;
2123 default: /* MLX5_WQ_TYPE_CYCLIC */
2124 sz += sizeof(struct mlx5e_rx_wqe_cyc);
2125 }
2126
2127 return order_base_2(sz);
2128 }
2129
2130 static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
2131 {
2132 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2133
2134 return MLX5_GET(wq, wq, log_wq_sz);
2135 }
2136
2137 void mlx5e_build_rq_param(struct mlx5e_priv *priv,
2138 struct mlx5e_params *params,
2139 struct mlx5e_xsk_param *xsk,
2140 struct mlx5e_rq_param *param)
2141 {
2142 struct mlx5_core_dev *mdev = priv->mdev;
2143 void *rqc = param->rqc;
2144 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2145 int ndsegs = 1;
2146
2147 switch (params->rq_wq_type) {
2148 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2149 MLX5_SET(wq, wq, log_wqe_num_of_strides,
2150 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
2151 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
2152 MLX5_SET(wq, wq, log_wqe_stride_size,
2153 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
2154 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
2155 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
2156 break;
2157 default: /* MLX5_WQ_TYPE_CYCLIC */
2158 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
2159 mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
2160 ndsegs = param->frags_info.num_frags;
2161 }
2162
2163 MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
2164 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
2165 MLX5_SET(wq, wq, log_wq_stride,
2166 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
2167 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
2168 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
2169 MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
2170 MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
2171
2172 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
2173 mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
2174 }
2175
2176 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
2177 struct mlx5e_rq_param *param)
2178 {
2179 struct mlx5_core_dev *mdev = priv->mdev;
2180 void *rqc = param->rqc;
2181 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2182
2183 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
2184 MLX5_SET(wq, wq, log_wq_stride,
2185 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
2186 MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
2187
2188 param->wq.buf_numa_node =
dev_to_node(mlx5_core_dma_dev(mdev));
2189 }
2190
2191 void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2192 struct mlx5e_sq_param *param)
2193 {
2194 void *sqc = param->sqc;
2195 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2196
2197 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
2198 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
2199
2200 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
2201 }
2202
2203 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
2204 struct mlx5e_params *params,
2205 struct mlx5e_sq_param *param)
2206 {
2207 void *sqc = param->sqc;
2208 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2209 bool allow_swp;
2210
2211 allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
2212 !!MLX5_IPSEC_DEV(priv->mdev);
2213 mlx5e_build_sq_param_common(priv, param);
2214 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2215 MLX5_SET(sqc, sqc, allow_swp, allow_swp);
2216 param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
2217 param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
2218 mlx5e_build_tx_cq_param(priv, params, &param->cqp);
2219 }
2220
2221 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2222 struct mlx5e_cq_param *param)
2223 {
2224 void *cqc = param->cqc;
2225
2226 MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
2227 if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
2228 MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
2229 }
2230
2231 void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2232 struct mlx5e_params *params,
2233 struct mlx5e_xsk_param *xsk,
2234 struct mlx5e_cq_param *param)
2235 {
2236 struct mlx5_core_dev *mdev = priv->mdev;
2237 bool hw_stridx = false;
2238 void *cqc = param->cqc;
2239 u8 log_cq_size;
2240
2241 switch (params->rq_wq_type) {
2242 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2243 log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
2244 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
2245 hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
2246 break;
2247 default: /* MLX5_WQ_TYPE_CYCLIC */
2248 log_cq_size = params->log_rq_mtu_frames;
2249 }
2250
2251 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
2252 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
2253 MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
2254 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
2255 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2256 }
2257
2258 mlx5e_build_common_cq_param(priv, param);
2259 param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2260 }
2261
2262 void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2263 struct mlx5e_params *params,
2264 struct mlx5e_cq_param *param)
2265 {
2266 void *cqc = param->cqc;
2267
2268 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2269
2270 mlx5e_build_common_cq_param(priv, param);
2271 param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2272 }
2273
2274 void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2275 u8 log_wq_size,
2276 struct mlx5e_cq_param *param)
2277 {
2278 void *cqc = param->cqc;
2279
2280 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2281
2282 mlx5e_build_common_cq_param(priv, param);
2283
2284 param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2285 }
2286
2287 void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2288 u8 log_wq_size,
2289 struct mlx5e_sq_param *param)
2290 {
2291 void *sqc = param->sqc;
2292 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2293
2294 mlx5e_build_sq_param_common(priv, param);
2295
2296 MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2297 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2298 mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
2299 }
2300
2301 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2302 struct mlx5e_params *params,
2303 struct mlx5e_sq_param *param)
2304 {
2305 void *sqc = param->sqc;
2306 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2307
2308 mlx5e_build_sq_param_common(priv, param);
2309 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2310 param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
2311 mlx5e_build_tx_cq_param(priv, params, &param->cqp);
2312 }
2313
2314 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
2315 struct mlx5e_rq_param *rqp)
2316 {
2317 switch (params->rq_wq_type) {
2318 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2319 return order_base_2(MLX5E_UMR_WQEBBS) +
2320 mlx5e_get_rq_log_wq_sz(rqp->rqc);
2321 default: /* MLX5_WQ_TYPE_CYCLIC */
2322 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2323 }
2324 }
2325
2326 static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev)
2327 {
2328 if (netdev->hw_features & NETIF_F_HW_TLS_RX)
2329 return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
2330
2331 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2332 }
2333
2334 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2335 struct mlx5e_params *params,
2336 struct mlx5e_channel_param *cparam)
2337 {
2338 u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
2339
2340 mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
2341
2342 icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
2343 async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev);
2344
2345 mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
2346 mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2347 mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2348 mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
2349 }
2350
2351 int mlx5e_open_channels(struct mlx5e_priv *priv,
2352 struct mlx5e_channels *chs)
2353 {
2354 struct mlx5e_channel_param *cparam;
2355 int err = -ENOMEM;
2356 int i;
2357
2358 chs->num = chs->params.num_channels;
2359
2360 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2361 cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2362 if
(!chs->c || !cparam) 2363 goto err_free; 2364 2365 mlx5e_build_channel_param(priv, &chs->params, cparam); 2366 for (i = 0; i < chs->num; i++) { 2367 struct xsk_buff_pool *xsk_pool = NULL; 2368 2369 if (chs->params.xdp_prog) 2370 xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i); 2371 2372 err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]); 2373 if (err) 2374 goto err_close_channels; 2375 } 2376 2377 if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) { 2378 err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port, 2379 &chs->port_ptp); 2380 if (err) 2381 goto err_close_channels; 2382 } 2383 2384 mlx5e_health_channels_update(priv); 2385 kvfree(cparam); 2386 return 0; 2387 2388 err_close_channels: 2389 for (i--; i >= 0; i--) 2390 mlx5e_close_channel(chs->c[i]); 2391 2392 err_free: 2393 kfree(chs->c); 2394 kvfree(cparam); 2395 chs->num = 0; 2396 return err; 2397 } 2398 2399 static void mlx5e_activate_channels(struct mlx5e_channels *chs) 2400 { 2401 int i; 2402 2403 for (i = 0; i < chs->num; i++) 2404 mlx5e_activate_channel(chs->c[i]); 2405 2406 if (chs->port_ptp) 2407 mlx5e_ptp_activate_channel(chs->port_ptp); 2408 } 2409 2410 #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ 2411 2412 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) 2413 { 2414 int err = 0; 2415 int i; 2416 2417 for (i = 0; i < chs->num; i++) { 2418 int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT; 2419 2420 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout); 2421 2422 /* Don't wait on the XSK RQ, because the newer xdpsock sample 2423 * doesn't provide any Fill Ring entries at the setup stage. 2424 */ 2425 } 2426 2427 return err ? -ETIMEDOUT : 0; 2428 } 2429 2430 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) 2431 { 2432 int i; 2433 2434 if (chs->port_ptp) 2435 mlx5e_ptp_deactivate_channel(chs->port_ptp); 2436 2437 for (i = 0; i < chs->num; i++) 2438 mlx5e_deactivate_channel(chs->c[i]); 2439 } 2440 2441 void mlx5e_close_channels(struct mlx5e_channels *chs) 2442 { 2443 int i; 2444 2445 if (chs->port_ptp) 2446 mlx5e_port_ptp_close(chs->port_ptp); 2447 2448 for (i = 0; i < chs->num; i++) 2449 mlx5e_close_channel(chs->c[i]); 2450 2451 kfree(chs->c); 2452 chs->num = 0; 2453 } 2454 2455 static int 2456 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt) 2457 { 2458 struct mlx5_core_dev *mdev = priv->mdev; 2459 void *rqtc; 2460 int inlen; 2461 int err; 2462 u32 *in; 2463 int i; 2464 2465 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 2466 in = kvzalloc(inlen, GFP_KERNEL); 2467 if (!in) 2468 return -ENOMEM; 2469 2470 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 2471 2472 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 2473 MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 2474 2475 for (i = 0; i < sz; i++) 2476 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn); 2477 2478 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); 2479 if (!err) 2480 rqt->enabled = true; 2481 2482 kvfree(in); 2483 return err; 2484 } 2485 2486 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) 2487 { 2488 rqt->enabled = false; 2489 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); 2490 } 2491 2492 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv) 2493 { 2494 struct mlx5e_rqt *rqt = &priv->indir_rqt; 2495 int err; 2496 2497 err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt); 2498 if (err) 2499 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err); 2500 return err; 2501 } 2502 2503 int 
mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 2504 { 2505 int err; 2506 int ix; 2507 2508 for (ix = 0; ix < priv->max_nch; ix++) { 2509 err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); 2510 if (unlikely(err)) 2511 goto err_destroy_rqts; 2512 } 2513 2514 return 0; 2515 2516 err_destroy_rqts: 2517 mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err); 2518 for (ix--; ix >= 0; ix--) 2519 mlx5e_destroy_rqt(priv, &tirs[ix].rqt); 2520 2521 return err; 2522 } 2523 2524 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 2525 { 2526 int i; 2527 2528 for (i = 0; i < priv->max_nch; i++) 2529 mlx5e_destroy_rqt(priv, &tirs[i].rqt); 2530 } 2531 2532 static int mlx5e_rx_hash_fn(int hfunc) 2533 { 2534 return (hfunc == ETH_RSS_HASH_TOP) ? 2535 MLX5_RX_HASH_FN_TOEPLITZ : 2536 MLX5_RX_HASH_FN_INVERTED_XOR8; 2537 } 2538 2539 int mlx5e_bits_invert(unsigned long a, int size) 2540 { 2541 int inv = 0; 2542 int i; 2543 2544 for (i = 0; i < size; i++) 2545 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; 2546 2547 return inv; 2548 } 2549 2550 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz, 2551 struct mlx5e_redirect_rqt_param rrp, void *rqtc) 2552 { 2553 int i; 2554 2555 for (i = 0; i < sz; i++) { 2556 u32 rqn; 2557 2558 if (rrp.is_rss) { 2559 int ix = i; 2560 2561 if (rrp.rss.hfunc == ETH_RSS_HASH_XOR) 2562 ix = mlx5e_bits_invert(i, ilog2(sz)); 2563 2564 ix = priv->rss_params.indirection_rqt[ix]; 2565 rqn = rrp.rss.channels->c[ix]->rq.rqn; 2566 } else { 2567 rqn = rrp.rqn; 2568 } 2569 MLX5_SET(rqtc, rqtc, rq_num[i], rqn); 2570 } 2571 } 2572 2573 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, 2574 struct mlx5e_redirect_rqt_param rrp) 2575 { 2576 struct mlx5_core_dev *mdev = priv->mdev; 2577 void *rqtc; 2578 int inlen; 2579 u32 *in; 2580 int err; 2581 2582 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; 2583 in = kvzalloc(inlen, GFP_KERNEL); 2584 if (!in) 2585 return -ENOMEM; 2586 2587 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); 2588 2589 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 2590 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); 2591 mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc); 2592 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen); 2593 2594 kvfree(in); 2595 return err; 2596 } 2597 2598 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix, 2599 struct mlx5e_redirect_rqt_param rrp) 2600 { 2601 if (!rrp.is_rss) 2602 return rrp.rqn; 2603 2604 if (ix >= rrp.rss.channels->num) 2605 return priv->drop_rq.rqn; 2606 2607 return rrp.rss.channels->c[ix]->rq.rqn; 2608 } 2609 2610 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, 2611 struct mlx5e_redirect_rqt_param rrp) 2612 { 2613 u32 rqtn; 2614 int ix; 2615 2616 if (priv->indir_rqt.enabled) { 2617 /* RSS RQ table */ 2618 rqtn = priv->indir_rqt.rqtn; 2619 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); 2620 } 2621 2622 for (ix = 0; ix < priv->max_nch; ix++) { 2623 struct mlx5e_redirect_rqt_param direct_rrp = { 2624 .is_rss = false, 2625 { 2626 .rqn = mlx5e_get_direct_rqn(priv, ix, rrp) 2627 }, 2628 }; 2629 2630 /* Direct RQ Tables */ 2631 if (!priv->direct_tir[ix].rqt.enabled) 2632 continue; 2633 2634 rqtn = priv->direct_tir[ix].rqt.rqtn; 2635 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); 2636 } 2637 } 2638 2639 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, 2640 struct mlx5e_channels *chs) 2641 { 2642 struct mlx5e_redirect_rqt_param rrp = { 2643 .is_rss = true, 2644 { 2645 .rss = { 2646 .channels = 
chs, 2647 .hfunc = priv->rss_params.hfunc, 2648 } 2649 }, 2650 }; 2651 2652 mlx5e_redirect_rqts(priv, rrp); 2653 } 2654 2655 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) 2656 { 2657 struct mlx5e_redirect_rqt_param drop_rrp = { 2658 .is_rss = false, 2659 { 2660 .rqn = priv->drop_rq.rqn, 2661 }, 2662 }; 2663 2664 mlx5e_redirect_rqts(priv, drop_rrp); 2665 } 2666 2667 static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = { 2668 [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, 2669 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, 2670 .rx_hash_fields = MLX5_HASH_IP_L4PORTS, 2671 }, 2672 [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, 2673 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, 2674 .rx_hash_fields = MLX5_HASH_IP_L4PORTS, 2675 }, 2676 [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, 2677 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, 2678 .rx_hash_fields = MLX5_HASH_IP_L4PORTS, 2679 }, 2680 [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, 2681 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, 2682 .rx_hash_fields = MLX5_HASH_IP_L4PORTS, 2683 }, 2684 [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, 2685 .l4_prot_type = 0, 2686 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, 2687 }, 2688 [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, 2689 .l4_prot_type = 0, 2690 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, 2691 }, 2692 [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, 2693 .l4_prot_type = 0, 2694 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, 2695 }, 2696 [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, 2697 .l4_prot_type = 0, 2698 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, 2699 }, 2700 [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, 2701 .l4_prot_type = 0, 2702 .rx_hash_fields = MLX5_HASH_IP, 2703 }, 2704 [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, 2705 .l4_prot_type = 0, 2706 .rx_hash_fields = MLX5_HASH_IP, 2707 }, 2708 }; 2709 2710 struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt) 2711 { 2712 return tirc_default_config[tt]; 2713 } 2714 2715 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) 2716 { 2717 if (!params->lro_en) 2718 return; 2719 2720 #define ROUGH_MAX_L2_L3_HDR_SZ 256 2721 2722 MLX5_SET(tirc, tirc, lro_enable_mask, 2723 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 2724 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); 2725 MLX5_SET(tirc, tirc, lro_max_ip_payload_size, 2726 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); 2727 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); 2728 } 2729 2730 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, 2731 const struct mlx5e_tirc_config *ttconfig, 2732 void *tirc, bool inner) 2733 { 2734 void *hfso = inner ? 
MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) : 2735 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 2736 2737 MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc)); 2738 if (rss_params->hfunc == ETH_RSS_HASH_TOP) { 2739 void *rss_key = MLX5_ADDR_OF(tirc, tirc, 2740 rx_hash_toeplitz_key); 2741 size_t len = MLX5_FLD_SZ_BYTES(tirc, 2742 rx_hash_toeplitz_key); 2743 2744 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2745 memcpy(rss_key, rss_params->toeplitz_hash_key, len); 2746 } 2747 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 2748 ttconfig->l3_prot_type); 2749 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 2750 ttconfig->l4_prot_type); 2751 MLX5_SET(rx_hash_field_select, hfso, selected_fields, 2752 ttconfig->rx_hash_fields); 2753 } 2754 2755 static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig, 2756 enum mlx5e_traffic_types tt, 2757 u32 rx_hash_fields) 2758 { 2759 *ttconfig = tirc_default_config[tt]; 2760 ttconfig->rx_hash_fields = rx_hash_fields; 2761 } 2762 2763 void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in) 2764 { 2765 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); 2766 struct mlx5e_rss_params *rss = &priv->rss_params; 2767 struct mlx5_core_dev *mdev = priv->mdev; 2768 int ctxlen = MLX5_ST_SZ_BYTES(tirc); 2769 struct mlx5e_tirc_config ttconfig; 2770 int tt; 2771 2772 MLX5_SET(modify_tir_in, in, bitmask.hash, 1); 2773 2774 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 2775 memset(tirc, 0, ctxlen); 2776 mlx5e_update_rx_hash_fields(&ttconfig, tt, 2777 rss->rx_hash_fields[tt]); 2778 mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false); 2779 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); 2780 } 2781 2782 /* Verify inner tirs resources allocated */ 2783 if (!priv->inner_indir_tir[0].tirn) 2784 return; 2785 2786 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 2787 memset(tirc, 0, ctxlen); 2788 mlx5e_update_rx_hash_fields(&ttconfig, tt, 2789 rss->rx_hash_fields[tt]); 2790 mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true); 2791 mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in); 2792 } 2793 } 2794 2795 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) 2796 { 2797 struct mlx5_core_dev *mdev = priv->mdev; 2798 2799 void *in; 2800 void *tirc; 2801 int inlen; 2802 int err; 2803 int tt; 2804 int ix; 2805 2806 inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 2807 in = kvzalloc(inlen, GFP_KERNEL); 2808 if (!in) 2809 return -ENOMEM; 2810 2811 MLX5_SET(modify_tir_in, in, bitmask.lro, 1); 2812 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); 2813 2814 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); 2815 2816 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 2817 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); 2818 if (err) 2819 goto free_in; 2820 } 2821 2822 for (ix = 0; ix < priv->max_nch; ix++) { 2823 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in); 2824 if (err) 2825 goto free_in; 2826 } 2827 2828 free_in: 2829 kvfree(in); 2830 2831 return err; 2832 } 2833 2834 static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); 2835 2836 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, 2837 struct mlx5e_params *params, u16 mtu) 2838 { 2839 u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu); 2840 int err; 2841 2842 err = mlx5_set_port_mtu(mdev, hw_mtu, 1); 2843 if (err) 2844 return err; 2845 2846 /* Update vport context MTU */ 2847 mlx5_modify_nic_vport_mtu(mdev, hw_mtu); 2848 return 0; 2849 } 2850 2851 static void mlx5e_query_mtu(struct mlx5_core_dev 
*mdev, 2852 struct mlx5e_params *params, u16 *mtu) 2853 { 2854 u16 hw_mtu = 0; 2855 int err; 2856 2857 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); 2858 if (err || !hw_mtu) /* fallback to port oper mtu */ 2859 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 2860 2861 *mtu = MLX5E_HW2SW_MTU(params, hw_mtu); 2862 } 2863 2864 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) 2865 { 2866 struct mlx5e_params *params = &priv->channels.params; 2867 struct net_device *netdev = priv->netdev; 2868 struct mlx5_core_dev *mdev = priv->mdev; 2869 u16 mtu; 2870 int err; 2871 2872 err = mlx5e_set_mtu(mdev, params, params->sw_mtu); 2873 if (err) 2874 return err; 2875 2876 mlx5e_query_mtu(mdev, params, &mtu); 2877 if (mtu != params->sw_mtu) 2878 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", 2879 __func__, mtu, params->sw_mtu); 2880 2881 params->sw_mtu = mtu; 2882 return 0; 2883 } 2884 2885 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu); 2886 2887 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv) 2888 { 2889 struct mlx5e_params *params = &priv->channels.params; 2890 struct net_device *netdev = priv->netdev; 2891 struct mlx5_core_dev *mdev = priv->mdev; 2892 u16 max_mtu; 2893 2894 /* MTU range: 68 - hw-specific max */ 2895 netdev->min_mtu = ETH_MIN_MTU; 2896 2897 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 2898 netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu), 2899 ETH_MAX_MTU); 2900 } 2901 2902 static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc) 2903 { 2904 int tc; 2905 2906 netdev_reset_tc(netdev); 2907 2908 if (ntc == 1) 2909 return; 2910 2911 netdev_set_num_tc(netdev, ntc); 2912 2913 /* Map netdev TCs to offset 0 2914 * We have our own UP to TXQ mapping for QoS 2915 */ 2916 for (tc = 0; tc < ntc; tc++) 2917 netdev_set_tc_queue(netdev, tc, nch, 0); 2918 } 2919 2920 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) 2921 { 2922 struct net_device *netdev = priv->netdev; 2923 int num_txqs, num_rxqs, nch, ntc; 2924 int old_num_txqs, old_ntc; 2925 int err; 2926 2927 old_num_txqs = netdev->real_num_tx_queues; 2928 old_ntc = netdev->num_tc; 2929 2930 nch = priv->channels.params.num_channels; 2931 ntc = priv->channels.params.num_tc; 2932 num_txqs = nch * ntc; 2933 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) 2934 num_txqs += ntc; 2935 num_rxqs = nch * priv->profile->rq_groups; 2936 2937 mlx5e_netdev_set_tcs(netdev, nch, ntc); 2938 2939 err = netif_set_real_num_tx_queues(netdev, num_txqs); 2940 if (err) { 2941 netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err); 2942 goto err_tcs; 2943 } 2944 err = netif_set_real_num_rx_queues(netdev, num_rxqs); 2945 if (err) { 2946 netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); 2947 goto err_txqs; 2948 } 2949 2950 return 0; 2951 2952 err_txqs: 2953 /* netif_set_real_num_rx_queues could fail only when nch increased. Only 2954 * one of nch and ntc is changed in this function. That means, the call 2955 * to netif_set_real_num_tx_queues below should not fail, because it 2956 * decreases the number of TX queues. 
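 * For example (illustrative numbers): growing nch from 8 to 12 with
 * ntc = 2 can fail while enabling the 12 RX queues; rolling TX back
 * from 24 to the old 16 queues only shrinks the active set, so the
 * rollback itself cannot fail.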
2957 */ 2958 WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); 2959 2960 err_tcs: 2961 mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc); 2962 return err; 2963 } 2964 2965 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, 2966 struct mlx5e_params *params) 2967 { 2968 struct mlx5_core_dev *mdev = priv->mdev; 2969 int num_comp_vectors, ix, irq; 2970 2971 num_comp_vectors = mlx5_comp_vectors_count(mdev); 2972 2973 for (ix = 0; ix < params->num_channels; ix++) { 2974 cpumask_clear(priv->scratchpad.cpumask); 2975 2976 for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) { 2977 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq)); 2978 2979 cpumask_set_cpu(cpu, priv->scratchpad.cpumask); 2980 } 2981 2982 netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix); 2983 } 2984 } 2985 2986 int mlx5e_num_channels_changed(struct mlx5e_priv *priv) 2987 { 2988 u16 count = priv->channels.params.num_channels; 2989 int err; 2990 2991 err = mlx5e_update_netdev_queues(priv); 2992 if (err) 2993 return err; 2994 2995 mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); 2996 2997 if (!netif_is_rxfh_configured(priv->netdev)) 2998 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, 2999 MLX5E_INDIR_RQT_SIZE, count); 3000 3001 return 0; 3002 } 3003 3004 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed); 3005 3006 static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) 3007 { 3008 int i, ch, tc, num_tc; 3009 3010 ch = priv->channels.num; 3011 num_tc = priv->channels.params.num_tc; 3012 3013 for (i = 0; i < ch; i++) { 3014 for (tc = 0; tc < num_tc; tc++) { 3015 struct mlx5e_channel *c = priv->channels.c[i]; 3016 struct mlx5e_txqsq *sq = &c->sq[tc]; 3017 3018 priv->txq2sq[sq->txq_ix] = sq; 3019 priv->channel_tc2realtxq[i][tc] = i + tc * ch; 3020 } 3021 } 3022 3023 if (!priv->channels.port_ptp) 3024 return; 3025 3026 for (tc = 0; tc < num_tc; tc++) { 3027 struct mlx5e_port_ptp *c = priv->channels.port_ptp; 3028 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; 3029 3030 priv->txq2sq[sq->txq_ix] = sq; 3031 priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc; 3032 } 3033 } 3034 3035 static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv) 3036 { 3037 /* Sync with mlx5e_select_queue. */ 3038 WRITE_ONCE(priv->num_tc_x_num_ch, 3039 priv->channels.params.num_tc * priv->channels.num); 3040 } 3041 3042 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) 3043 { 3044 mlx5e_update_num_tc_x_num_ch(priv); 3045 mlx5e_build_txq_maps(priv); 3046 mlx5e_activate_channels(&priv->channels); 3047 mlx5e_xdp_tx_enable(priv); 3048 netif_tx_start_all_queues(priv->netdev); 3049 3050 if (mlx5e_is_vport_rep(priv)) 3051 mlx5e_add_sqs_fwd_rules(priv); 3052 3053 mlx5e_wait_channels_min_rx_wqes(&priv->channels); 3054 mlx5e_redirect_rqts_to_channels(priv, &priv->channels); 3055 3056 mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels); 3057 } 3058 3059 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) 3060 { 3061 mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels); 3062 3063 mlx5e_redirect_rqts_to_drop(priv); 3064 3065 if (mlx5e_is_vport_rep(priv)) 3066 mlx5e_remove_sqs_fwd_rules(priv); 3067 3068 /* FIXME: This is a W/A only for tx timeout watch dog false alarm when 3069 * polling for inactive tx queues. 
3070 */ 3071 netif_tx_stop_all_queues(priv->netdev); 3072 netif_tx_disable(priv->netdev); 3073 mlx5e_xdp_tx_disable(priv); 3074 mlx5e_deactivate_channels(&priv->channels); 3075 } 3076 3077 static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, 3078 struct mlx5e_channels *new_chs, 3079 mlx5e_fp_preactivate preactivate, 3080 void *context) 3081 { 3082 struct net_device *netdev = priv->netdev; 3083 struct mlx5e_channels old_chs; 3084 int carrier_ok; 3085 int err = 0; 3086 3087 carrier_ok = netif_carrier_ok(netdev); 3088 netif_carrier_off(netdev); 3089 3090 mlx5e_deactivate_priv_channels(priv); 3091 3092 old_chs = priv->channels; 3093 priv->channels = *new_chs; 3094 3095 /* New channels are ready to roll, call the preactivate hook if needed 3096 * to modify HW settings or update kernel parameters. 3097 */ 3098 if (preactivate) { 3099 err = preactivate(priv, context); 3100 if (err) { 3101 priv->channels = old_chs; 3102 goto out; 3103 } 3104 } 3105 3106 mlx5e_close_channels(&old_chs); 3107 priv->profile->update_rx(priv); 3108 3109 out: 3110 mlx5e_activate_priv_channels(priv); 3111 3112 /* return carrier back if needed */ 3113 if (carrier_ok) 3114 netif_carrier_on(netdev); 3115 3116 return err; 3117 } 3118 3119 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, 3120 struct mlx5e_channels *new_chs, 3121 mlx5e_fp_preactivate preactivate, 3122 void *context) 3123 { 3124 int err; 3125 3126 err = mlx5e_open_channels(priv, new_chs); 3127 if (err) 3128 return err; 3129 3130 err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context); 3131 if (err) 3132 goto err_close; 3133 3134 return 0; 3135 3136 err_close: 3137 mlx5e_close_channels(new_chs); 3138 3139 return err; 3140 } 3141 3142 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) 3143 { 3144 struct mlx5e_channels new_channels = {}; 3145 3146 new_channels.params = priv->channels.params; 3147 return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); 3148 } 3149 3150 void mlx5e_timestamp_init(struct mlx5e_priv *priv) 3151 { 3152 priv->tstamp.tx_type = HWTSTAMP_TX_OFF; 3153 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; 3154 } 3155 3156 static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, 3157 enum mlx5_port_status state) 3158 { 3159 struct mlx5_eswitch *esw = mdev->priv.eswitch; 3160 int vport_admin_state; 3161 3162 mlx5_set_port_admin_status(mdev, state); 3163 3164 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS || 3165 !MLX5_CAP_GEN(mdev, uplink_follow)) 3166 return; 3167 3168 if (state == MLX5_PORT_UP) 3169 vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO; 3170 else 3171 vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN; 3172 3173 mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state); 3174 } 3175 3176 int mlx5e_open_locked(struct net_device *netdev) 3177 { 3178 struct mlx5e_priv *priv = netdev_priv(netdev); 3179 int err; 3180 3181 set_bit(MLX5E_STATE_OPENED, &priv->state); 3182 3183 err = mlx5e_open_channels(priv, &priv->channels); 3184 if (err) 3185 goto err_clear_state_opened_flag; 3186 3187 priv->profile->update_rx(priv); 3188 mlx5e_activate_priv_channels(priv); 3189 if (priv->profile->update_carrier) 3190 priv->profile->update_carrier(priv); 3191 3192 mlx5e_queue_update_stats(priv); 3193 return 0; 3194 3195 err_clear_state_opened_flag: 3196 clear_bit(MLX5E_STATE_OPENED, &priv->state); 3197 return err; 3198 } 3199 3200 int mlx5e_open(struct net_device *netdev) 3201 { 3202 struct mlx5e_priv *priv = netdev_priv(netdev); 3203 int err; 3204 3205 mutex_lock(&priv->state_lock); 3206 
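/* priv->state_lock serializes ndo_open/ndo_close against the
 * reconfiguration paths in this file (MTU changes, feature toggles,
 * safe channel switches), which take the same lock.
 */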
err = mlx5e_open_locked(netdev);
3207 if (!err)
3208 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
3209 mutex_unlock(&priv->state_lock);
3210
3211 return err;
3212 }
3213
3214 int mlx5e_close_locked(struct net_device *netdev)
3215 {
3216 struct mlx5e_priv *priv = netdev_priv(netdev);
3217
3218 /* May already be CLOSED in case a previous configuration operation
3219 * (e.g. RX/TX queue size change) that involves close&open failed.
3220 */
3221 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3222 return 0;
3223
3224 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3225
3226 netif_carrier_off(priv->netdev);
3227 mlx5e_deactivate_priv_channels(priv);
3228 mlx5e_close_channels(&priv->channels);
3229
3230 return 0;
3231 }
3232
3233 int mlx5e_close(struct net_device *netdev)
3234 {
3235 struct mlx5e_priv *priv = netdev_priv(netdev);
3236 int err;
3237
3238 if (!netif_device_present(netdev))
3239 return -ENODEV;
3240
3241 mutex_lock(&priv->state_lock);
3242 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
3243 err = mlx5e_close_locked(netdev);
3244 mutex_unlock(&priv->state_lock);
3245
3246 return err;
3247 }
3248
3249 static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
3250 {
3251 mlx5_wq_destroy(&rq->wq_ctrl);
3252 }
3253
3254 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3255 struct mlx5e_rq *rq,
3256 struct mlx5e_rq_param *param)
3257 {
3258 void *rqc = param->rqc;
3259 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
3260 int err;
3261
3262 param->wq.db_numa_node = param->wq.buf_numa_node;
3263
3264 err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
3265 &rq->wq_ctrl);
3266 if (err)
3267 return err;
3268
3269 /* Mark as unused given "Drop-RQ" packets never reach XDP */
3270 xdp_rxq_info_unused(&rq->xdp_rxq);
3271
3272 rq->mdev = mdev;
3273
3274 return 0;
3275 }
3276
3277 static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
3278 struct mlx5e_cq *cq,
3279 struct mlx5e_cq_param *param)
3280 {
3281 struct mlx5_core_dev *mdev = priv->mdev;
3282
3283 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3284 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3285
3286 return mlx5e_alloc_cq_common(priv, param, cq);
3287 }
3288
3289 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3290 struct mlx5e_rq *drop_rq)
3291 {
3292 struct mlx5_core_dev *mdev = priv->mdev;
3293 struct mlx5e_cq_param cq_param = {};
3294 struct mlx5e_rq_param rq_param = {};
3295 struct mlx5e_cq *cq = &drop_rq->cq;
3296 int err;
3297
3298 mlx5e_build_drop_rq_param(priv, &rq_param);
3299
3300 err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
3301 if (err)
3302 return err;
3303
3304 err = mlx5e_create_cq(cq, &cq_param);
3305 if (err)
3306 goto err_free_cq;
3307
3308 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3309 if (err)
3310 goto err_destroy_cq;
3311
3312 err = mlx5e_create_rq(drop_rq, &rq_param);
3313 if (err)
3314 goto err_free_rq;
3315
3316 err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3317 if (err)
3318 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3319
3320 return 0;
3321
3322 err_free_rq:
3323 mlx5e_free_drop_rq(drop_rq);
3324
3325 err_destroy_cq:
3326 mlx5e_destroy_cq(cq);
3327
3328 err_free_cq:
3329 mlx5e_free_cq(cq);
3330
3331 return err;
3332 }
3333
3334 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3335 {
3336 mlx5e_destroy_rq(drop_rq);
3337 mlx5e_free_drop_rq(drop_rq);
3338 mlx5e_destroy_cq(&drop_rq->cq);
3339 mlx5e_free_cq(&drop_rq->cq);
3340 }
3341
3342 int
mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) 3343 { 3344 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 3345 3346 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); 3347 3348 if (MLX5_GET(tisc, tisc, tls_en)) 3349 MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn); 3350 3351 if (mlx5_lag_is_lacp_owner(mdev)) 3352 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); 3353 3354 return mlx5_core_create_tis(mdev, in, tisn); 3355 } 3356 3357 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) 3358 { 3359 mlx5_core_destroy_tis(mdev, tisn); 3360 } 3361 3362 void mlx5e_destroy_tises(struct mlx5e_priv *priv) 3363 { 3364 int tc, i; 3365 3366 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) 3367 for (tc = 0; tc < priv->profile->max_tc; tc++) 3368 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); 3369 } 3370 3371 static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev) 3372 { 3373 return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; 3374 } 3375 3376 int mlx5e_create_tises(struct mlx5e_priv *priv) 3377 { 3378 int tc, i; 3379 int err; 3380 3381 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) { 3382 for (tc = 0; tc < priv->profile->max_tc; tc++) { 3383 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 3384 void *tisc; 3385 3386 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 3387 3388 MLX5_SET(tisc, tisc, prio, tc << 1); 3389 3390 if (mlx5e_lag_should_assign_affinity(priv->mdev)) 3391 MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); 3392 3393 err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]); 3394 if (err) 3395 goto err_close_tises; 3396 } 3397 } 3398 3399 return 0; 3400 3401 err_close_tises: 3402 for (; i >= 0; i--) { 3403 for (tc--; tc >= 0; tc--) 3404 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); 3405 tc = priv->profile->max_tc; 3406 } 3407 3408 return err; 3409 } 3410 3411 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) 3412 { 3413 mlx5e_destroy_tises(priv); 3414 } 3415 3416 static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, 3417 u32 rqtn, u32 *tirc) 3418 { 3419 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); 3420 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); 3421 MLX5_SET(tirc, tirc, indirect_table, rqtn); 3422 MLX5_SET(tirc, tirc, tunneled_offload_en, 3423 priv->channels.params.tunneled_offload_en); 3424 3425 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); 3426 } 3427 3428 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, 3429 enum mlx5e_traffic_types tt, 3430 u32 *tirc) 3431 { 3432 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); 3433 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, 3434 &tirc_default_config[tt], tirc, false); 3435 } 3436 3437 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) 3438 { 3439 mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc); 3440 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); 3441 } 3442 3443 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, 3444 enum mlx5e_traffic_types tt, 3445 u32 *tirc) 3446 { 3447 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); 3448 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, 3449 &tirc_default_config[tt], tirc, true); 3450 } 3451 3452 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) 3453 { 3454 struct mlx5e_tir *tir; 3455 void *tirc; 3456 int inlen; 3457 int i = 0; 3458 int err; 3459 u32 *in; 3460 int tt; 
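/* One TIR is created per traffic type on top of the RSS RQT; when the
 * device supports inner-header steering (mlx5e_tunnel_inner_ft_supported),
 * a second set of inner TIRs is created below for tunneled traffic.
 */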
3461 3462 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 3463 in = kvzalloc(inlen, GFP_KERNEL); 3464 if (!in) 3465 return -ENOMEM; 3466 3467 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 3468 memset(in, 0, inlen); 3469 tir = &priv->indir_tir[tt]; 3470 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 3471 mlx5e_build_indir_tir_ctx(priv, tt, tirc); 3472 err = mlx5e_create_tir(priv->mdev, tir, in); 3473 if (err) { 3474 mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err); 3475 goto err_destroy_inner_tirs; 3476 } 3477 } 3478 3479 if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) 3480 goto out; 3481 3482 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { 3483 memset(in, 0, inlen); 3484 tir = &priv->inner_indir_tir[i]; 3485 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 3486 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc); 3487 err = mlx5e_create_tir(priv->mdev, tir, in); 3488 if (err) { 3489 mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err); 3490 goto err_destroy_inner_tirs; 3491 } 3492 } 3493 3494 out: 3495 kvfree(in); 3496 3497 return 0; 3498 3499 err_destroy_inner_tirs: 3500 for (i--; i >= 0; i--) 3501 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); 3502 3503 for (tt--; tt >= 0; tt--) 3504 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); 3505 3506 kvfree(in); 3507 3508 return err; 3509 } 3510 3511 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 3512 { 3513 struct mlx5e_tir *tir; 3514 void *tirc; 3515 int inlen; 3516 int err = 0; 3517 u32 *in; 3518 int ix; 3519 3520 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 3521 in = kvzalloc(inlen, GFP_KERNEL); 3522 if (!in) 3523 return -ENOMEM; 3524 3525 for (ix = 0; ix < priv->max_nch; ix++) { 3526 memset(in, 0, inlen); 3527 tir = &tirs[ix]; 3528 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 3529 mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc); 3530 err = mlx5e_create_tir(priv->mdev, tir, in); 3531 if (unlikely(err)) 3532 goto err_destroy_ch_tirs; 3533 } 3534 3535 goto out; 3536 3537 err_destroy_ch_tirs: 3538 mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err); 3539 for (ix--; ix >= 0; ix--) 3540 mlx5e_destroy_tir(priv->mdev, &tirs[ix]); 3541 3542 out: 3543 kvfree(in); 3544 3545 return err; 3546 } 3547 3548 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) 3549 { 3550 int i; 3551 3552 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 3553 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); 3554 3555 /* Verify inner tirs resources allocated */ 3556 if (!priv->inner_indir_tir[0].tirn) 3557 return; 3558 3559 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 3560 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); 3561 } 3562 3563 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) 3564 { 3565 int i; 3566 3567 for (i = 0; i < priv->max_nch; i++) 3568 mlx5e_destroy_tir(priv->mdev, &tirs[i]); 3569 } 3570 3571 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) 3572 { 3573 int err = 0; 3574 int i; 3575 3576 for (i = 0; i < chs->num; i++) { 3577 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); 3578 if (err) 3579 return err; 3580 } 3581 3582 return 0; 3583 } 3584 3585 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) 3586 { 3587 int err = 0; 3588 int i; 3589 3590 for (i = 0; i < chs->num; i++) { 3591 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd); 3592 if (err) 3593 return err; 3594 } 3595 3596 return 0; 3597 } 3598 3599 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, 
3600 struct tc_mqprio_qopt *mqprio) 3601 { 3602 struct mlx5e_channels new_channels = {}; 3603 u8 tc = mqprio->num_tc; 3604 int err = 0; 3605 3606 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 3607 3608 if (tc && tc != MLX5E_MAX_NUM_TC) 3609 return -EINVAL; 3610 3611 mutex_lock(&priv->state_lock); 3612 3613 new_channels.params = priv->channels.params; 3614 new_channels.params.num_tc = tc ? tc : 1; 3615 3616 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 3617 priv->channels.params = new_channels.params; 3618 goto out; 3619 } 3620 3621 err = mlx5e_safe_switch_channels(priv, &new_channels, 3622 mlx5e_num_channels_changed_ctx, NULL); 3623 if (err) 3624 goto out; 3625 3626 priv->max_opened_tc = max_t(u8, priv->max_opened_tc, 3627 new_channels.params.num_tc); 3628 out: 3629 mutex_unlock(&priv->state_lock); 3630 return err; 3631 } 3632 3633 static LIST_HEAD(mlx5e_block_cb_list); 3634 3635 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, 3636 void *type_data) 3637 { 3638 struct mlx5e_priv *priv = netdev_priv(dev); 3639 3640 switch (type) { 3641 case TC_SETUP_BLOCK: { 3642 struct flow_block_offload *f = type_data; 3643 3644 f->unlocked_driver_cb = true; 3645 return flow_block_cb_setup_simple(type_data, 3646 &mlx5e_block_cb_list, 3647 mlx5e_setup_tc_block_cb, 3648 priv, priv, true); 3649 } 3650 case TC_SETUP_QDISC_MQPRIO: 3651 return mlx5e_setup_tc_mqprio(priv, type_data); 3652 default: 3653 return -EOPNOTSUPP; 3654 } 3655 } 3656 3657 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) 3658 { 3659 int i; 3660 3661 for (i = 0; i < priv->max_nch; i++) { 3662 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; 3663 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; 3664 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; 3665 int j; 3666 3667 s->rx_packets += rq_stats->packets + xskrq_stats->packets; 3668 s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes; 3669 s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets; 3670 3671 for (j = 0; j < priv->max_opened_tc; j++) { 3672 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; 3673 3674 s->tx_packets += sq_stats->packets; 3675 s->tx_bytes += sq_stats->bytes; 3676 s->tx_dropped += sq_stats->dropped; 3677 } 3678 } 3679 } 3680 3681 void 3682 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) 3683 { 3684 struct mlx5e_priv *priv = netdev_priv(dev); 3685 struct mlx5e_pport_stats *pstats = &priv->stats.pport; 3686 3687 /* In switchdev mode, monitor counters doesn't monitor 3688 * rx/tx stats of 802_3. 
The update stats mechanism 3689 * should keep the 802_3 layout counters updated 3690 */ 3691 if (!mlx5e_monitor_counter_supported(priv) || 3692 mlx5e_is_uplink_rep(priv)) { 3693 /* update HW stats in background for next time */ 3694 mlx5e_queue_update_stats(priv); 3695 } 3696 3697 if (mlx5e_is_uplink_rep(priv)) { 3698 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); 3699 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); 3700 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); 3701 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); 3702 } else { 3703 mlx5e_fold_sw_stats64(priv, stats); 3704 } 3705 3706 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; 3707 3708 stats->rx_length_errors = 3709 PPORT_802_3_GET(pstats, a_in_range_length_errors) + 3710 PPORT_802_3_GET(pstats, a_out_of_range_length_field) + 3711 PPORT_802_3_GET(pstats, a_frame_too_long_errors); 3712 stats->rx_crc_errors = 3713 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); 3714 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); 3715 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards); 3716 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + 3717 stats->rx_frame_errors; 3718 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; 3719 } 3720 3721 static void mlx5e_set_rx_mode(struct net_device *dev) 3722 { 3723 struct mlx5e_priv *priv = netdev_priv(dev); 3724 3725 queue_work(priv->wq, &priv->set_rx_mode_work); 3726 } 3727 3728 static int mlx5e_set_mac(struct net_device *netdev, void *addr) 3729 { 3730 struct mlx5e_priv *priv = netdev_priv(netdev); 3731 struct sockaddr *saddr = addr; 3732 3733 if (!is_valid_ether_addr(saddr->sa_data)) 3734 return -EADDRNOTAVAIL; 3735 3736 netif_addr_lock_bh(netdev); 3737 ether_addr_copy(netdev->dev_addr, saddr->sa_data); 3738 netif_addr_unlock_bh(netdev); 3739 3740 queue_work(priv->wq, &priv->set_rx_mode_work); 3741 3742 return 0; 3743 } 3744 3745 #define MLX5E_SET_FEATURE(features, feature, enable) \ 3746 do { \ 3747 if (enable) \ 3748 *features |= feature; \ 3749 else \ 3750 *features &= ~feature; \ 3751 } while (0) 3752 3753 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); 3754 3755 static int set_feature_lro(struct net_device *netdev, bool enable) 3756 { 3757 struct mlx5e_priv *priv = netdev_priv(netdev); 3758 struct mlx5_core_dev *mdev = priv->mdev; 3759 struct mlx5e_channels new_channels = {}; 3760 struct mlx5e_params *old_params; 3761 int err = 0; 3762 bool reset; 3763 3764 mutex_lock(&priv->state_lock); 3765 3766 if (enable && priv->xsk.refcnt) { 3767 netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n", 3768 priv->xsk.refcnt); 3769 err = -EINVAL; 3770 goto out; 3771 } 3772 3773 old_params = &priv->channels.params; 3774 if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { 3775 netdev_warn(netdev, "can't set LRO with legacy RQ\n"); 3776 err = -EINVAL; 3777 goto out; 3778 } 3779 3780 reset = test_bit(MLX5E_STATE_OPENED, &priv->state); 3781 3782 new_channels.params = *old_params; 3783 new_channels.params.lro_en = enable; 3784 3785 if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { 3786 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) == 3787 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL)) 3788 reset = false; 3789 } 3790 3791 if (!reset) { 3792 *old_params = new_channels.params; 3793 err = mlx5e_modify_tirs_lro(priv); 3794 goto out; 3795 } 3796 3797 
err = mlx5e_safe_switch_channels(priv, &new_channels, 3798 mlx5e_modify_tirs_lro_ctx, NULL); 3799 out: 3800 mutex_unlock(&priv->state_lock); 3801 return err; 3802 } 3803 3804 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) 3805 { 3806 struct mlx5e_priv *priv = netdev_priv(netdev); 3807 3808 if (enable) 3809 mlx5e_enable_cvlan_filter(priv); 3810 else 3811 mlx5e_disable_cvlan_filter(priv); 3812 3813 return 0; 3814 } 3815 3816 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 3817 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) 3818 { 3819 struct mlx5e_priv *priv = netdev_priv(netdev); 3820 3821 if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) { 3822 netdev_err(netdev, 3823 "Active offloaded tc filters, can't turn hw_tc_offload off\n"); 3824 return -EINVAL; 3825 } 3826 3827 return 0; 3828 } 3829 #endif 3830 3831 static int set_feature_rx_all(struct net_device *netdev, bool enable) 3832 { 3833 struct mlx5e_priv *priv = netdev_priv(netdev); 3834 struct mlx5_core_dev *mdev = priv->mdev; 3835 3836 return mlx5_set_port_fcs(mdev, !enable); 3837 } 3838 3839 static int set_feature_rx_fcs(struct net_device *netdev, bool enable) 3840 { 3841 struct mlx5e_priv *priv = netdev_priv(netdev); 3842 int err; 3843 3844 mutex_lock(&priv->state_lock); 3845 3846 priv->channels.params.scatter_fcs_en = enable; 3847 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable); 3848 if (err) 3849 priv->channels.params.scatter_fcs_en = !enable; 3850 3851 mutex_unlock(&priv->state_lock); 3852 3853 return err; 3854 } 3855 3856 static int set_feature_rx_vlan(struct net_device *netdev, bool enable) 3857 { 3858 struct mlx5e_priv *priv = netdev_priv(netdev); 3859 int err = 0; 3860 3861 mutex_lock(&priv->state_lock); 3862 3863 priv->channels.params.vlan_strip_disable = !enable; 3864 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 3865 goto unlock; 3866 3867 err = mlx5e_modify_channels_vsd(&priv->channels, !enable); 3868 if (err) 3869 priv->channels.params.vlan_strip_disable = enable; 3870 3871 unlock: 3872 mutex_unlock(&priv->state_lock); 3873 3874 return err; 3875 } 3876 3877 #ifdef CONFIG_MLX5_EN_ARFS 3878 static int set_feature_arfs(struct net_device *netdev, bool enable) 3879 { 3880 struct mlx5e_priv *priv = netdev_priv(netdev); 3881 int err; 3882 3883 if (enable) 3884 err = mlx5e_arfs_enable(priv); 3885 else 3886 err = mlx5e_arfs_disable(priv); 3887 3888 return err; 3889 } 3890 #endif 3891 3892 static int mlx5e_handle_feature(struct net_device *netdev, 3893 netdev_features_t *features, 3894 netdev_features_t wanted_features, 3895 netdev_features_t feature, 3896 mlx5e_feature_handler feature_handler) 3897 { 3898 netdev_features_t changes = wanted_features ^ netdev->features; 3899 bool enable = !!(wanted_features & feature); 3900 int err; 3901 3902 if (!(changes & feature)) 3903 return 0; 3904 3905 err = feature_handler(netdev, enable); 3906 if (err) { 3907 netdev_err(netdev, "%s feature %pNF failed, err %d\n", 3908 enable ? 
"Enable" : "Disable", &feature, err); 3909 return err; 3910 } 3911 3912 MLX5E_SET_FEATURE(features, feature, enable); 3913 return 0; 3914 } 3915 3916 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) 3917 { 3918 netdev_features_t oper_features = netdev->features; 3919 int err = 0; 3920 3921 #define MLX5E_HANDLE_FEATURE(feature, handler) \ 3922 mlx5e_handle_feature(netdev, &oper_features, features, feature, handler) 3923 3924 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); 3925 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, 3926 set_feature_cvlan_filter); 3927 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 3928 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); 3929 #endif 3930 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); 3931 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); 3932 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); 3933 #ifdef CONFIG_MLX5_EN_ARFS 3934 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); 3935 #endif 3936 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx); 3937 3938 if (err) { 3939 netdev->features = oper_features; 3940 return -EINVAL; 3941 } 3942 3943 return 0; 3944 } 3945 3946 static netdev_features_t mlx5e_fix_features(struct net_device *netdev, 3947 netdev_features_t features) 3948 { 3949 struct mlx5e_priv *priv = netdev_priv(netdev); 3950 struct mlx5e_params *params; 3951 3952 mutex_lock(&priv->state_lock); 3953 params = &priv->channels.params; 3954 if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) { 3955 /* HW strips the outer C-tag header, this is a problem 3956 * for S-tag traffic. 3957 */ 3958 features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3959 if (!params->vlan_strip_disable) 3960 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); 3961 } 3962 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { 3963 if (features & NETIF_F_LRO) { 3964 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); 3965 features &= ~NETIF_F_LRO; 3966 } 3967 } 3968 3969 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { 3970 features &= ~NETIF_F_RXHASH; 3971 if (netdev->features & NETIF_F_RXHASH) 3972 netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n"); 3973 } 3974 3975 mutex_unlock(&priv->state_lock); 3976 3977 return features; 3978 } 3979 3980 static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, 3981 struct mlx5e_channels *chs, 3982 struct mlx5e_params *new_params, 3983 struct mlx5_core_dev *mdev) 3984 { 3985 u16 ix; 3986 3987 for (ix = 0; ix < chs->params.num_channels; ix++) { 3988 struct xsk_buff_pool *xsk_pool = 3989 mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix); 3990 struct mlx5e_xsk_param xsk; 3991 3992 if (!xsk_pool) 3993 continue; 3994 3995 mlx5e_build_xsk_param(xsk_pool, &xsk); 3996 3997 if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) { 3998 u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk); 3999 int max_mtu_frame, max_mtu_page, max_mtu; 4000 4001 /* Two criteria must be met: 4002 * 1. HW MTU + all headrooms <= XSK frame size. 4003 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE. 4004 */ 4005 max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr); 4006 max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk); 4007 max_mtu = min(max_mtu_frame, max_mtu_page); 4008 4009 netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. 
Try MTU <= %d\n", 4010 new_params->sw_mtu, ix, max_mtu); 4011 return false; 4012 } 4013 } 4014 4015 return true; 4016 } 4017 4018 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, 4019 mlx5e_fp_preactivate preactivate) 4020 { 4021 struct mlx5e_priv *priv = netdev_priv(netdev); 4022 struct mlx5e_channels new_channels = {}; 4023 struct mlx5e_params *params; 4024 int err = 0; 4025 bool reset; 4026 4027 mutex_lock(&priv->state_lock); 4028 4029 params = &priv->channels.params; 4030 4031 reset = !params->lro_en; 4032 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); 4033 4034 new_channels.params = *params; 4035 new_channels.params.sw_mtu = new_mtu; 4036 err = mlx5e_validate_params(priv, &new_channels.params); 4037 if (err) 4038 goto out; 4039 4040 if (params->xdp_prog && 4041 !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { 4042 netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", 4043 new_mtu, mlx5e_xdp_max_mtu(params, NULL)); 4044 err = -EINVAL; 4045 goto out; 4046 } 4047 4048 if (priv->xsk.refcnt && 4049 !mlx5e_xsk_validate_mtu(netdev, &priv->channels, 4050 &new_channels.params, priv->mdev)) { 4051 err = -EINVAL; 4052 goto out; 4053 } 4054 4055 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { 4056 bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, 4057 &new_channels.params, 4058 NULL); 4059 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL); 4060 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL); 4061 4062 /* If XSK is active, XSK RQs are linear. */ 4063 is_linear |= priv->xsk.refcnt; 4064 4065 /* Always reset in linear mode - hw_mtu is used in data path. */ 4066 reset = reset && (is_linear || (ppw_old != ppw_new)); 4067 } 4068 4069 if (!reset) { 4070 params->sw_mtu = new_mtu; 4071 if (preactivate) 4072 preactivate(priv, NULL); 4073 netdev->mtu = params->sw_mtu; 4074 goto out; 4075 } 4076 4077 err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL); 4078 if (err) 4079 goto out; 4080 4081 netdev->mtu = new_channels.params.sw_mtu; 4082 4083 out: 4084 mutex_unlock(&priv->state_lock); 4085 return err; 4086 } 4087 4088 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu) 4089 { 4090 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx); 4091 } 4092 4093 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) 4094 { 4095 struct hwtstamp_config config; 4096 int err; 4097 4098 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || 4099 (mlx5_clock_get_ptp_index(priv->mdev) == -1)) 4100 return -EOPNOTSUPP; 4101 4102 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 4103 return -EFAULT; 4104 4105 /* TX HW timestamp */ 4106 switch (config.tx_type) { 4107 case HWTSTAMP_TX_OFF: 4108 case HWTSTAMP_TX_ON: 4109 break; 4110 default: 4111 return -ERANGE; 4112 } 4113 4114 mutex_lock(&priv->state_lock); 4115 /* RX HW timestamp */ 4116 switch (config.rx_filter) { 4117 case HWTSTAMP_FILTER_NONE: 4118 /* Reset CQE compression to Admin default */ 4119 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def); 4120 break; 4121 case HWTSTAMP_FILTER_ALL: 4122 case HWTSTAMP_FILTER_SOME: 4123 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 4124 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 4125 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 4126 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 4127 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 4128 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 4129 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 4130 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 
4131 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 4132 case HWTSTAMP_FILTER_PTP_V2_EVENT: 4133 case HWTSTAMP_FILTER_PTP_V2_SYNC: 4134 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 4135 case HWTSTAMP_FILTER_NTP_ALL: 4136 /* Disable CQE compression */ 4137 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) 4138 netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); 4139 err = mlx5e_modify_rx_cqe_compression_locked(priv, false); 4140 if (err) { 4141 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); 4142 mutex_unlock(&priv->state_lock); 4143 return err; 4144 } 4145 config.rx_filter = HWTSTAMP_FILTER_ALL; 4146 break; 4147 default: 4148 mutex_unlock(&priv->state_lock); 4149 return -ERANGE; 4150 } 4151 4152 memcpy(&priv->tstamp, &config, sizeof(config)); 4153 mutex_unlock(&priv->state_lock); 4154 4155 /* might need to fix some features */ 4156 netdev_update_features(priv->netdev); 4157 4158 return copy_to_user(ifr->ifr_data, &config, 4159 sizeof(config)) ? -EFAULT : 0; 4160 } 4161 4162 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr) 4163 { 4164 struct hwtstamp_config *cfg = &priv->tstamp; 4165 4166 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) 4167 return -EOPNOTSUPP; 4168 4169 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0; 4170 } 4171 4172 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4173 { 4174 struct mlx5e_priv *priv = netdev_priv(dev); 4175 4176 switch (cmd) { 4177 case SIOCSHWTSTAMP: 4178 return mlx5e_hwstamp_set(priv, ifr); 4179 case SIOCGHWTSTAMP: 4180 return mlx5e_hwstamp_get(priv, ifr); 4181 default: 4182 return -EOPNOTSUPP; 4183 } 4184 } 4185 4186 #ifdef CONFIG_MLX5_ESWITCH 4187 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) 4188 { 4189 struct mlx5e_priv *priv = netdev_priv(dev); 4190 struct mlx5_core_dev *mdev = priv->mdev; 4191 4192 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac); 4193 } 4194 4195 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 4196 __be16 vlan_proto) 4197 { 4198 struct mlx5e_priv *priv = netdev_priv(dev); 4199 struct mlx5_core_dev *mdev = priv->mdev; 4200 4201 if (vlan_proto != htons(ETH_P_8021Q)) 4202 return -EPROTONOSUPPORT; 4203 4204 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1, 4205 vlan, qos); 4206 } 4207 4208 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 4209 { 4210 struct mlx5e_priv *priv = netdev_priv(dev); 4211 struct mlx5_core_dev *mdev = priv->mdev; 4212 4213 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting); 4214 } 4215 4216 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting) 4217 { 4218 struct mlx5e_priv *priv = netdev_priv(dev); 4219 struct mlx5_core_dev *mdev = priv->mdev; 4220 4221 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting); 4222 } 4223 4224 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 4225 int max_tx_rate) 4226 { 4227 struct mlx5e_priv *priv = netdev_priv(dev); 4228 struct mlx5_core_dev *mdev = priv->mdev; 4229 4230 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, 4231 max_tx_rate, min_tx_rate); 4232 } 4233 4234 static int mlx5_vport_link2ifla(u8 esw_link) 4235 { 4236 switch (esw_link) { 4237 case MLX5_VPORT_ADMIN_STATE_DOWN: 4238 return IFLA_VF_LINK_STATE_DISABLE; 4239 case MLX5_VPORT_ADMIN_STATE_UP: 4240 return IFLA_VF_LINK_STATE_ENABLE; 4241 } 4242 return IFLA_VF_LINK_STATE_AUTO; 4243 } 4244 4245 
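/*
 * Editor's note: an illustrative aside, not part of the driver.
 * mlx5_vport_link2ifla() above and its inverse mlx5_ifla_link2vport()
 * (defined just below) translate between the rtnetlink VF link-state
 * encoding and the e-switch vport admin state. From userspace the
 * round trip is typically exercised with iproute2, e.g.:
 *
 *	# ip link set dev eth0 vf 0 state disable	(-> ADMIN_STATE_DOWN)
 *	# ip link set dev eth0 vf 0 state enable	(-> ADMIN_STATE_UP)
 *	# ip link set dev eth0 vf 0 state auto		(-> ADMIN_STATE_AUTO)
 *
 * Any unrecognized value falls back to "auto" in both directions.
 */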
static int mlx5_ifla_link2vport(u8 ifla_link) 4246 { 4247 switch (ifla_link) { 4248 case IFLA_VF_LINK_STATE_DISABLE: 4249 return MLX5_VPORT_ADMIN_STATE_DOWN; 4250 case IFLA_VF_LINK_STATE_ENABLE: 4251 return MLX5_VPORT_ADMIN_STATE_UP; 4252 } 4253 return MLX5_VPORT_ADMIN_STATE_AUTO; 4254 } 4255 4256 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf, 4257 int link_state) 4258 { 4259 struct mlx5e_priv *priv = netdev_priv(dev); 4260 struct mlx5_core_dev *mdev = priv->mdev; 4261 4262 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1, 4263 mlx5_ifla_link2vport(link_state)); 4264 } 4265 4266 int mlx5e_get_vf_config(struct net_device *dev, 4267 int vf, struct ifla_vf_info *ivi) 4268 { 4269 struct mlx5e_priv *priv = netdev_priv(dev); 4270 struct mlx5_core_dev *mdev = priv->mdev; 4271 int err; 4272 4273 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi); 4274 if (err) 4275 return err; 4276 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate); 4277 return 0; 4278 } 4279 4280 int mlx5e_get_vf_stats(struct net_device *dev, 4281 int vf, struct ifla_vf_stats *vf_stats) 4282 { 4283 struct mlx5e_priv *priv = netdev_priv(dev); 4284 struct mlx5_core_dev *mdev = priv->mdev; 4285 4286 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, 4287 vf_stats); 4288 } 4289 #endif 4290 4291 static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type) 4292 { 4293 switch (proto_type) { 4294 case IPPROTO_GRE: 4295 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); 4296 case IPPROTO_IPIP: 4297 case IPPROTO_IPV6: 4298 return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || 4299 MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx)); 4300 default: 4301 return false; 4302 } 4303 } 4304 4305 static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev, 4306 struct sk_buff *skb) 4307 { 4308 switch (skb->inner_protocol) { 4309 case htons(ETH_P_IP): 4310 case htons(ETH_P_IPV6): 4311 case htons(ETH_P_TEB): 4312 return true; 4313 case htons(ETH_P_MPLS_UC): 4314 case htons(ETH_P_MPLS_MC): 4315 return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre); 4316 } 4317 return false; 4318 } 4319 4320 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, 4321 struct sk_buff *skb, 4322 netdev_features_t features) 4323 { 4324 unsigned int offset = 0; 4325 struct udphdr *udph; 4326 u8 proto; 4327 u16 port; 4328 4329 switch (vlan_get_protocol(skb)) { 4330 case htons(ETH_P_IP): 4331 proto = ip_hdr(skb)->protocol; 4332 break; 4333 case htons(ETH_P_IPV6): 4334 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL); 4335 break; 4336 default: 4337 goto out; 4338 } 4339 4340 switch (proto) { 4341 case IPPROTO_GRE: 4342 if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb)) 4343 return features; 4344 break; 4345 case IPPROTO_IPIP: 4346 case IPPROTO_IPV6: 4347 if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP)) 4348 return features; 4349 break; 4350 case IPPROTO_UDP: 4351 udph = udp_hdr(skb); 4352 port = be16_to_cpu(udph->dest); 4353 4354 /* Verify if UDP port is being offloaded by HW */ 4355 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) 4356 return features; 4357 4358 #if IS_ENABLED(CONFIG_GENEVE) 4359 /* Support Geneve offload for default UDP port */ 4360 if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev)) 4361 return features; 4362 #endif 4363 } 4364 4365 out: 4366 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */ 4367 return features & ~(NETIF_F_CSUM_MASK | 
NETIF_F_GSO_MASK);
4368 }
4369
4370 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
4371 				       struct net_device *netdev,
4372 				       netdev_features_t features)
4373 {
4374 	struct mlx5e_priv *priv = netdev_priv(netdev);
4375
4376 	features = vlan_features_check(skb, features);
4377 	features = vxlan_features_check(skb, features);
4378
4379 #ifdef CONFIG_MLX5_EN_IPSEC
4380 	if (mlx5e_ipsec_feature_check(skb, netdev, features))
4381 		return features;
4382 #endif
4383
4384 	/* Validate if the tunneled packet is being offloaded by HW */
4385 	if (skb->encapsulation &&
4386 	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
4387 		return mlx5e_tunnel_features_check(priv, skb, features);
4388
4389 	return features;
4390 }
4391
4392 static void mlx5e_tx_timeout_work(struct work_struct *work)
4393 {
4394 	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
4395 					       tx_timeout_work);
4396 	struct net_device *netdev = priv->netdev;
4397 	int i;
4398
4399 	rtnl_lock();
4400 	mutex_lock(&priv->state_lock);
4401
4402 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4403 		goto unlock;
4404
4405 	for (i = 0; i < netdev->real_num_tx_queues; i++) {
4406 		struct netdev_queue *dev_queue =
4407 			netdev_get_tx_queue(netdev, i);
4408 		struct mlx5e_txqsq *sq = priv->txq2sq[i];
4409
4410 		if (!netif_xmit_stopped(dev_queue))
4411 			continue;
4412
4413 		if (mlx5e_reporter_tx_timeout(sq))
4414 		/* break if we tried to reopen the channels */
4415 			break;
4416 	}
4417
4418 unlock:
4419 	mutex_unlock(&priv->state_lock);
4420 	rtnl_unlock();
4421 }
4422
4423 static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
4424 {
4425 	struct mlx5e_priv *priv = netdev_priv(dev);
4426
4427 	netdev_err(dev, "TX timeout detected\n");
4428 	queue_work(priv->wq, &priv->tx_timeout_work);
4429 }
4430
4431 static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
4432 {
4433 	struct net_device *netdev = priv->netdev;
4434 	struct mlx5e_channels new_channels = {};
4435
4436 	if (priv->channels.params.lro_en) {
4437 		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
4438 		return -EINVAL;
4439 	}
4440
4441 	if (MLX5_IPSEC_DEV(priv->mdev)) {
4442 		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
4443 		return -EINVAL;
4444 	}
4445
4446 	new_channels.params = priv->channels.params;
4447 	new_channels.params.xdp_prog = prog;
4448
4449 	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
4450 	 * the XDP program.
4451 	 */
4452 	if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
4453 		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
4454 			    new_channels.params.sw_mtu,
4455 			    mlx5e_xdp_max_mtu(&new_channels.params, NULL));
4456 		return -EINVAL;
4457 	}
4458
4459 	return 0;
4460 }
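/*
 * Editor's note: an illustrative sketch, not part of the driver. Once
 * mlx5e_xdp_set() below publishes a program via mlx5e_rq_replace_xdp_prog(),
 * the RX datapath consumes it under RCU roughly like this (names abridged;
 * the actual reader lives in en/xdp.c):
 *
 *	struct bpf_prog *prog;
 *	u32 act = XDP_PASS;
 *
 *	rcu_read_lock();
 *	prog = rcu_dereference(rq->xdp_prog);
 *	if (prog)
 *		act = bpf_prog_run_xdp(prog, &xdp);
 *	rcu_read_unlock();
 *
 * This pairing is why the writer side may swap programs without a full
 * channel reset: readers only ever observe the old or the new pointer.
 */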
4461
4462 static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
4463 {
4464 	struct bpf_prog *old_prog;
4465
4466 	old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
4467 				       lockdep_is_held(&rq->priv->state_lock));
4468 	if (old_prog)
4469 		bpf_prog_put(old_prog);
4470 }
4471
4472 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
4473 {
4474 	struct mlx5e_priv *priv = netdev_priv(netdev);
4475 	struct bpf_prog *old_prog;
4476 	bool reset, was_opened;
4477 	int err = 0;
4478 	int i;
4479
4480 	mutex_lock(&priv->state_lock);
4481
4482 	if (prog) {
4483 		err = mlx5e_xdp_allowed(priv, prog);
4484 		if (err)
4485 			goto unlock;
4486 	}
4487
4488 	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
4489 	/* no need for full reset when exchanging programs */
4490 	reset = (!priv->channels.params.xdp_prog || !prog);
4491
4492 	if (was_opened && !reset)
4493 		/* num_channels is invariant here, so we can take the
4494 		 * batched reference right upfront.
4495 		 */
4496 		bpf_prog_add(prog, priv->channels.num);
4497
4498 	if (was_opened && reset) {
4499 		struct mlx5e_channels new_channels = {};
4500
4501 		new_channels.params = priv->channels.params;
4502 		new_channels.params.xdp_prog = prog;
4503 		mlx5e_set_rq_type(priv->mdev, &new_channels.params);
4504 		old_prog = priv->channels.params.xdp_prog;
4505
4506 		err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
4507 		if (err)
4508 			goto unlock;
4509 	} else {
4510 		/* Exchange programs. The program reference we got from the
4511 		 * caller covers the stored program, as we cannot fail from
4512 		 * this point onwards.
4513 		 */
4514 		old_prog = xchg(&priv->channels.params.xdp_prog, prog);
4515 	}
4516
4517 	if (old_prog)
4518 		bpf_prog_put(old_prog);
4519
4520 	if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
4521 		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
4522
4523 	if (!was_opened || reset)
4524 		goto unlock;
4525
4526 	/* Exchanging programs without a reset: update the reference counts
4527 	 * on behalf of the channels' RQs here.
4528 	 */
4529 	for (i = 0; i < priv->channels.num; i++) {
4530 		struct mlx5e_channel *c = priv->channels.c[i];
4531
4532 		mlx5e_rq_replace_xdp_prog(&c->rq, prog);
4533 		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
4534 			mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
4535 	}
4536
4537 unlock:
4538 	mutex_unlock(&priv->state_lock);
4539 	return err;
4540 }
4541
4542 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4543 {
4544 	switch (xdp->command) {
4545 	case XDP_SETUP_PROG:
4546 		return mlx5e_xdp_set(dev, xdp->prog);
4547 	case XDP_SETUP_XSK_POOL:
4548 		return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
4549 					    xdp->xsk.queue_id);
4550 	default:
4551 		return -EINVAL;
4552 	}
4553 }
4554
4555 #ifdef CONFIG_MLX5_ESWITCH
4556 static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4557 				struct net_device *dev, u32 filter_mask,
4558 				int nlflags)
4559 {
4560 	struct mlx5e_priv *priv = netdev_priv(dev);
4561 	struct mlx5_core_dev *mdev = priv->mdev;
4562 	u8 mode, setting;
4563 	int err;
4564
4565 	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
4566 	if (err)
4567 		return err;
4568 	mode = setting ?
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB; 4568 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, 4569 mode, 4570 0, 0, nlflags, filter_mask, NULL); 4571 } 4572 4573 static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 4574 u16 flags, struct netlink_ext_ack *extack) 4575 { 4576 struct mlx5e_priv *priv = netdev_priv(dev); 4577 struct mlx5_core_dev *mdev = priv->mdev; 4578 struct nlattr *attr, *br_spec; 4579 u16 mode = BRIDGE_MODE_UNDEF; 4580 u8 setting; 4581 int rem; 4582 4583 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4584 if (!br_spec) 4585 return -EINVAL; 4586 4587 nla_for_each_nested(attr, br_spec, rem) { 4588 if (nla_type(attr) != IFLA_BRIDGE_MODE) 4589 continue; 4590 4591 if (nla_len(attr) < sizeof(mode)) 4592 return -EINVAL; 4593 4594 mode = nla_get_u16(attr); 4595 if (mode > BRIDGE_MODE_VEPA) 4596 return -EINVAL; 4597 4598 break; 4599 } 4600 4601 if (mode == BRIDGE_MODE_UNDEF) 4602 return -EINVAL; 4603 4604 setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0; 4605 return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting); 4606 } 4607 #endif 4608 4609 const struct net_device_ops mlx5e_netdev_ops = { 4610 .ndo_open = mlx5e_open, 4611 .ndo_stop = mlx5e_close, 4612 .ndo_start_xmit = mlx5e_xmit, 4613 .ndo_setup_tc = mlx5e_setup_tc, 4614 .ndo_select_queue = mlx5e_select_queue, 4615 .ndo_get_stats64 = mlx5e_get_stats, 4616 .ndo_set_rx_mode = mlx5e_set_rx_mode, 4617 .ndo_set_mac_address = mlx5e_set_mac, 4618 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, 4619 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, 4620 .ndo_set_features = mlx5e_set_features, 4621 .ndo_fix_features = mlx5e_fix_features, 4622 .ndo_change_mtu = mlx5e_change_nic_mtu, 4623 .ndo_do_ioctl = mlx5e_ioctl, 4624 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, 4625 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, 4626 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, 4627 .ndo_features_check = mlx5e_features_check, 4628 .ndo_tx_timeout = mlx5e_tx_timeout, 4629 .ndo_bpf = mlx5e_xdp, 4630 .ndo_xdp_xmit = mlx5e_xdp_xmit, 4631 .ndo_xsk_wakeup = mlx5e_xsk_wakeup, 4632 #ifdef CONFIG_MLX5_EN_ARFS 4633 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 4634 #endif 4635 #ifdef CONFIG_MLX5_ESWITCH 4636 .ndo_bridge_setlink = mlx5e_bridge_setlink, 4637 .ndo_bridge_getlink = mlx5e_bridge_getlink, 4638 4639 /* SRIOV E-Switch NDOs */ 4640 .ndo_set_vf_mac = mlx5e_set_vf_mac, 4641 .ndo_set_vf_vlan = mlx5e_set_vf_vlan, 4642 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, 4643 .ndo_set_vf_trust = mlx5e_set_vf_trust, 4644 .ndo_set_vf_rate = mlx5e_set_vf_rate, 4645 .ndo_get_vf_config = mlx5e_get_vf_config, 4646 .ndo_set_vf_link_state = mlx5e_set_vf_link_state, 4647 .ndo_get_vf_stats = mlx5e_get_vf_stats, 4648 #endif 4649 .ndo_get_devlink_port = mlx5e_get_devlink_port, 4650 }; 4651 4652 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, 4653 int num_channels) 4654 { 4655 int i; 4656 4657 for (i = 0; i < len; i++) 4658 indirection_rqt[i] = i % num_channels; 4659 } 4660 4661 static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) 4662 { 4663 u32 link_speed = 0; 4664 u32 pci_bw = 0; 4665 4666 mlx5e_port_max_linkspeed(mdev, &link_speed); 4667 pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); 4668 mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", 4669 link_speed, pci_bw); 4670 4671 #define MLX5E_SLOW_PCI_RATIO (2) 4672 4673 return link_speed && pci_bw && 4674 link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; 4675 } 4676 4677 static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 
cq_period_mode) 4678 { 4679 struct dim_cq_moder moder; 4680 4681 moder.cq_period_mode = cq_period_mode; 4682 moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 4683 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 4684 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) 4685 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; 4686 4687 return moder; 4688 } 4689 4690 static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) 4691 { 4692 struct dim_cq_moder moder; 4693 4694 moder.cq_period_mode = cq_period_mode; 4695 moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 4696 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 4697 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) 4698 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; 4699 4700 return moder; 4701 } 4702 4703 static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) 4704 { 4705 return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 4706 DIM_CQ_PERIOD_MODE_START_FROM_CQE : 4707 DIM_CQ_PERIOD_MODE_START_FROM_EQE; 4708 } 4709 4710 void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4711 { 4712 if (params->tx_dim_enabled) { 4713 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); 4714 4715 params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); 4716 } else { 4717 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); 4718 } 4719 } 4720 4721 void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4722 { 4723 if (params->rx_dim_enabled) { 4724 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); 4725 4726 params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); 4727 } else { 4728 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); 4729 } 4730 } 4731 4732 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4733 { 4734 mlx5e_reset_tx_moderation(params, cq_period_mode); 4735 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 4736 params->tx_cq_moderation.cq_period_mode == 4737 MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4738 } 4739 4740 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4741 { 4742 mlx5e_reset_rx_moderation(params, cq_period_mode); 4743 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 4744 params->rx_cq_moderation.cq_period_mode == 4745 MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4746 } 4747 4748 static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) 4749 { 4750 int i; 4751 4752 /* The supported periods are organized in ascending order */ 4753 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++) 4754 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout) 4755 break; 4756 4757 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); 4758 } 4759 4760 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, 4761 struct mlx5e_params *params) 4762 { 4763 /* Prefer Striding RQ, unless any of the following holds: 4764 * - Striding RQ configuration is not possible/supported. 4765 * - Slow PCI heuristic. 4766 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 4767 * 4768 * No XSK params: checking the availability of striding RQ in general. 
4769 */ 4770 if (!slow_pci_heuristic(mdev) && 4771 mlx5e_striding_rq_possible(mdev, params) && 4772 (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) || 4773 !mlx5e_rx_is_linear_skb(params, NULL))) 4774 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); 4775 mlx5e_set_rq_type(mdev, params); 4776 mlx5e_init_rq_type_params(mdev, params); 4777 } 4778 4779 void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, 4780 u16 num_channels) 4781 { 4782 enum mlx5e_traffic_types tt; 4783 4784 rss_params->hfunc = ETH_RSS_HASH_TOP; 4785 netdev_rss_key_fill(rss_params->toeplitz_hash_key, 4786 sizeof(rss_params->toeplitz_hash_key)); 4787 mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, 4788 MLX5E_INDIR_RQT_SIZE, num_channels); 4789 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 4790 rss_params->rx_hash_fields[tt] = 4791 tirc_default_config[tt].rx_hash_fields; 4792 } 4793 4794 void mlx5e_build_nic_params(struct mlx5e_priv *priv, 4795 struct mlx5e_xsk *xsk, 4796 struct mlx5e_rss_params *rss_params, 4797 struct mlx5e_params *params, 4798 u16 mtu) 4799 { 4800 struct mlx5_core_dev *mdev = priv->mdev; 4801 u8 rx_cq_period_mode; 4802 4803 params->sw_mtu = mtu; 4804 params->hard_mtu = MLX5E_ETH_HARD_MTU; 4805 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, 4806 priv->max_nch); 4807 params->num_tc = 1; 4808 4809 /* SQ */ 4810 params->log_sq_size = is_kdump_kernel() ? 4811 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : 4812 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 4813 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, 4814 MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); 4815 4816 /* XDP SQ */ 4817 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, 4818 MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); 4819 4820 /* set CQE compression */ 4821 params->rx_cqe_compress_def = false; 4822 if (MLX5_CAP_GEN(mdev, cqe_compression) && 4823 MLX5_CAP_GEN(mdev, vport_group_manager)) 4824 params->rx_cqe_compress_def = slow_pci_heuristic(mdev); 4825 4826 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); 4827 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false); 4828 4829 /* RQ */ 4830 mlx5e_build_rq_params(mdev, params); 4831 4832 /* HW LRO */ 4833 if (MLX5_CAP_ETH(mdev, lro_cap) && 4834 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { 4835 /* No XSK params: checking the availability of striding RQ in general. */ 4836 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) 4837 params->lro_en = !slow_pci_heuristic(mdev); 4838 } 4839 params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); 4840 4841 /* CQ moderation params */ 4842 rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
4843 MLX5_CQ_PERIOD_MODE_START_FROM_CQE : 4844 MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 4845 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); 4846 params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); 4847 mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); 4848 mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 4849 4850 /* TX inline */ 4851 mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); 4852 4853 /* RSS */ 4854 mlx5e_build_rss_params(rss_params, params->num_channels); 4855 params->tunneled_offload_en = 4856 mlx5e_tunnel_inner_ft_supported(mdev); 4857 4858 /* AF_XDP */ 4859 params->xsk = xsk; 4860 } 4861 4862 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) 4863 { 4864 struct mlx5e_priv *priv = netdev_priv(netdev); 4865 4866 mlx5_query_mac_address(priv->mdev, netdev->dev_addr); 4867 if (is_zero_ether_addr(netdev->dev_addr) && 4868 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { 4869 eth_hw_addr_random(netdev); 4870 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr); 4871 } 4872 } 4873 4874 static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table, 4875 unsigned int entry, struct udp_tunnel_info *ti) 4876 { 4877 struct mlx5e_priv *priv = netdev_priv(netdev); 4878 4879 return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port)); 4880 } 4881 4882 static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table, 4883 unsigned int entry, struct udp_tunnel_info *ti) 4884 { 4885 struct mlx5e_priv *priv = netdev_priv(netdev); 4886 4887 return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port)); 4888 } 4889 4890 void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv) 4891 { 4892 if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) 4893 return; 4894 4895 priv->nic_info.set_port = mlx5e_vxlan_set_port; 4896 priv->nic_info.unset_port = mlx5e_vxlan_unset_port; 4897 priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 4898 UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN; 4899 priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; 4900 /* Don't count the space hard-coded to the IANA port */ 4901 priv->nic_info.tables[0].n_entries = 4902 mlx5_vxlan_max_udp_ports(priv->mdev) - 1; 4903 4904 priv->netdev->udp_tunnel_nic_info = &priv->nic_info; 4905 } 4906 4907 static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev) 4908 { 4909 int tt; 4910 4911 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { 4912 if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt))) 4913 return true; 4914 } 4915 return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)); 4916 } 4917 4918 static void mlx5e_build_nic_netdev(struct net_device *netdev) 4919 { 4920 struct mlx5e_priv *priv = netdev_priv(netdev); 4921 struct mlx5_core_dev *mdev = priv->mdev; 4922 bool fcs_supported; 4923 bool fcs_enabled; 4924 4925 SET_NETDEV_DEV(netdev, mdev->device); 4926 4927 netdev->netdev_ops = &mlx5e_netdev_ops; 4928 4929 mlx5e_dcbnl_build_netdev(netdev); 4930 4931 netdev->watchdog_timeo = 15 * HZ; 4932 4933 netdev->ethtool_ops = &mlx5e_ethtool_ops; 4934 4935 netdev->vlan_features |= NETIF_F_SG; 4936 netdev->vlan_features |= NETIF_F_HW_CSUM; 4937 netdev->vlan_features |= NETIF_F_GRO; 4938 netdev->vlan_features |= NETIF_F_TSO; 4939 netdev->vlan_features |= NETIF_F_TSO6; 4940 netdev->vlan_features |= NETIF_F_RXCSUM; 4941 netdev->vlan_features |= NETIF_F_RXHASH; 4942 4943 netdev->mpls_features |= NETIF_F_SG; 4944 netdev->mpls_features |= NETIF_F_HW_CSUM; 4945 
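/*
 * Editor's note (illustrative, not driver code): the vlan_features and
 * mpls_features masks being built here, and the hw_enc_features and
 * hw_features masks set just below, bound what the stack may request for
 * VLAN/MPLS/tunnel traffic and what the administrator may toggle. The
 * result is visible from userspace, e.g.:
 *
 *	# ethtool -k eth0
 *	tcp-segmentation-offload: on
 *	large-receive-offload: off
 *	...
 *
 * Bits present in hw_features are reported as changeable; everything
 * else shows up as "[fixed]".
 */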
netdev->mpls_features |= NETIF_F_TSO;
4946 	netdev->mpls_features |= NETIF_F_TSO6;
4947
4948 	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
4949 	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
4950
4951 	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
4952 	    mlx5e_check_fragmented_striding_rq_cap(mdev))
4953 		netdev->vlan_features |= NETIF_F_LRO;
4954
4955 	netdev->hw_features = netdev->vlan_features;
4956 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4957 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4958 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4959 	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4960
4961 	mlx5e_vxlan_set_netdev_info(priv);
4962
4963 	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
4964 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
4965 		netdev->hw_enc_features |= NETIF_F_TSO;
4966 		netdev->hw_enc_features |= NETIF_F_TSO6;
4967 		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4968 	}
4969
4970 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
4971 		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
4972 				       NETIF_F_GSO_UDP_TUNNEL_CSUM;
4973 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4974 					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
4975 		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
4976 		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
4977 					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4978 	}
4979
4980 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
4981 		netdev->hw_features |= NETIF_F_GSO_GRE |
4982 				       NETIF_F_GSO_GRE_CSUM;
4983 		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4984 					   NETIF_F_GSO_GRE_CSUM;
4985 		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4986 						NETIF_F_GSO_GRE_CSUM;
4987 	}
4988
4989 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
4990 		netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
4991 				       NETIF_F_GSO_IPXIP6;
4992 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
4993 					   NETIF_F_GSO_IPXIP6;
4994 		netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
4995 						NETIF_F_GSO_IPXIP6;
4996 	}
4997
4998 	netdev->hw_features |= NETIF_F_GSO_PARTIAL;
4999 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
5000 	netdev->hw_features |= NETIF_F_GSO_UDP_L4;
5001 	netdev->features |= NETIF_F_GSO_UDP_L4;
5002
5003 	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
5004
5005 	if (fcs_supported)
5006 		netdev->hw_features |= NETIF_F_RXALL;
5007
5008 	if (MLX5_CAP_ETH(mdev, scatter_fcs))
5009 		netdev->hw_features |= NETIF_F_RXFCS;
5010
5011 	netdev->features = netdev->hw_features;
5012 	if (!priv->channels.params.lro_en)
5013 		netdev->features &= ~NETIF_F_LRO;
5014
5015 	if (fcs_enabled)
5016 		netdev->features &= ~NETIF_F_RXALL;
5017
5018 	if (!priv->channels.params.scatter_fcs_en)
5019 		netdev->features &= ~NETIF_F_RXFCS;
5020
5021 	/* Prefer CQE compression over rxhash */
5022 	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
5023 		netdev->features &= ~NETIF_F_RXHASH;
5024
5025 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
5026 	if (FT_CAP(flow_modify_en) &&
5027 	    FT_CAP(modify_root) &&
5028 	    FT_CAP(identified_miss_table_mode) &&
5029 	    FT_CAP(flow_table_modify)) {
5030 #ifdef CONFIG_MLX5_ESWITCH
5031 		netdev->hw_features |= NETIF_F_HW_TC;
5032 #endif
5033 #ifdef CONFIG_MLX5_EN_ARFS
5034 		netdev->hw_features |= NETIF_F_NTUPLE;
5035 #endif
5036 	}
5037
5038 	netdev->features |= NETIF_F_HIGHDMA;
5039 	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5040
5041 	netdev->priv_flags |= IFF_UNICAST_FLT;
5042
5043 	mlx5e_set_netdev_dev_addr(netdev);
5044 	mlx5e_ipsec_build_netdev(priv);
5045 	mlx5e_tls_build_netdev(priv);
5046 }
5047
5048 void
mlx5e_create_q_counters(struct mlx5e_priv *priv) 5049 { 5050 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; 5051 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; 5052 struct mlx5_core_dev *mdev = priv->mdev; 5053 int err; 5054 5055 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); 5056 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); 5057 if (!err) 5058 priv->q_counter = 5059 MLX5_GET(alloc_q_counter_out, out, counter_set_id); 5060 5061 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); 5062 if (!err) 5063 priv->drop_rq_q_counter = 5064 MLX5_GET(alloc_q_counter_out, out, counter_set_id); 5065 } 5066 5067 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) 5068 { 5069 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; 5070 5071 MLX5_SET(dealloc_q_counter_in, in, opcode, 5072 MLX5_CMD_OP_DEALLOC_Q_COUNTER); 5073 if (priv->q_counter) { 5074 MLX5_SET(dealloc_q_counter_in, in, counter_set_id, 5075 priv->q_counter); 5076 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in); 5077 } 5078 5079 if (priv->drop_rq_q_counter) { 5080 MLX5_SET(dealloc_q_counter_in, in, counter_set_id, 5081 priv->drop_rq_q_counter); 5082 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in); 5083 } 5084 } 5085 5086 static int mlx5e_nic_init(struct mlx5_core_dev *mdev, 5087 struct net_device *netdev, 5088 const struct mlx5e_profile *profile, 5089 void *ppriv) 5090 { 5091 struct mlx5e_priv *priv = netdev_priv(netdev); 5092 struct mlx5e_rss_params *rss = &priv->rss_params; 5093 int err; 5094 5095 err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); 5096 if (err) 5097 return err; 5098 5099 mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params, 5100 netdev->mtu); 5101 5102 mlx5e_timestamp_init(priv); 5103 5104 err = mlx5e_ipsec_init(priv); 5105 if (err) 5106 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); 5107 err = mlx5e_tls_init(priv); 5108 if (err) 5109 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); 5110 mlx5e_build_nic_netdev(netdev); 5111 err = mlx5e_devlink_port_register(priv); 5112 if (err) 5113 mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); 5114 mlx5e_health_create_reporters(priv); 5115 5116 return 0; 5117 } 5118 5119 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) 5120 { 5121 mlx5e_health_destroy_reporters(priv); 5122 mlx5e_devlink_port_unregister(priv); 5123 mlx5e_tls_cleanup(priv); 5124 mlx5e_ipsec_cleanup(priv); 5125 mlx5e_netdev_cleanup(priv->netdev, priv); 5126 } 5127 5128 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) 5129 { 5130 struct mlx5_core_dev *mdev = priv->mdev; 5131 int err; 5132 5133 mlx5e_create_q_counters(priv); 5134 5135 err = mlx5e_open_drop_rq(priv, &priv->drop_rq); 5136 if (err) { 5137 mlx5_core_err(mdev, "open drop rq failed, %d\n", err); 5138 goto err_destroy_q_counters; 5139 } 5140 5141 err = mlx5e_create_indirect_rqt(priv); 5142 if (err) 5143 goto err_close_drop_rq; 5144 5145 err = mlx5e_create_direct_rqts(priv, priv->direct_tir); 5146 if (err) 5147 goto err_destroy_indirect_rqts; 5148 5149 err = mlx5e_create_indirect_tirs(priv, true); 5150 if (err) 5151 goto err_destroy_direct_rqts; 5152 5153 err = mlx5e_create_direct_tirs(priv, priv->direct_tir); 5154 if (err) 5155 goto err_destroy_indirect_tirs; 5156 5157 err = mlx5e_create_direct_rqts(priv, priv->xsk_tir); 5158 if (unlikely(err)) 5159 goto err_destroy_direct_tirs; 5160 5161 err = mlx5e_create_direct_tirs(priv, priv->xsk_tir); 5162 if (unlikely(err)) 5163 goto err_destroy_xsk_rqts; 5164 5165 err = 
mlx5e_create_flow_steering(priv); 5166 if (err) { 5167 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); 5168 goto err_destroy_xsk_tirs; 5169 } 5170 5171 err = mlx5e_tc_nic_init(priv); 5172 if (err) 5173 goto err_destroy_flow_steering; 5174 5175 err = mlx5e_accel_init_rx(priv); 5176 if (err) 5177 goto err_tc_nic_cleanup; 5178 5179 #ifdef CONFIG_MLX5_EN_ARFS 5180 priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev); 5181 #endif 5182 5183 return 0; 5184 5185 err_tc_nic_cleanup: 5186 mlx5e_tc_nic_cleanup(priv); 5187 err_destroy_flow_steering: 5188 mlx5e_destroy_flow_steering(priv); 5189 err_destroy_xsk_tirs: 5190 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); 5191 err_destroy_xsk_rqts: 5192 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); 5193 err_destroy_direct_tirs: 5194 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5195 err_destroy_indirect_tirs: 5196 mlx5e_destroy_indirect_tirs(priv); 5197 err_destroy_direct_rqts: 5198 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5199 err_destroy_indirect_rqts: 5200 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 5201 err_close_drop_rq: 5202 mlx5e_close_drop_rq(&priv->drop_rq); 5203 err_destroy_q_counters: 5204 mlx5e_destroy_q_counters(priv); 5205 return err; 5206 } 5207 5208 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) 5209 { 5210 mlx5e_accel_cleanup_rx(priv); 5211 mlx5e_tc_nic_cleanup(priv); 5212 mlx5e_destroy_flow_steering(priv); 5213 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); 5214 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); 5215 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5216 mlx5e_destroy_indirect_tirs(priv); 5217 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5218 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 5219 mlx5e_close_drop_rq(&priv->drop_rq); 5220 mlx5e_destroy_q_counters(priv); 5221 } 5222 5223 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) 5224 { 5225 int err; 5226 5227 err = mlx5e_create_tises(priv); 5228 if (err) { 5229 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); 5230 return err; 5231 } 5232 5233 mlx5e_dcbnl_initialize(priv); 5234 return 0; 5235 } 5236 5237 static void mlx5e_nic_enable(struct mlx5e_priv *priv) 5238 { 5239 struct net_device *netdev = priv->netdev; 5240 struct mlx5_core_dev *mdev = priv->mdev; 5241 5242 mlx5e_init_l2_addr(priv); 5243 5244 /* Marking the link as currently not needed by the Driver */ 5245 if (!netif_running(netdev)) 5246 mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN); 5247 5248 mlx5e_set_netdev_mtu_boundaries(priv); 5249 mlx5e_set_dev_port_mtu(priv); 5250 5251 mlx5_lag_add(mdev, netdev); 5252 5253 mlx5e_enable_async_events(priv); 5254 if (mlx5e_monitor_counter_supported(priv)) 5255 mlx5e_monitor_counter_init(priv); 5256 5257 mlx5e_hv_vhca_stats_create(priv); 5258 if (netdev->reg_state != NETREG_REGISTERED) 5259 return; 5260 mlx5e_dcbnl_init_app(priv); 5261 5262 queue_work(priv->wq, &priv->set_rx_mode_work); 5263 5264 rtnl_lock(); 5265 if (netif_running(netdev)) 5266 mlx5e_open(netdev); 5267 udp_tunnel_nic_reset_ntf(priv->netdev); 5268 netif_device_attach(netdev); 5269 rtnl_unlock(); 5270 } 5271 5272 static void mlx5e_nic_disable(struct mlx5e_priv *priv) 5273 { 5274 struct mlx5_core_dev *mdev = priv->mdev; 5275 5276 if (priv->netdev->reg_state == NETREG_REGISTERED) 5277 mlx5e_dcbnl_delete_app(priv); 5278 5279 rtnl_lock(); 5280 if (netif_running(priv->netdev)) 5281 mlx5e_close(priv->netdev); 5282 netif_device_detach(priv->netdev); 5283 rtnl_unlock(); 5284 5285 queue_work(priv->wq, &priv->set_rx_mode_work); 5286 5287 
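/*
 * Editor's note: the remaining teardown below intentionally mirrors the
 * tail of mlx5e_nic_enable() in reverse order, so that no consumer
 * outlives the resources it relies on. A rough pairing, for orientation:
 *
 *	mlx5e_nic_enable()               mlx5e_nic_disable()
 *	  mlx5_lag_add()            <->    mlx5_lag_remove()
 *	  mlx5e_enable_async_events()  <-> mlx5e_disable_async_events()
 *	  mlx5e_monitor_counter_init() <-> mlx5e_monitor_counter_cleanup()
 *	  mlx5e_hv_vhca_stats_create() <-> mlx5e_hv_vhca_stats_destroy()
 */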
mlx5e_hv_vhca_stats_destroy(priv); 5288 if (mlx5e_monitor_counter_supported(priv)) 5289 mlx5e_monitor_counter_cleanup(priv); 5290 5291 mlx5e_disable_async_events(priv); 5292 mlx5_lag_remove(mdev); 5293 mlx5_vxlan_reset_to_default(mdev->vxlan); 5294 } 5295 5296 int mlx5e_update_nic_rx(struct mlx5e_priv *priv) 5297 { 5298 return mlx5e_refresh_tirs(priv, false, false); 5299 } 5300 5301 static const struct mlx5e_profile mlx5e_nic_profile = { 5302 .init = mlx5e_nic_init, 5303 .cleanup = mlx5e_nic_cleanup, 5304 .init_rx = mlx5e_init_nic_rx, 5305 .cleanup_rx = mlx5e_cleanup_nic_rx, 5306 .init_tx = mlx5e_init_nic_tx, 5307 .cleanup_tx = mlx5e_cleanup_nic_tx, 5308 .enable = mlx5e_nic_enable, 5309 .disable = mlx5e_nic_disable, 5310 .update_rx = mlx5e_update_nic_rx, 5311 .update_stats = mlx5e_stats_update_ndo_stats, 5312 .update_carrier = mlx5e_update_carrier, 5313 .rx_handlers = &mlx5e_rx_handlers_nic, 5314 .max_tc = MLX5E_MAX_NUM_TC, 5315 .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), 5316 .stats_grps = mlx5e_nic_stats_grps, 5317 .stats_grps_num = mlx5e_nic_stats_grps_num, 5318 }; 5319 5320 /* mlx5e generic netdev management API (move to en_common.c) */ 5321 5322 /* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */ 5323 int mlx5e_netdev_init(struct net_device *netdev, 5324 struct mlx5e_priv *priv, 5325 struct mlx5_core_dev *mdev, 5326 const struct mlx5e_profile *profile, 5327 void *ppriv) 5328 { 5329 /* priv init */ 5330 priv->mdev = mdev; 5331 priv->netdev = netdev; 5332 priv->profile = profile; 5333 priv->ppriv = ppriv; 5334 priv->msglevel = MLX5E_MSG_LEVEL; 5335 priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); 5336 priv->max_opened_tc = 1; 5337 5338 if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL)) 5339 return -ENOMEM; 5340 5341 mutex_init(&priv->state_lock); 5342 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 5343 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 5344 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); 5345 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 5346 5347 priv->wq = create_singlethread_workqueue("mlx5e"); 5348 if (!priv->wq) 5349 goto err_free_cpumask; 5350 5351 /* netdev init */ 5352 netif_carrier_off(netdev); 5353 5354 return 0; 5355 5356 err_free_cpumask: 5357 free_cpumask_var(priv->scratchpad.cpumask); 5358 5359 return -ENOMEM; 5360 } 5361 5362 void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv) 5363 { 5364 destroy_workqueue(priv->wq); 5365 free_cpumask_var(priv->scratchpad.cpumask); 5366 } 5367 5368 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, 5369 const struct mlx5e_profile *profile, 5370 int nch, 5371 void *ppriv) 5372 { 5373 struct net_device *netdev; 5374 unsigned int ptp_txqs = 0; 5375 int err; 5376 5377 if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) 5378 ptp_txqs = profile->max_tc; 5379 5380 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), 5381 nch * profile->max_tc + ptp_txqs, 5382 nch * profile->rq_groups); 5383 if (!netdev) { 5384 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); 5385 return NULL; 5386 } 5387 5388 err = profile->init(mdev, netdev, profile, ppriv); 5389 if (err) { 5390 mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err); 5391 goto err_free_netdev; 5392 } 5393 5394 return netdev; 5395 5396 err_free_netdev: 5397 free_netdev(netdev); 5398 5399 return NULL; 5400 } 5401 5402 int mlx5e_attach_netdev(struct mlx5e_priv *priv) 5403 { 5404 const bool take_rtnl = 
priv->netdev->reg_state == NETREG_REGISTERED; 5405 const struct mlx5e_profile *profile; 5406 int max_nch; 5407 int err; 5408 5409 profile = priv->profile; 5410 clear_bit(MLX5E_STATE_DESTROYING, &priv->state); 5411 5412 /* max number of channels may have changed */ 5413 max_nch = mlx5e_get_max_num_channels(priv->mdev); 5414 if (priv->channels.params.num_channels > max_nch) { 5415 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); 5416 /* Reducing the number of channels - RXFH has to be reset, and 5417 * mlx5e_num_channels_changed below will build the RQT. 5418 */ 5419 priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED; 5420 priv->channels.params.num_channels = max_nch; 5421 } 5422 /* 1. Set the real number of queues in the kernel the first time. 5423 * 2. Set our default XPS cpumask. 5424 * 3. Build the RQT. 5425 * 5426 * rtnl_lock is required by netif_set_real_num_*_queues in case the 5427 * netdev has been registered by this point (if this function was called 5428 * in the reload or resume flow). 5429 */ 5430 if (take_rtnl) 5431 rtnl_lock(); 5432 err = mlx5e_num_channels_changed(priv); 5433 if (take_rtnl) 5434 rtnl_unlock(); 5435 if (err) 5436 goto out; 5437 5438 err = profile->init_tx(priv); 5439 if (err) 5440 goto out; 5441 5442 err = profile->init_rx(priv); 5443 if (err) 5444 goto err_cleanup_tx; 5445 5446 if (profile->enable) 5447 profile->enable(priv); 5448 5449 return 0; 5450 5451 err_cleanup_tx: 5452 profile->cleanup_tx(priv); 5453 5454 out: 5455 set_bit(MLX5E_STATE_DESTROYING, &priv->state); 5456 cancel_work_sync(&priv->update_stats_work); 5457 return err; 5458 } 5459 5460 void mlx5e_detach_netdev(struct mlx5e_priv *priv) 5461 { 5462 const struct mlx5e_profile *profile = priv->profile; 5463 5464 set_bit(MLX5E_STATE_DESTROYING, &priv->state); 5465 5466 if (profile->disable) 5467 profile->disable(priv); 5468 flush_workqueue(priv->wq); 5469 5470 profile->cleanup_rx(priv); 5471 profile->cleanup_tx(priv); 5472 cancel_work_sync(&priv->update_stats_work); 5473 } 5474 5475 void mlx5e_destroy_netdev(struct mlx5e_priv *priv) 5476 { 5477 const struct mlx5e_profile *profile = priv->profile; 5478 struct net_device *netdev = priv->netdev; 5479 5480 if (profile->cleanup) 5481 profile->cleanup(priv); 5482 free_netdev(netdev); 5483 } 5484 5485 static int mlx5e_resume(struct auxiliary_device *adev) 5486 { 5487 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); 5488 struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); 5489 struct net_device *netdev = priv->netdev; 5490 struct mlx5_core_dev *mdev = edev->mdev; 5491 int err; 5492 5493 if (netif_device_present(netdev)) 5494 return 0; 5495 5496 err = mlx5e_create_mdev_resources(mdev); 5497 if (err) 5498 return err; 5499 5500 err = mlx5e_attach_netdev(priv); 5501 if (err) { 5502 mlx5e_destroy_mdev_resources(mdev); 5503 return err; 5504 } 5505 5506 return 0; 5507 } 5508 5509 static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) 5510 { 5511 struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); 5512 struct net_device *netdev = priv->netdev; 5513 struct mlx5_core_dev *mdev = priv->mdev; 5514 5515 if (!netif_device_present(netdev)) 5516 return -ENODEV; 5517 5518 mlx5e_detach_netdev(priv); 5519 mlx5e_destroy_mdev_resources(mdev); 5520 return 0; 5521 } 5522 5523 static int mlx5e_probe(struct auxiliary_device *adev, 5524 const struct auxiliary_device_id *id) 5525 { 5526 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); 5527 struct mlx5_core_dev *mdev = edev->mdev; 5528 
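/*
 * Editor's note (illustrative, not driver code): this probe runs when the
 * mlx5 core publishes an auxiliary device whose name matches the
 * MLX5_ADEV_NAME ".eth" entry in mlx5e_id_table near the end of this file.
 * A minimal sketch of the core-side publication, assuming the standard
 * auxiliary bus API:
 *
 *	struct auxiliary_device *adev = ...;
 *	adev->name = "eth";
 *	adev->dev.parent = parent;
 *	err = auxiliary_device_init(adev);
 *	if (!err)
 *		err = auxiliary_device_add(adev);
 *
 * Matching is by "<modname>.<name>", which is why the driver below only
 * declares .name = "eth".
 */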
struct net_device *netdev; 5529 pm_message_t state = {}; 5530 void *priv; 5531 int err; 5532 int nch; 5533 5534 nch = mlx5e_get_max_num_channels(mdev); 5535 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL); 5536 if (!netdev) { 5537 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); 5538 return -ENOMEM; 5539 } 5540 5541 dev_net_set(netdev, mlx5_core_net(mdev)); 5542 priv = netdev_priv(netdev); 5543 dev_set_drvdata(&adev->dev, priv); 5544 5545 err = mlx5e_resume(adev); 5546 if (err) { 5547 mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err); 5548 goto err_destroy_netdev; 5549 } 5550 5551 err = register_netdev(netdev); 5552 if (err) { 5553 mlx5_core_err(mdev, "register_netdev failed, %d\n", err); 5554 goto err_resume; 5555 } 5556 5557 mlx5e_devlink_port_type_eth_set(priv); 5558 5559 mlx5e_dcbnl_init_app(priv); 5560 return 0; 5561 5562 err_resume: 5563 mlx5e_suspend(adev, state); 5564 err_destroy_netdev: 5565 mlx5e_destroy_netdev(priv); 5566 return err; 5567 } 5568 5569 static void mlx5e_remove(struct auxiliary_device *adev) 5570 { 5571 struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); 5572 pm_message_t state = {}; 5573 5574 mlx5e_dcbnl_delete_app(priv); 5575 unregister_netdev(priv->netdev); 5576 mlx5e_suspend(adev, state); 5577 mlx5e_destroy_netdev(priv); 5578 } 5579 5580 static const struct auxiliary_device_id mlx5e_id_table[] = { 5581 { .name = MLX5_ADEV_NAME ".eth", }, 5582 {}, 5583 }; 5584 5585 MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table); 5586 5587 static struct auxiliary_driver mlx5e_driver = { 5588 .name = "eth", 5589 .probe = mlx5e_probe, 5590 .remove = mlx5e_remove, 5591 .suspend = mlx5e_suspend, 5592 .resume = mlx5e_resume, 5593 .id_table = mlx5e_id_table, 5594 }; 5595 5596 int mlx5e_init(void) 5597 { 5598 int ret; 5599 5600 mlx5e_ipsec_build_inverse_table(); 5601 mlx5e_build_ptys2ethtool_map(); 5602 ret = mlx5e_rep_init(); 5603 if (ret) 5604 return ret; 5605 5606 ret = auxiliary_driver_register(&mlx5e_driver); 5607 if (ret) 5608 mlx5e_rep_cleanup(); 5609 return ret; 5610 } 5611 5612 void mlx5e_cleanup(void) 5613 { 5614 auxiliary_driver_unregister(&mlx5e_driver); 5615 mlx5e_rep_cleanup(); 5616 } 5617
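/*
 * Editor's note: a minimal userspace sketch (not driver code) showing how
 * the SIOCSHWTSTAMP path handled by mlx5e_hwstamp_set() above is typically
 * exercised. The helper name and interface handling are illustrative:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	static int enable_rx_timestamping(int sock, const char *ifname)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type   = HWTSTAMP_TX_OFF,
 *			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *		if (ioctl(sock, SIOCSHWTSTAMP, &ifr))
 *			return -1;
 *		/* mlx5e upgrades any PTP filter to HWTSTAMP_FILTER_ALL and
 *		 * reports that back in cfg.rx_filter.
 *		 */
 *		return 0;
 *	}
 *
 * Note how mlx5e_hwstamp_set() disables CQE compression before enabling RX
 * timestamping: per-packet timestamps would otherwise be lost for
 * compressed completions.
 */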