/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "ipoib.h"

#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

static const struct net_device_ops mlx5i_netdev_ops = {
	.ndo_open                = mlx5i_open,
	.ndo_stop                = mlx5i_close,
	.ndo_get_stats64         = mlx5i_get_stats,
	.ndo_init                = mlx5i_dev_init,
	.ndo_uninit              = mlx5i_dev_cleanup,
	.ndo_change_mtu          = mlx5i_change_mtu,
	.ndo_do_ioctl            = mlx5i_ioctl,
};

/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);

	/* RQ size in ipoib by default is 512 */
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

	params->lro_en = false;
	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
	params->tunneled_offload_en = false;
}

/* Called directly after IPoIB netdevice was created to initialize SW structs */
int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);

	netif_carrier_off(netdev);
	mlx5e_set_netdev_mtu_boundaries(priv);
	netdev->mtu = netdev->max_mtu;

	mlx5e_build_nic_params(priv, NULL, netdev->mtu);
	mlx5i_build_nic_params(mdev, &priv->channels.params);

	mlx5e_timestamp_init(priv);

	/* netdev init */
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;
	netdev->hw_features    |= NETIF_F_RXHASH;

	netdev->netdev_ops = &mlx5i_netdev_ops;
	netdev->ethtool_ops = &mlx5i_ethtool_ops;

	return 0;
}

/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_priv_cleanup(priv);
}

/* Fold the per-channel RQ/SQ software counters into priv->stats.sw */
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats s = { 0 };
	int i, j;

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats;
		struct mlx5e_rq_stats *rq_stats;

		channel_stats = &priv->channel_stats[i];
		rq_stats = &channel_stats->rq;

		s.rx_packets += rq_stats->packets;
		s.rx_bytes   += rq_stats->bytes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s.tx_packets       += sq_stats->packets;
			s.tx_bytes         += sq_stats->bytes;
			s.tx_queue_dropped += sq_stats->dropped;
		}
	}

	memcpy(&priv->stats.sw, &s, sizeof(s));
}

void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	mlx5i_grp_sw_update_stats(priv);

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;
	stats->tx_dropped = sstats->tx_queue_dropped;
}

/* Bring the underlay QP from RST through INIT and RTR to RTS; on any
 * failure, move the QP to the ERR state before returning.
 */
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5i_priv *ipriv = priv->ppriv;
	int ret;

	{
		u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
		u32 *qpc;

		qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
			 ipriv->pkey_index);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);

		MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
		MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
	{
		u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};

		MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
		MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
	{
		u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};

		MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
		MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
		ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
		if (ret)
			goto err_qp_modify_to_err;
	}
	return 0;

err_qp_modify_to_err:
	{
		u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};

		MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
		MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
		mlx5_cmd_exec_in(mdev, qp_2err, in);
	}
	return ret;
}

void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

	MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
	MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
	mlx5_cmd_exec_in(mdev, qp_2rst, in);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
	unsigned char *dev_addr = priv->netdev->dev_addr;
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
	struct mlx5i_priv *ipriv = priv->ppriv;
	void *addr_path;
	int qpn = 0;
	int ret = 0;
	void *qpc;

	if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
		qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
		MLX5_SET(create_qp_in, in, input_qpn, qpn);
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
		 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

	addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
	MLX5_SET(ads, addr_path, vhca_port_num, 1);
	MLX5_SET(ads, addr_path, grh, 1);

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
	if (ret)
		return ret;

	ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);

	return 0;
}

void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qpn);
	mlx5_cmd_exec_in(mdev, destroy_qp, in);
}

int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
	return mlx5e_refresh_tirs(priv, true, true);
}

int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);

	return mlx5e_create_tis(mdev, in, tisn);
}

static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	int err;

	err = mlx5i_create_underlay_qp(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
		return err;
	}

	err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
		goto err_destroy_underlay_qp;
	}

	return 0;

err_destroy_underlay_qp:
	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
	return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
	mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}

static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EINVAL;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	return 0;

err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
}

static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, true);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5i_create_flow_steering(priv);
	if (err)
		goto err_destroy_direct_tirs;

	return 0;

err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5i_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
}

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5i_stats_grps);
}

static const struct mlx5e_profile mlx5i_nic_profile = {
	.init		   = mlx5i_init,
	.cleanup	   = mlx5i_cleanup,
	.init_tx	   = mlx5i_init_tx,
	.cleanup_tx	   = mlx5i_cleanup_tx,
	.init_rx	   = mlx5i_init_rx,
	.cleanup_rx	   = mlx5i_cleanup_rx,
	.enable		   = NULL, /* mlx5i_enable */
	.disable	   = NULL, /* mlx5i_disable */
	.update_rx	   = mlx5i_update_nic_rx,
	.update_stats	   = NULL, /* mlx5i_update_stats */
	.update_carrier	   = NULL, /* no HW update in IB link */
	.rx_handlers	   = &mlx5i_rx_handlers,
	.max_tc		   = MLX5I_MAX_NUM_TC,
	.rq_groups	   = MLX5E_NUM_RQ_GROUPS(REGULAR),
	.stats_grps	   = mlx5i_stats_grps,
	.stats_grps_num	   = mlx5i_stats_grps_num,
};

/* mlx5i netdev NDOs */

static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *params;
	int err = 0;

	mutex_lock(&priv->state_lock);

	params = &priv->channels.params;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		params->sw_mtu = new_mtu;
		netdev->mtu = params->sw_mtu;
		goto out;
	}

	new_channels.params = *params;
	new_channels.params.sw_mtu = new_mtu;

	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
	if (err)
		goto out;

	netdev->mtu = new_channels.params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

int mlx5i_dev_init(struct net_device *dev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;

	/* Set dev address using underlay QP */
	dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
	dev->dev_addr[2] = (ipriv->qpn >>  8) & 0xff;
	dev->dev_addr[3] = (ipriv->qpn) & 0xff;

	/* Add QPN to net-device mapping to HT */
	mlx5i_pkey_add_qpn(dev, ipriv->qpn);

	return 0;
}

int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5i_uninit_underlay_qp(priv);

	/* Delete QPN to net-device mapping from HT */
	mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}

static int mlx5i_open(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;
	int err;

	mutex_lock(&epriv->state_lock);

	set_bit(MLX5E_STATE_OPENED, &epriv->state);

	err = mlx5i_init_underlay_qp(epriv);
	if (err) {
		mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
	if (err) {
		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
		goto err_reset_qp;
	}

	err = mlx5e_open_channels(epriv, &epriv->channels);
	if (err)
		goto err_remove_fs_underlay_qp;

	epriv->profile->update_rx(epriv);
	mlx5e_activate_priv_channels(epriv);

	mutex_unlock(&epriv->state_lock);
	return 0;

err_remove_fs_underlay_qp:
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
	mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &epriv->state);
	mutex_unlock(&epriv->state_lock);
	return err;
}

static int mlx5i_close(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;

	/* May already be CLOSED if a previous configuration operation
	 * (e.g. an RX/TX queue size change) that involves close & open failed.
	 */
	mutex_lock(&epriv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
		goto unlock;

	clear_bit(MLX5E_STATE_OPENED, &epriv->state);

	netif_carrier_off(epriv->netdev);
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
	mlx5e_deactivate_priv_channels(epriv);
	mlx5e_close_channels(&epriv->channels);
	mlx5i_uninit_underlay_qp(epriv);
unlock:
	mutex_unlock(&epriv->state_lock);
	return 0;
}

/* IPoIB RDMA netdev callbacks */
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid, int set_qkey,
			      u32 qkey)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = epriv->mdev;
	struct mlx5i_priv *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
		      gid->raw);
	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
	if (err)
		mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
			       ipriv->qpn, gid->raw);

	if (set_qkey) {
		mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
			      netdev->name, qkey);
		ipriv->qkey = qkey;
	}

	return err;
}

static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = epriv->mdev;
	struct mlx5i_priv *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
		      gid->raw);

	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
	if (err)
		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
			      ipriv->qpn, gid->raw);

	return err;
}

static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
		      struct ib_ah *address, u32 dqpn)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(dev);
	struct mlx5e_txqsq *sq   = epriv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_ib_ah *mah   = to_mah(address);
	struct mlx5i_priv *ipriv = epriv->ppriv;

	mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());

	return NETDEV_TX_OK;
}

static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
	struct mlx5i_priv *ipriv = netdev_priv(netdev);

	ipriv->pkey_index = (u16)id;
}

static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
		mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = priv->ppriv;
	const struct mlx5e_profile *profile = priv->profile;

	mlx5e_detach_netdev(priv);
	profile->cleanup(priv);

	if (!ipriv->sub_interface) {
		mlx5i_pkey_qpn_ht_cleanup(netdev);
		mlx5e_destroy_mdev_resources(priv->mdev);
	}
}

static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
	return mdev->mlx5e_res.pdn != 0;
}

static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
	if (mlx5_is_sub_interface(mdev))
		return mlx5i_pkey_get_profile();
	return &mlx5i_nic_profile;
}

static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
			      struct net_device *netdev, void *param)
{
	struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
	const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
	struct mlx5i_priv *ipriv;
	struct mlx5e_priv *epriv;
	struct rdma_netdev *rn;
	int err;

	ipriv = netdev_priv(netdev);
	epriv = mlx5i_epriv(netdev);

	ipriv->sub_interface = mlx5_is_sub_interface(mdev);
	if (!ipriv->sub_interface) {
		err = mlx5i_pkey_qpn_ht_init(netdev);
		if (err) {
			mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
			return err;
		}

		/* This should only be called once per mdev */
		err = mlx5e_create_mdev_resources(mdev);
		if (err)
			goto destroy_ht;
	}

	err = mlx5e_priv_init(epriv, netdev, mdev);
	if (err)
		goto destroy_mdev_resources;

	epriv->profile = prof;
	epriv->ppriv = ipriv;

	prof->init(mdev, netdev);

	err = mlx5e_attach_netdev(epriv);
	if (err)
		goto detach;
	netif_carrier_off(netdev);

	/* set rdma_netdev func pointers */
	rn = &ipriv->rn;
	rn->hca = ibdev;
	rn->send = mlx5i_xmit;
	rn->attach_mcast = mlx5i_attach_mcast;
	rn->detach_mcast = mlx5i_detach_mcast;
	rn->set_id = mlx5i_set_pkey_index;

	netdev->priv_destructor = mlx5_rdma_netdev_free;
	netdev->needs_free_netdev = 1;

	return 0;

detach:
	prof->cleanup(epriv);
	if (ipriv->sub_interface)
		return err;
destroy_mdev_resources:
	mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
	mlx5i_pkey_qpn_ht_cleanup(netdev);
	return err;
}

int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params)
{
	int nch;
	int rc;

	rc = mlx5i_check_required_hca_cap(mdev);
	if (rc)
		return rc;

	nch = mlx5e_get_max_num_channels(mdev);

	*params = (struct rdma_netdev_alloc_params){
		.sizeof_priv = sizeof(struct mlx5i_priv) +
			       sizeof(struct mlx5e_priv),
		.txqs = nch * MLX5E_MAX_NUM_TC,
		.rxqs = nch,
		.param = mdev,
		.initialize_rdma_netdev = mlx5_rdma_setup_rn,
	};

	return 0;
}
EXPORT_SYMBOL(mlx5_rdma_rn_get_params);