/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
#include "en/fs_ethtool.h"

#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

static const struct net_device_ops mlx5i_netdev_ops = {
        .ndo_open                = mlx5i_open,
        .ndo_stop                = mlx5i_close,
        .ndo_get_stats64         = mlx5i_get_stats,
        .ndo_init                = mlx5i_dev_init,
        .ndo_uninit              = mlx5i_dev_cleanup,
        .ndo_change_mtu          = mlx5i_change_mtu,
        .ndo_eth_ioctl           = mlx5i_ioctl,
};

/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
{
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
        mlx5e_set_rq_type(mdev, params);
        mlx5e_init_rq_type_params(mdev, params);

        /* RQ size in ipoib by default is 512 */
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

        params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
        params->tunneled_offload_en = false;
}

/* Called directly after IPoIB netdevice was created to initialize SW structs */
int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);

        netif_carrier_off(netdev);
        mlx5e_set_netdev_mtu_boundaries(priv);
        netdev->mtu = netdev->max_mtu;

        mlx5e_build_nic_params(priv, NULL, netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);

        mlx5e_timestamp_init(priv);

        /* netdev init */
        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;
        netdev->hw_features |= NETIF_F_RXHASH;

        netdev->netdev_ops = &mlx5i_netdev_ops;
        netdev->ethtool_ops = &mlx5i_ethtool_ops;

        return 0;
}

/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
        mlx5e_priv_cleanup(priv);
}

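/* Fold the per-channel RQ/SQ counters into the software stats in
 * priv->stats.sw; only the fields reported through ndo_get_stats64
 * (packets, bytes and TX drops) are aggregated here.
 */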
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
        struct rtnl_link_stats64 s = {};
        int i, j;

        for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats;
                struct mlx5e_rq_stats *rq_stats;

                channel_stats = priv->channel_stats[i];
                rq_stats = &channel_stats->rq;

                s.rx_packets += rq_stats->packets;
                s.rx_bytes += rq_stats->bytes;

                for (j = 0; j < priv->max_opened_tc; j++) {
                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

                        s.tx_packets += sq_stats->packets;
                        s.tx_bytes += sq_stats->bytes;
                        s.tx_dropped += sq_stats->dropped;
                }
        }

        memset(&priv->stats.sw, 0, sizeof(s));

        priv->stats.sw.rx_packets = s.rx_packets;
        priv->stats.sw.rx_bytes = s.rx_bytes;
        priv->stats.sw.tx_packets = s.tx_packets;
        priv->stats.sw.tx_bytes = s.tx_bytes;
        priv->stats.sw.tx_queue_dropped = s.tx_dropped;
}

void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        mlx5i_grp_sw_update_stats(priv);

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes   = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes   = sstats->tx_bytes;
        stats->tx_dropped = sstats->tx_queue_dropped;
}

int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        int ret;

        {
                u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
                u32 *qpc;

                qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

                MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
                MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
                         ipriv->pkey_index);
                MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
                MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);

                MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
                MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        {
                u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};

                MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
                MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        {
                u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};

                MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
                MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        return 0;

err_qp_modify_to_err:
        {
                u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};

                MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
                MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
                mlx5_cmd_exec_in(mdev, qp_2err, in);
        }
        return ret;
}

void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

        MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
        MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
        mlx5_cmd_exec_in(mdev, qp_2rst, in);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

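/* Create the underlay UD QP used to carry IPoIB traffic. The QP is created
 * in enhanced ULP stateless offload mode; when the mkey_by_name capability
 * is set, the requested QPN is derived from bytes 1-3 of the netdev
 * dev_addr. The resulting QPN is stored in ipriv->qpn.
 */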
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
        const unsigned char *dev_addr = priv->netdev->dev_addr;
        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
        u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
        struct mlx5i_priv *ipriv = priv->ppriv;
        void *addr_path;
        int qpn = 0;
        int ret = 0;
        void *qpc;

        if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) {
                qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
                MLX5_SET(create_qp_in, in, input_qpn, qpn);
        }

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
                 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

        addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
        MLX5_SET(ads, addr_path, vhca_port_num, 1);
        MLX5_SET(ads, addr_path, grh, 1);

        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
        ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
        if (ret)
                return ret;

        ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);

        return 0;
}

void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, in, qpn, qpn);
        mlx5_cmd_exec_in(mdev, destroy_qp, in);
}

int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
        return mlx5e_refresh_tirs(priv, true, true);
}

int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
        void *tisc;

        tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);

        return mlx5e_create_tis(mdev, in, tisn);
}

static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        int err;

        err = mlx5i_create_underlay_qp(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
                return err;
        }

        err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
                goto err_destroy_underlay_qp;
        }

        return 0;

err_destroy_underlay_qp:
        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
        return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;

        mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}

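/* Build the IPoIB RX flow steering tables in the kernel flow namespace.
 * aRFS table creation failure is not fatal and only clears NETIF_F_NTUPLE;
 * failure to create the TTC table unwinds the aRFS tables and fails.
 */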
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
        struct mlx5_flow_namespace *ns =
                mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
        int err;

        if (!ns)
                return -EINVAL;

        mlx5e_fs_set_ns(priv->fs, ns, false);
        err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
                                       !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        err = mlx5e_create_ttc_table(priv->fs, priv->rx_res);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        mlx5e_ethtool_init_steering(priv->fs);

        return 0;

err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv->fs,
                                  !!(priv->netdev->hw_features & NETIF_F_NTUPLE));

        return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_ttc_table(priv->fs);
        mlx5e_arfs_destroy_tables(priv->fs,
                                  !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
        mlx5e_ethtool_cleanup_steering(priv->fs);
}

static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        priv->fs = mlx5e_fs_init(priv->profile, mdev,
                                 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
        if (!priv->fs) {
                netdev_err(priv->netdev, "FS allocation failed\n");
                return -ENOMEM;
        }

        priv->rx_res = mlx5e_rx_res_alloc();
        if (!priv->rx_res) {
                err = -ENOMEM;
                goto err_free_fs;
        }

        mlx5e_create_q_counters(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_q_counters;
        }

        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
                                priv->max_nch, priv->drop_rq.rqn,
                                &priv->channels.params.packet_merge,
                                priv->channels.params.num_channels);
        if (err)
                goto err_close_drop_rq;

        err = mlx5i_create_flow_steering(priv);
        if (err)
                goto err_destroy_rx_res;

        return 0;

err_destroy_rx_res:
        mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
        mlx5e_destroy_q_counters(priv);
        mlx5e_rx_res_free(priv->rx_res);
        priv->rx_res = NULL;
err_free_fs:
        mlx5e_fs_cleanup(priv->fs);
        return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
        mlx5i_destroy_flow_steering(priv);
        mlx5e_rx_res_destroy(priv->rx_res);
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_destroy_q_counters(priv);
        mlx5e_rx_res_free(priv->rx_res);
        priv->rx_res = NULL;
        mlx5e_fs_cleanup(priv->fs);
}

/* The stats groups order is opposite to the order of the update_stats() calls */
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
        &MLX5E_STATS_GRP(sw),
        &MLX5E_STATS_GRP(qcnt),
        &MLX5E_STATS_GRP(vnic_env),
        &MLX5E_STATS_GRP(vport),
        &MLX5E_STATS_GRP(802_3),
        &MLX5E_STATS_GRP(2863),
        &MLX5E_STATS_GRP(2819),
        &MLX5E_STATS_GRP(phy),
        &MLX5E_STATS_GRP(pcie),
        &MLX5E_STATS_GRP(per_prio),
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5i_stats_grps);
}

static const struct mlx5e_profile mlx5i_nic_profile = {
        .init              = mlx5i_init,
        .cleanup           = mlx5i_cleanup,
        .init_tx           = mlx5i_init_tx,
        .cleanup_tx        = mlx5i_cleanup_tx,
        .init_rx           = mlx5i_init_rx,
        .cleanup_rx        = mlx5i_cleanup_rx,
        .enable            = NULL, /* mlx5i_enable */
        .disable           = NULL, /* mlx5i_disable */
        .update_rx         = mlx5i_update_nic_rx,
        .update_stats      = NULL, /* mlx5i_update_stats */
        .update_carrier    = NULL, /* no HW update in IB link */
        .rx_handlers       = &mlx5i_rx_handlers,
        .max_tc            = MLX5I_MAX_NUM_TC,
        .stats_grps        = mlx5i_stats_grps,
        .stats_grps_num    = mlx5i_stats_grps_num,
};

/* mlx5i netdev NDOs */

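/* MTU changes are applied by switching to a new set of channel parameters
 * under the state lock; netdev->mtu is updated only if the switch succeeds.
 */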
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5e_params new_params;
        int err = 0;

        mutex_lock(&priv->state_lock);

        new_params = priv->channels.params;
        new_params.sw_mtu = new_mtu;

        err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
        if (err)
                goto out;

        netdev->mtu = new_params.sw_mtu;

out:
        mutex_unlock(&priv->state_lock);
        return err;
}

int mlx5i_dev_init(struct net_device *dev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv = priv->ppriv;
        u8 addr_mod[3];

        /* Set dev address using underlay QP */
        addr_mod[0] = (ipriv->qpn >> 16) & 0xff;
        addr_mod[1] = (ipriv->qpn >> 8) & 0xff;
        addr_mod[2] = (ipriv->qpn) & 0xff;
        dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));

        /* Add QPN to net-device mapping to HT */
        mlx5i_pkey_add_qpn(dev, ipriv->qpn);

        return 0;
}

int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlx5e_hwstamp_set(priv, ifr);
        case SIOCGHWTSTAMP:
                return mlx5e_hwstamp_get(priv, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv = priv->ppriv;

        mlx5i_uninit_underlay_qp(priv);

        /* Delete QPN to net-device mapping from HT */
        mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}

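/* Bring the interface up: move the underlay QP to RTS, attach its QPN to
 * the RX flow tables, then open and activate the channels. Any failure
 * unwinds the previous steps and clears the OPENED state bit.
 */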
static int mlx5i_open(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;
        int err;

        mutex_lock(&epriv->state_lock);

        set_bit(MLX5E_STATE_OPENED, &epriv->state);

        err = mlx5i_init_underlay_qp(epriv);
        if (err) {
                mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
        if (err) {
                mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
                goto err_reset_qp;
        }

        err = mlx5e_open_channels(epriv, &epriv->channels);
        if (err)
                goto err_remove_fs_underlay_qp;

        err = epriv->profile->update_rx(epriv);
        if (err)
                goto err_close_channels;

        mlx5e_activate_priv_channels(epriv);

        mutex_unlock(&epriv->state_lock);
        return 0;

err_close_channels:
        mlx5e_close_channels(&epriv->channels);
err_remove_fs_underlay_qp:
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
        mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &epriv->state);
        mutex_unlock(&epriv->state_lock);
        return err;
}

static int mlx5i_close(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;

        /* May already be CLOSED in case a previous configuration operation
         * (e.g. an RX/TX queue size change) that involves close & open failed.
         */
        mutex_lock(&epriv->state_lock);

        if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
                goto unlock;

        clear_bit(MLX5E_STATE_OPENED, &epriv->state);

        netif_carrier_off(epriv->netdev);
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
        mlx5e_deactivate_priv_channels(epriv);
        mlx5e_close_channels(&epriv->channels);
        mlx5i_uninit_underlay_qp(epriv);
unlock:
        mutex_unlock(&epriv->state_lock);
        return 0;
}

/* IPoIB RDMA netdev callbacks */
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid, int set_qkey,
                              u32 qkey)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = epriv->mdev;
        struct mlx5i_priv *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
                      gid->raw);
        err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
        if (err)
                mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
                               ipriv->qpn, gid->raw);

        if (set_qkey) {
                mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
                              netdev->name, qkey);
                ipriv->qkey = qkey;
        }

        return err;
}

static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = epriv->mdev;
        struct mlx5i_priv *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
                      gid->raw);

        err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
        if (err)
                mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
                              ipriv->qpn, gid->raw);

        return err;
}

static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
                      struct ib_ah *address, u32 dqpn)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(dev);
        struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)];
        struct mlx5_ib_ah *mah = to_mah(address);
        struct mlx5i_priv *ipriv = epriv->ppriv;

        mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());

        return NETDEV_TX_OK;
}

static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
        struct mlx5i_priv *ipriv = netdev_priv(netdev);

        ipriv->pkey_index = (u16)id;
}

static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
                return -EOPNOTSUPP;

        if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
                mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
                return -EOPNOTSUPP;
        }

        return 0;
}

static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;

        mlx5e_detach_netdev(priv);
        profile->cleanup(priv);

        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
                mlx5e_destroy_mdev_resources(mdev);
        }
}

static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
        return mdev->mlx5e_res.hw_objs.pdn != 0;
}

static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
        if (mlx5_is_sub_interface(mdev))
                return mlx5i_pkey_get_profile();
        return &mlx5i_nic_profile;
}

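/* initialize_rdma_netdev callback (see mlx5_rdma_rn_get_params() below).
 * For a parent interface (not a sub-interface) this also sets up the
 * QPN -> netdev hash table and the shared mdev resources, then wires the
 * rdma_netdev send/mcast/set_id hooks to the mlx5i implementations.
 */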
static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
                              struct net_device *netdev, void *param)
{
        struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
        const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
        struct mlx5i_priv *ipriv;
        struct mlx5e_priv *epriv;
        struct rdma_netdev *rn;
        int err;

        ipriv = netdev_priv(netdev);
        epriv = mlx5i_epriv(netdev);

        ipriv->sub_interface = mlx5_is_sub_interface(mdev);
        if (!ipriv->sub_interface) {
                err = mlx5i_pkey_qpn_ht_init(netdev);
                if (err) {
                        mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
                        return err;
                }

                /* This should only be called once per mdev */
                err = mlx5e_create_mdev_resources(mdev);
                if (err)
                        goto destroy_ht;
        }

        err = mlx5e_priv_init(epriv, prof, netdev, mdev);
        if (err)
                goto destroy_mdev_resources;

        epriv->profile = prof;
        epriv->ppriv = ipriv;

        prof->init(mdev, netdev);

        err = mlx5e_attach_netdev(epriv);
        if (err)
                goto detach;
        netif_carrier_off(netdev);

        /* set rdma_netdev func pointers */
        rn = &ipriv->rn;
        rn->hca = ibdev;
        rn->send = mlx5i_xmit;
        rn->attach_mcast = mlx5i_attach_mcast;
        rn->detach_mcast = mlx5i_detach_mcast;
        rn->set_id = mlx5i_set_pkey_index;

        netdev->priv_destructor = mlx5_rdma_netdev_free;
        netdev->needs_free_netdev = 1;

        return 0;

detach:
        prof->cleanup(epriv);
        if (ipriv->sub_interface)
                return err;
destroy_mdev_resources:
        mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
        mlx5i_pkey_qpn_ht_cleanup(netdev);
        return err;
}

int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
                            struct ib_device *device,
                            struct rdma_netdev_alloc_params *params)
{
        int nch;
        int rc;

        rc = mlx5i_check_required_hca_cap(mdev);
        if (rc)
                return rc;

        nch = mlx5e_get_max_num_channels(mdev);

        *params = (struct rdma_netdev_alloc_params){
                .sizeof_priv = sizeof(struct mlx5i_priv) +
                               sizeof(struct mlx5e_priv),
                .txqs = nch * MLX5E_MAX_NUM_TC,
                .rxqs = nch,
                .param = mdev,
                .initialize_rdma_netdev = mlx5_rdma_setup_rn,
        };

        return 0;
}
EXPORT_SYMBOL(mlx5_rdma_rn_get_params);