--- main.c (a0354d230843da87c2853c6fed2d5860259bef49)
+++ main.c (831df88381f73bca0f5624b69ab985cac3d036bc)
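Three families of differences recur in the hunks below: one side passes RDMA port numbers as u32 where the other uses u8; one side still carries the device-memory (DM) allocation logic inline where the other has moved it out of main.c (note the added #include "dm.h" and the UAPI_DEF_CHAIN(mlx5_ib_dm_defs) entry); and the RoCE-enable test is spelled dev->mdev->roce.roce_en / mlx5_is_roce_init_enabled() on one side versus mlx5_is_roce_enabled() on the other. The mapping of each hash to the -/+ columns is inferred from the ordering of the original side-by-side rendering, so read the signs as left file/right file rather than old/new. The port_num widening matters because a u8 index silently wraps past 255; a standalone sketch of that truncation, illustrative only and not taken from either tree:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: why a u8 port index is too narrow once a device
 * can expose more than 255 ports. Assigning 256 through a u8 wraps to
 * 0, while a u32 keeps the value intact.
 */
int main(void)
{
	unsigned int wide = 256;	/* a port number past the u8 range */
	uint8_t port_u8 = (uint8_t)wide;
	uint32_t port_u32 = wide;

	printf("u8 copy: %u, u32 copy: %u\n", port_u8, port_u32);
	return 0;
}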
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>

--- 20 unchanged lines hidden ---

#include <rdma/ib_umem.h>
#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "devx.h"
37#include "dm.h"
37#include "fs.h"
38#include "srq.h"
39#include "qp.h"
40#include "wr.h"
41#include "restrack.h"
42#include "counters.h"
43#include <linux/mlx5/accel.h>
44#include <rdma/uverbs_std_types.h>
38#include "fs.h"
39#include "srq.h"
40#include "qp.h"
41#include "wr.h"
42#include "restrack.h"
43#include "counters.h"
44#include <linux/mlx5/accel.h>
45#include <rdma/uverbs_std_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");

--- 42 unchanged lines hidden ---

	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
-mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
+mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
-			  u32 port_num,
+			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
-					   u32 *port_num)
+					   u8 *port_num)
{
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port = &dev->port[i];
		if (!port->rep)

--- 13 unchanged lines hidden ---

	return NULL;
}

static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
-	u32 port_num = roce->native_port_num;
+	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

--- 62 unchanged lines hidden ---

		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
-					     u32 port_num)
+					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

--- 11 unchanged lines hidden ---

	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}

struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
-						   u32 ib_port_num,
-						   u32 *native_port_num)
+						   u8 ib_port_num,
+						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||

--- 17 unchanged lines hidden ---

		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

-void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

--- 139 unchanged lines hidden ---

{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}

-static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
+static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u32 eth_prot_oper;
-	u32 mdev_port_num;
+	u8 mdev_port_num;
	bool ext;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */

--- 19 unchanged lines hidden ---

	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width, ext);

-	if (!dev->is_rep && dev->mdev->roce.roce_en) {
+	if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
		u16 qkey_viol_cntr;

		props->port_cap_flags |= IB_PORT_CM_SUP;
		props->ip_gids = true;
		props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
						   roce_address_table_size);
		mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
		props->qkey_viol_cntr = qkey_viol_cntr;

--- 34 unchanged lines hidden ---

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

-static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
+static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
	u16 vlan_id = 0xffff;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	u8 mac[ETH_ALEN];

--- 701 unchanged lines hidden ---

	default:
		return -EINVAL;
	}

	return 0;
}

-static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
+static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;

--- 51 unchanged lines hidden ---

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

-int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);

--- 28 unchanged lines hidden ---

		count = mlx5_core_reserved_gids_count(mdev);
		if (put_mdev)
			mlx5_ib_put_native_port_mdev(dev, port);
		props->gid_tbl_len -= count;
	}
	return ret;
}

-static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
				  struct ib_port_attr *props)
{
	return mlx5_query_port_roce(ibdev, port, props);
}

-static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				  u16 *pkey)
{
	/* Default special Pkey for representor device port as per the
	 * IB specification 1.3 section 10.9.1.2.
	 */
	*pkey = 0xffff;
	return 0;
}

-static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}

}

-static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
				   u16 index, u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev;
	bool put_mdev = true;
-	u32 mdev_port_num;
+	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
	if (!mdev) {
		/* The port isn't affiliated yet, get the PKey from the master
		 * port. For RoCE the PKey tables will be the same.
		 */
		put_mdev = false;

--- 4 unchanged lines hidden ---

	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
					index, pkey);
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port);

	return err;
}

-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:

--- 27 unchanged lines hidden ---

	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

-static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
+static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	struct mlx5_core_dev *mdev;
-	u32 mdev_port_num;
+	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;

	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
	if (err)

--- 12 unchanged lines hidden ---

					 0, &ctx);

out:
	mlx5_ib_put_native_port_mdev(dev, port_num);

	return err;
}

-static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
+static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;

--- 392 unchanged lines hidden ---

		goto out_mdev;

	bfregi->ver = ver;
	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
	context->lib_caps = req.lib_caps;
	print_lib_caps(dev, context->lib_caps);

	if (mlx5_ib_lag_should_assign_affinity(dev)) {
-		u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
+		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;

		atomic_set(&context->tx_port_affinity,
			   atomic_add_return(
				   1, &dev->port[port].roce.tx_port_affinity));
	}

	return 0;

--- 273 unchanged lines hidden ---

	mlx5_cmd_free_uar(dev->mdev, idx);

free_bfreg:
	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);

	return err;
}

-static int add_dm_mmap_entry(struct ib_ucontext *context,
-			     struct mlx5_ib_dm *mdm,
-			     u64 address)
-{
-	mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
-	mdm->mentry.address = address;
-	return rdma_user_mmap_entry_insert_range(
-			context, &mdm->mentry.rdma_entry,
-			mdm->size,
-			MLX5_IB_MMAP_DEVICE_MEM << 16,
-			(MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
-}
-
static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
	unsigned long idx;
	u8 command;

	command = get_command(vma->vm_pgoff);
	idx = get_extended_index(vma->vm_pgoff);

--- 85 unchanged lines hidden ---

	default:
		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
	}

	return 0;
}

-static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
-					u32 type)
-{
-	switch (type) {
-	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-		if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
-			return -EOPNOTSUPP;
-		break;
-	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-		if (!capable(CAP_SYS_RAWIO) ||
-		    !capable(CAP_NET_RAW))
-			return -EPERM;
-
-		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
-		      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner) ||
-		      MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2) ||
-		      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner_v2)))
-			return -EOPNOTSUPP;
-		break;
-	}
-
-	return 0;
-}
-
-static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
-				 struct mlx5_ib_dm *dm,
-				 struct ib_dm_alloc_attr *attr,
-				 struct uverbs_attr_bundle *attrs)
-{
-	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
-	u64 start_offset;
-	u16 page_idx;
-	int err;
-	u64 address;
-
-	dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
-
-	err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
-				   dm->size, attr->alignment);
-	if (err)
-		return err;
-
-	address = dm->dev_addr & PAGE_MASK;
-	err = add_dm_mmap_entry(ctx, dm, address);
-	if (err)
-		goto err_dealloc;
-
-	page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
-	err = uverbs_copy_to(attrs,
-			     MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-			     &page_idx,
-			     sizeof(page_idx));
-	if (err)
-		goto err_copy;
-
-	start_offset = dm->dev_addr & ~PAGE_MASK;
-	err = uverbs_copy_to(attrs,
-			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-			     &start_offset, sizeof(start_offset));
-	if (err)
-		goto err_copy;
-
-	return 0;
-
-err_copy:
-	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-err_dealloc:
-	mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-
-	return err;
-}
-
-static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
-				  struct mlx5_ib_dm *dm,
-				  struct ib_dm_alloc_attr *attr,
-				  struct uverbs_attr_bundle *attrs,
-				  int type)
-{
-	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
-	u64 act_size;
-	int err;
-
-	/* Allocation size must be a multiple of the basic block size
-	 * and a power of 2.
-	 */
-	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
-	act_size = roundup_pow_of_two(act_size);
-
-	dm->size = act_size;
-	err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
-				   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
-				   &dm->icm_dm.obj_id);
-	if (err)
-		return err;
-
-	err = uverbs_copy_to(attrs,
-			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-			     &dm->dev_addr, sizeof(dm->dev_addr));
-	if (err)
-		mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
-				       to_mucontext(ctx)->devx_uid, dm->dev_addr,
-				       dm->icm_dm.obj_id);
-
-	return err;
-}
-
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
-			       struct ib_ucontext *context,
-			       struct ib_dm_alloc_attr *attr,
-			       struct uverbs_attr_bundle *attrs)
-{
-	struct mlx5_ib_dm *dm;
-	enum mlx5_ib_uapi_dm_type type;
-	int err;
-
-	err = uverbs_get_const_default(&type, attrs,
-				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
-				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
-	if (err)
-		return ERR_PTR(err);
-
-	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
-		    type, attr->length, attr->alignment);
-
-	err = check_dm_type_support(to_mdev(ibdev), type);
-	if (err)
-		return ERR_PTR(err);
-
-	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
-	if (!dm)
-		return ERR_PTR(-ENOMEM);
-
-	dm->type = type;
-
-	switch (type) {
-	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-		err = handle_alloc_dm_memic(context, dm,
-					    attr,
-					    attrs);
-		break;
-	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-		err = handle_alloc_dm_sw_icm(context, dm,
-					     attr, attrs,
-					     MLX5_SW_ICM_TYPE_STEERING);
-		break;
-	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-		err = handle_alloc_dm_sw_icm(context, dm,
-					     attr, attrs,
-					     MLX5_SW_ICM_TYPE_HEADER_MODIFY);
-		break;
-	default:
-		err = -EOPNOTSUPP;
-	}
-
-	if (err)
-		goto err_free;
-
-	return &dm->ibdm;
-
-err_free:
-	kfree(dm);
-	return ERR_PTR(err);
-}
-
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
-{
-	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
-		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
-	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
-	struct mlx5_ib_dm *dm = to_mdm(ibdm);
-	int ret;
-
-	switch (dm->type) {
-	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-		rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-		return 0;
-	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
-					     dm->size, ctx->devx_uid, dm->dev_addr,
-					     dm->icm_dm.obj_id);
-		if (ret)
-			return ret;
-		break;
-	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
-					     dm->size, ctx->devx_uid, dm->dev_addr,
-					     dm->icm_dm.obj_id);
-		if (ret)
-			return ret;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	kfree(dm);
-
-	return 0;
-}

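The device-memory (DM) verbs removed above are reached from userspace through libibverbs. Purely as an illustrative aside, and not part of either file, here is a minimal user-space sketch of the allocation path that mlx5_ib_alloc_dm()/mlx5_ib_dealloc_dm() service; it assumes an mlx5 device with MEMIC device memory sits at index 0 of the device list.

#include <stdio.h>
#include <infiniband/verbs.h>

/* Sketch only: allocate and free 4 KiB of on-device memory via the
 * standard verbs DM API backed by the kernel code shown above.
 */
int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_alloc_dm_attr attr = { .length = 4096, .log_align_req = 0 };
	struct ibv_context *ctx;
	struct ibv_dm *dm;

	if (!list || !list[0])
		return 1;
	ctx = ibv_open_device(list[0]);
	if (!ctx)
		return 1;

	dm = ibv_alloc_dm(ctx, &attr);
	if (dm) {
		printf("allocated 4 KiB of device memory\n");
		ibv_free_dm(dm);
	}
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}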
static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx5_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct mlx5_ib_alloc_pd_resp resp;
	int err;
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};

--- 230 unchanged lines hidden ---

		delay_drop->activate = false;
	}
	mutex_unlock(&delay_drop->lock);
}

static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
				 struct ib_event *ibev)
{
-	u32 port = (eqe->data.port.port >> 4) & 0xf;
+	u8 port = (eqe->data.port.port >> 4) & 0xf;

	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			schedule_work(&ibdev->delay_drop.delay_drop_work);
		break;
	default: /* do nothing */
		return;
	}
}

static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
			      struct ib_event *ibev)
{
-	u32 port = (eqe->data.port.port >> 4) & 0xf;
+	u8 port = (eqe->data.port.port >> 4) & 0xf;

	ibev->element.port_num = port;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		/* In RoCE, port up/down events are handled in

--- 340 unchanged lines hidden ---

		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}

-static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
+static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
	struct mlx5_hca_vport_context rep = {0};
	int err;

--- 11 unchanged lines hidden ---

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

-static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	err = ib_query_port(ibdev, port_num, &attr);

--- 55 unchanged lines hidden ---

		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
		dev->flow_db->lag_demux_ft = NULL;

		mlx5_cmd_destroy_vport_lag(mdev);
	}
}

-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
	int err;

	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
	if (err) {
		dev->port[port_num].roce.nb.notifier_call = NULL;
		return err;
	}

	return 0;
}

-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
	if (dev->port[port_num].roce.nb.notifier_call) {
		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
		dev->port[port_num].roce.nb.notifier_call = NULL;
	}
}

static int mlx5_enable_eth(struct mlx5_ib_dev *dev)

--- 17 unchanged lines hidden ---

}

static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
	mlx5_eth_lag_cleanup(dev);
	mlx5_nic_vport_disable_roce(dev->mdev);
}

-static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
+static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
				 enum rdma_netdev_t type,
				 struct rdma_netdev_alloc_params *params)
{
	if (type != RDMA_NETDEV_IPOIB)
		return -EOPNOTSUPP;

	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
}

--- 35 unchanged lines hidden ---

	.open = simple_open,
	.write = delay_drop_timeout_write,
	.read = delay_drop_timeout_read,
};

static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
				      struct mlx5_ib_multiport_info *mpi)
{
-	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	struct mlx5_ib_port *port = &ibdev->port[port_num];
	int comps;
	int err;
	int i;

	lockdep_assert_held(&mlx5_ib_multiport_mutex);

	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);

--- 29 unchanged lines hidden ---

	port->mp.mpi = NULL;

	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);

	spin_unlock(&port->mp.mpi_lock);

	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);

3402 mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3189 mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
	/* Log an error, still needed to cleanup the pointers and add
	 * it back to the list.
	 */
	if (err)
		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
			    port_num + 1);

	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}

static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
				    struct mlx5_ib_multiport_info *mpi)
{
-	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	int err;

	lockdep_assert_held(&mlx5_ib_multiport_mutex);

	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
	if (ibdev->port[port_num].mp.mpi) {
3423 mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
3210 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
			    port_num + 1);
		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
		return false;
	}

	ibdev->port[port_num].mp.mpi = mpi;
	mpi->ibdev = ibdev;
	mpi->mdev_events.notifier_call = NULL;

--- 19 unchanged lines hidden ---

unbind:
	mlx5_ib_unbind_slave_port(ibdev, mpi);
	return false;
}

static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
3458{
3211 port_num + 1);
3212 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3213 return false;
3214 }
3215
3216 ibdev->port[port_num].mp.mpi = mpi;
3217 mpi->ibdev = ibdev;
3218 mpi->mdev_events.notifier_call = NULL;

--- 19 unchanged lines hidden (view full) ---

3238
3239unbind:
3240 mlx5_ib_unbind_slave_port(ibdev, mpi);
3241 return false;
3242}
3243
3244static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
3245{
-	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	struct mlx5_ib_multiport_info *mpi;
	int err;
-	u32 i;
+	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return 0;

	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
						     &dev->sys_image_guid);
	if (err)
		return err;

--- 46 unchanged lines hidden ---

	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return err;
}

static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
-	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
-	u32 i;
+	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		if (dev->port[i].mp.mpi) {
			/* Destroy the native port stub */
			if (i == port_num) {
				kfree(dev->port[i].mp.mpi);
				dev->port[i].mp.mpi = NULL;
			} else {
3543 mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
3544 i + 1);
3330 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
			}
		}
	}

	mlx5_ib_dbg(dev, "removing from devlist\n");
	list_del(&dev->ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);

--- 260 unchanged lines hidden ---

			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
-	mlx5_ib_dm,
-	UVERBS_OBJECT_DM,
-	UVERBS_METHOD_DM_ALLOC,
-	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-			    UVERBS_ATTR_TYPE(u64),
-			    UA_MANDATORY),
-	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-			    UVERBS_ATTR_TYPE(u16),
-			    UA_OPTIONAL),
-	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
-			     enum mlx5_ib_uapi_dm_type,
-			     UA_OPTIONAL));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
3834ADD_UVERBS_ATTRIBUTES_SIMPLE(
3835 mlx5_ib_flow_action,
3836 UVERBS_OBJECT_FLOW_ACTION,
3837 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
3838 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
3839 enum mlx5_ib_uapi_flow_action_flags));
3840
3841ADD_UVERBS_ATTRIBUTES_SIMPLE(
3842 mlx5_ib_query_context,

--- 5 unchanged lines hidden (view full) ---

3848 dump_fill_mkey),
3849 UA_MANDATORY));
3850
3851static const struct uapi_definition mlx5_ib_defs[] = {
3852 UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
3853 UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
3854 UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
3855 UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+	UAPI_DEF_CHAIN(mlx5_ib_dm_defs),

	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
				&mlx5_ib_flow_action),
-	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
				      UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
	{}
};

static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)

--- 20 unchanged lines hidden ---

	for (i = 0; i < dev->num_ports; i++) {
		spin_lock_init(&dev->port[i].mp.mpi_lock);
		rwlock_init(&dev->port[i].roce.netdev_lock);
		dev->port[i].roce.dev = dev;
		dev->port[i].roce.native_port_num = i + 1;
		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
	}

+	mlx5_ib_internal_fill_odp_caps(dev);
+
	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		return err;

	err = set_has_smi_cap(dev);
	if (err)
		goto err_mp;

--- 123 unchanged lines hidden ---

static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
	.alloc_xrcd = mlx5_ib_alloc_xrcd,
	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,

	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
};

-static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
-	.alloc_dm = mlx5_ib_alloc_dm,
-	.dealloc_dm = mlx5_ib_dealloc_dm,
-	.reg_dm_mr = mlx5_ib_reg_dm_mr,
-};
-
static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_var_table *var_table = &dev->var_table;
	u8 log_doorbell_bar_size;
	u8 log_doorbell_stride;
	u64 bar_size;

--- 106 unchanged lines hidden ---

			   ib_rwq_ind_tbl),
};

static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
-	u32 port_num = 0;
+	u8 port_num = 0;
	int err;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);

		port_num = mlx5_core_native_port_num(dev->mdev) - 1;

		/* Register only for native ports */
		err = mlx5_add_netdev_notifier(dev, port_num);
-		if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
+		if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
			/*
			 * We don't enable ETH interface for
			 * 1. IB representors
			 * 2. User disabled ROCE through devlink interface
			 */
			return err;

		err = mlx5_enable_eth(dev);

--- 7 unchanged lines hidden ---

	return err;
}

static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
-	u32 port_num;
+	u8 port_num;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		if (!dev->is_rep)
			mlx5_disable_eth(dev);

--- 496 unchanged lines hidden ---

	if (!dev->port) {
		ib_dealloc_device(&dev->ib_dev);
		return -ENOMEM;
	}

	dev->mdev = mdev;
	dev->num_ports = num_ports;

-	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
+	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
		profile = &raw_eth_profile;
	else
		profile = &pf_profile;

	ret = __mlx5_ib_add(dev, profile);
	if (ret) {
		kfree(dev->port);
		ib_dealloc_device(&dev->ib_dev);

--- 90 unchanged lines hidden ---