main.c, old revision 181ae8844578d0a80f188c1d195fd6bb91bcec81 vs. new revision 72a7720fca37fec0daf295923f17ac5d88a613e1. Removed lines are prefixed with '-', added lines with '+'; unchanged context is unprefixed.
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 521 unchanged lines hidden ---

        props->ip_gids = true;

        props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
                                           roce_address_table_size);
        props->max_mtu = IB_MTU_4096;
        props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len = 1;
        props->state = IB_PORT_DOWN;
-        props->phys_state = 3;
+        props->phys_state = IB_PORT_PHYS_STATE_DISABLED;

        mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;

        /* If this is a stub query for an unaffiliated port stop here */
        if (!put_mdev)
                goto out;

--- 9 unchanged lines hidden ---

                        ndev = upper;
                        dev_hold(ndev);
                }
                rcu_read_unlock();
        }

        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state = IB_PORT_ACTIVE;
-                props->phys_state = 5;
+                props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        }

        ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

        dev_put(ndev);

        props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:

--- 4744 unchanged lines hidden ---

        { .name = #_name, .offset = \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}

static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
        INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
};

+static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
+{
+        return MLX5_ESWITCH_MANAGER(mdev) &&
+               mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
+               MLX5_ESWITCH_OFFLOADS;
+}
+
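The is_mdev_switchdev_mode() helper added above is what the counter hunks below key off: when the eswitch is in offloads (switchdev) mode, the per-port counter bookkeeping collapses to a single set shared by all vports. A minimal sketch of the pattern the following hunks apply, using identifiers taken from those hunks (illustrative only, not additional code from this commit):

        /* Illustrative sketch: one counter set for the whole device in
         * switchdev mode, otherwise one per port.
         */
        num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
        for (i = 0; i < num_cnt_ports; i++)
                ; /* allocate or free dev->port[i].cnts here */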
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
+        int num_cnt_ports;
        int i;

-        for (i = 0; i < dev->num_ports; i++) {
+        num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+
+        for (i = 0; i < num_cnt_ports; i++) {
                if (dev->port[i].cnts.set_id_valid)
                        mlx5_core_dealloc_q_counter(dev->mdev,
                                                    dev->port[i].cnts.set_id);
                kfree(dev->port[i].cnts.names);
                kfree(dev->port[i].cnts.offsets);
        }
}

--- 85 unchanged lines hidden ---

                        names[j] = ext_ppcnt_cnts[i].name;
                        offsets[j] = ext_ppcnt_cnts[i].offset;
                }
        }
}

static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+        int num_cnt_ports;
        int err = 0;
        int i;
        bool is_shared;

        is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
+        num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;

-        for (i = 0; i < dev->num_ports; i++) {
+        for (i = 0; i < num_cnt_ports; i++) {
                err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
                if (err)
                        goto err_alloc;

                mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
                                      dev->port[i].cnts.offsets);

                err = mlx5_cmd_alloc_q_counter(dev->mdev,
                                               &dev->port[i].cnts.set_id,
                                               is_shared ?
                                               MLX5_SHARED_RESOURCE_UID : 0);
                if (err) {
                        mlx5_ib_warn(dev,
                                     "couldn't allocate queue counter for port %d, err %d\n",
                                     i + 1, err);
                        goto err_alloc;
                }
                dev->port[i].cnts.set_id_valid = true;
        }

        return 0;

err_alloc:
        mlx5_ib_dealloc_counters(dev);
        return err;
}

+static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
+                                                   u8 port_num)
+{
+        return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
+                                                   &dev->port[port_num].cnts;
+}
+
+/**
+ * mlx5_ib_get_counters_id - Returns counters id to use for device+port
+ * @dev: Pointer to mlx5 IB device
+ * @port_num: Zero based port number
+ *
+ * mlx5_ib_get_counters_id() Returns counters set id to use for given
+ * device port combination in switchdev and non switchdev mode of the
+ * parent device.
+ */
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
+{
+        const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
+
+        return cnts->set_id;
+}
+
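A hedged usage sketch of the helper exported above (the caller shown is hypothetical and not part of this diff): given a zero-based port number it returns the queue-counter set id for that port, or the single shared id when the device is in switchdev mode.

        /* Hypothetical caller, for illustration only: port_num here is the
         * usual one-based IB port number, so the zero-based value is passed.
         */
        u16 set_id = mlx5_ib_get_counters_id(dev, port_num - 1);
        /* set_id identifies the queue counter set that QP or flow setup code
         * could account against; the actual consumers are outside the hunks
         * shown here.
         */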
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
                                                    u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-        struct mlx5_ib_port *port = &dev->port[port_num - 1];
+        const struct mlx5_ib_counters *cnts;
+        bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);

-        /* We support only per port stats */
-        if (port_num == 0)
+        if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
                return NULL;

-        return rdma_alloc_hw_stats_struct(port->cnts.names,
-                                          port->cnts.num_q_counters +
-                                          port->cnts.num_cong_counters +
-                                          port->cnts.num_ext_ppcnt_counters,
+        cnts = get_counters(dev, port_num - 1);
+
+        return rdma_alloc_hw_stats_struct(cnts->names,
+                                          cnts->num_q_counters +
+                                          cnts->num_cong_counters +
+                                          cnts->num_ext_ppcnt_counters,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
-                                    struct mlx5_ib_port *port,
+                                    const struct mlx5_ib_counters *cnts,
                                    struct rdma_hw_stats *stats,
                                    u16 set_id)
{
        int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
        void *out;
        __be32 val;
        int ret, i;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
        if (ret)
                goto free;

-        for (i = 0; i < port->cnts.num_q_counters; i++) {
-                val = *(__be32 *)(out + port->cnts.offsets[i]);
+        for (i = 0; i < cnts->num_q_counters; i++) {
+                val = *(__be32 *)(out + cnts->offsets[i]);
                stats->value[i] = (u64)be32_to_cpu(val);
        }

free:
        kvfree(out);
        return ret;
}

static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
-                                            struct mlx5_ib_port *port,
+                                            const struct mlx5_ib_counters *cnts,
                                            struct rdma_hw_stats *stats)
{
-        int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+        int offset = cnts->num_q_counters + cnts->num_cong_counters;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int ret, i;
        void *out;

        out = kvzalloc(sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
        if (ret)
                goto free;

-        for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
+        for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
                stats->value[i + offset] =
                        be64_to_cpup((__be64 *)(out +
-                                     port->cnts.offsets[i + offset]));
-        }
-
+                                     cnts->offsets[i + offset]));
free:
        kvfree(out);
        return ret;
}

static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
                                struct rdma_hw_stats *stats,
                                u8 port_num, int index)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-        struct mlx5_ib_port *port = &dev->port[port_num - 1];
+        const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
        struct mlx5_core_dev *mdev;
        int ret, num_counters;
        u8 mdev_port_num;

        if (!stats)
                return -EINVAL;

-        num_counters = port->cnts.num_q_counters +
-                       port->cnts.num_cong_counters +
-                       port->cnts.num_ext_ppcnt_counters;
+        num_counters = cnts->num_q_counters +
+                       cnts->num_cong_counters +
+                       cnts->num_ext_ppcnt_counters;

        /* q_counters are per IB device, query the master mdev */
-        ret = mlx5_ib_query_q_counters(dev->mdev, port, stats,
-                                       port->cnts.set_id);
+        ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
        if (ret)
                return ret;

        if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
-                ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
+                ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
                if (ret)
                        return ret;
        }

        if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
                mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
                                                    &mdev_port_num);
                if (!mdev) {
                        /* If port is not affiliated yet, its in down state
                         * which doesn't have any counters yet, so it would be
                         * zero. So no need to read from the HCA.
                         */
                        goto done;
                }
                ret = mlx5_lag_query_cong_counters(dev->mdev,
                                                   stats->value +
-                                                   port->cnts.num_q_counters,
-                                                   port->cnts.num_cong_counters,
-                                                   port->cnts.offsets +
-                                                   port->cnts.num_q_counters);
+                                                   cnts->num_q_counters,
+                                                   cnts->num_cong_counters,
+                                                   cnts->offsets +
+                                                   cnts->num_q_counters);

                mlx5_ib_put_native_port_mdev(dev, port_num);
                if (ret)
                        return ret;
        }

done:
        return num_counters;
}

static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{
        struct mlx5_ib_dev *dev = to_mdev(counter->device);
-        struct mlx5_ib_port *port = &dev->port[counter->port - 1];
+        const struct mlx5_ib_counters *cnts =
+                get_counters(dev, counter->port - 1);

        /* Q counters are in the beginning of all counters */
-        return rdma_alloc_hw_stats_struct(port->cnts.names,
-                                          port->cnts.num_q_counters,
+        return rdma_alloc_hw_stats_struct(cnts->names,
+                                          cnts->num_q_counters,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
{
        struct mlx5_ib_dev *dev = to_mdev(counter->device);
-        struct mlx5_ib_port *port = &dev->port[counter->port - 1];
+        const struct mlx5_ib_counters *cnts =
+                get_counters(dev, counter->port - 1);

-        return mlx5_ib_query_q_counters(dev->mdev, port,
+        return mlx5_ib_query_q_counters(dev->mdev, cnts,
                                        counter->stats, counter->id);
}

static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
                                   struct ib_qp *qp)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        u16 cnt_set_id = 0;

--- 178 unchanged lines hidden ---

        mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);

        spin_lock(&port->mp.mpi_lock);
        if (!mpi->ibdev) {
                spin_unlock(&port->mp.mpi_lock);
                return;
        }

-        mpi->ibdev = NULL;
-
-        spin_unlock(&port->mp.mpi_lock);
        if (mpi->mdev_events.notifier_call)
                mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
        mpi->mdev_events.notifier_call = NULL;
+
+        mpi->ibdev = NULL;
+
+        spin_unlock(&port->mp.mpi_lock);
        mlx5_remove_netdev_notifier(ibdev, port_num);
        spin_lock(&port->mp.mpi_lock);

        comps = mpi->mdev_refcnt;
        if (comps) {
                mpi->unaffiliate = true;
                init_completion(&mpi->unref_comp);
                spin_unlock(&port->mp.mpi_lock);

--- 1108 unchanged lines hidden ---

        num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                        MLX5_CAP_GEN(mdev, num_vhca_ports));
        dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
        if (!dev)
                return NULL;
        dev->port = kcalloc(num_ports, sizeof(*dev->port),
                            GFP_KERNEL);
        if (!dev->port) {
-                ib_dealloc_device((struct ib_device *)dev);
+                ib_dealloc_device(&dev->ib_dev);
                return NULL;
        }

        dev->mdev = mdev;
        dev->num_ports = num_ports;

        return __mlx5_ib_add(dev, &pf_profile);
}

--- 75 unchanged lines hidden ---