main.c: diff of bcf87f1dbbec0d9abaf89073dd761a41876bc6c1 (old) against 8e6efa3a31f4a81a4d8817d68110446df383d049 (new)
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 117 unchanged lines hidden (view full) ---
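/*
 * get_port_state() resolves the port attributes through the device's
 * query_port callback instead of calling mlx5_ib_query_port() directly,
 * so representor devices that install mlx5_ib_rep_query_port() are
 * handled by the same helper.
 */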

static int get_port_state(struct ib_device *ibdev,
                          u8 port_num,
                          enum ib_port_state *state)
{
        struct ib_port_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
-        ret = mlx5_ib_query_port(ibdev, port_num, &attr);
+        ret = ibdev->query_port(ibdev, port_num, &attr);
        if (!ret)
                *state = attr.state;
        return ret;
}

static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{

--- 1130 unchanged lines hidden (view full) ---

                count = mlx5_core_reserved_gids_count(mdev);
                if (put_mdev)
                        mlx5_ib_put_native_port_mdev(dev, port);
                props->gid_tbl_len -= count;
        }
        return ret;
}

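/*
 * query_port implementation used for representor ports: only the
 * RoCE/Ethernet attributes are reported, and the GID table is exposed
 * as empty.
 */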
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
+                                  struct ib_port_attr *props)
+{
+        int ret;
+
+        /* Only link layer == ethernet is valid for representors */
+        ret = mlx5_query_port_roce(ibdev, port, props);
+        if (ret || !props)
+                return ret;
+
+        /* We don't support GIDS */
+        props->gid_tbl_len = 0;
+
+        return ret;
+}
+
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:

--- 2500 unchanged lines hidden (view full) ---

        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = get_core_cap_flags(ibdev);
        if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
                immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

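/*
 * get_port_immutable implementation for representor ports: only raw
 * packet capabilities (RDMA_CORE_PORT_RAW_PACKET) are advertised.
 */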
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
+                                   struct ib_port_immutable *immutable)
+{
+        struct ib_port_attr attr;
+        int err;
+
+        immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+        err = ib_query_port(ibdev, port_num, &attr);
+        if (err)
+                return err;
+
+        immutable->pkey_tbl_len = attr.pkey_tbl_len;
+        immutable->gid_tbl_len = attr.gid_tbl_len;
+        immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+        return 0;
+}
+
static void get_dev_fw_str(struct ib_device *ibdev, char *str)
{
        struct mlx5_ib_dev *dev =
                container_of(ibdev, struct mlx5_ib_dev, ib_dev);
        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
                 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
                 fw_rev_sub(dev->mdev));
}

--- 60 unchanged lines hidden (view full) ---

                dev->roce[port_num].nb.notifier_call = NULL;
        }
}

static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
{
        int err;

-        err = mlx5_add_netdev_notifier(dev, port_num);
-        if (err)
-                return err;
-
        if (MLX5_CAP_GEN(dev->mdev, roce)) {
                err = mlx5_nic_vport_enable_roce(dev->mdev);
                if (err)
-                        goto err_unregister_netdevice_notifier;
+                        return err;
        }

        err = mlx5_eth_lag_init(dev);
        if (err)
                goto err_disable_roce;

        return 0;

err_disable_roce:
        if (MLX5_CAP_GEN(dev->mdev, roce))
                mlx5_nic_vport_disable_roce(dev->mdev);

-err_unregister_netdevice_notifier:
-        mlx5_remove_netdev_notifier(dev, port_num);
        return err;
}

static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
        mlx5_eth_lag_cleanup(dev);
        if (MLX5_CAP_GEN(dev->mdev, roce))
                mlx5_nic_vport_disable_roce(dev->mdev);

--- 756 unchanged lines hidden (view full) ---

        dev->ib_dev.uverbs_ex_cmd_mask =
                (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
                (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
                (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
                (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
                (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

        dev->ib_dev.query_device = mlx5_ib_query_device;
-        dev->ib_dev.query_port = mlx5_ib_query_port;
        dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
        dev->ib_dev.query_gid = mlx5_ib_query_gid;
        dev->ib_dev.add_gid = mlx5_ib_add_gid;
        dev->ib_dev.del_gid = mlx5_ib_del_gid;
        dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
        dev->ib_dev.modify_device = mlx5_ib_modify_device;
        dev->ib_dev.modify_port = mlx5_ib_modify_port;
        dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;

--- 26 unchanged lines hidden (view full) ---

        dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
        dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
        dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
        dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
        dev->ib_dev.process_mad = mlx5_ib_process_mad;
        dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
        dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
        dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
-        dev->ib_dev.get_port_immutable = mlx5_port_immutable;
        dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
        dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
        if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
                dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;

        if (mlx5_core_is_pf(mdev)) {
                dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
                dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;

--- 34 unchanged lines hidden (view full) ---

        if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
            (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
             MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
                mutex_init(&dev->lb_mutex);

        return 0;
}

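/*
 * query_port and get_port_immutable differ between the regular device
 * and representor devices, so they are installed by dedicated
 * "non-default callbacks" stages below instead of by the generic caps
 * stage.
 */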
+static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
+{
+        dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+        dev->ib_dev.query_port = mlx5_ib_query_port;
+
+        return 0;
+}
+
+static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+{
+        dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable;
+        dev->ib_dev.query_port = mlx5_ib_rep_query_port;
+
+        return 0;
+}
+
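/*
 * Ethernet/RoCE setup shared by the regular and representor init paths:
 * per-port roce state, the WQ and RWQ indirection table verbs, and the
 * netdev notifier registration.
 */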
+static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
+                                          u8 port_num)
+{
+        int i;
+
+        for (i = 0; i < dev->num_ports; i++) {
+                dev->roce[i].dev = dev;
+                dev->roce[i].native_port_num = i + 1;
+                dev->roce[i].last_port_state = IB_PORT_DOWN;
+        }
+
+        dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
+        dev->ib_dev.create_wq = mlx5_ib_create_wq;
+        dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
+        dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
+        dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
+        dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
+
+        dev->ib_dev.uverbs_ex_cmd_mask |=
+                (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+                (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+                (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+                (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+                (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+
+        return mlx5_add_netdev_notifier(dev, port_num);
+}
+
+static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+        u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
+        mlx5_remove_netdev_notifier(dev, port_num);
+}
+
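/*
 * RoCE stage hooks for representor devices: only the Ethernet link
 * layer is expected, and the work is delegated to the common helpers
 * above.
 */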
+int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+{
+        struct mlx5_core_dev *mdev = dev->mdev;
+        enum rdma_link_layer ll;
+        int port_type_cap;
+        int err = 0;
+        u8 port_num;
+
+        port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+        port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+        ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+        if (ll == IB_LINK_LAYER_ETHERNET)
+                err = mlx5_ib_stage_common_roce_init(dev, port_num);
+
+        return err;
+}
+
+void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+        mlx5_ib_stage_common_roce_cleanup(dev);
+}
+
static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_core_dev *mdev = dev->mdev;
        enum rdma_link_layer ll;
        int port_type_cap;
        u8 port_num;
        int err;
-        int i;

        port_num = mlx5_core_native_port_num(dev->mdev) - 1;
        port_type_cap = MLX5_CAP_GEN(mdev, port_type);
        ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

        if (ll == IB_LINK_LAYER_ETHERNET) {
-                for (i = 0; i < dev->num_ports; i++) {
-                        dev->roce[i].dev = dev;
-                        dev->roce[i].native_port_num = i + 1;
-                        dev->roce[i].last_port_state = IB_PORT_DOWN;
-                }
+                err = mlx5_ib_stage_common_roce_init(dev, port_num);
+                if (err)
+                        return err;

-                dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
-                dev->ib_dev.create_wq = mlx5_ib_create_wq;
-                dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
-                dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
-                dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
-                dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
-                dev->ib_dev.uverbs_ex_cmd_mask |=
-                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
-                        (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
-                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
-                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
-                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
                err = mlx5_enable_eth(dev, port_num);
                if (err)
-                        return err;
+                        goto cleanup;
        }

        return 0;
+cleanup:
+        mlx5_ib_stage_common_roce_cleanup(dev);
+
+        return err;
}

static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
{
        struct mlx5_core_dev *mdev = dev->mdev;
        enum rdma_link_layer ll;
        int port_type_cap;
        u8 port_num;

        port_num = mlx5_core_native_port_num(dev->mdev) - 1;
        port_type_cap = MLX5_CAP_GEN(mdev, port_type);
        ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

        if (ll == IB_LINK_LAYER_ETHERNET) {
                mlx5_disable_eth(dev);
-                mlx5_remove_netdev_notifier(dev, port_num);
+                mlx5_ib_stage_common_roce_cleanup(dev);
        }
}

static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
        return create_dev_resources(&dev->devr);
}

--- 189 unchanged lines hidden (view full) ---

                     mlx5_ib_stage_init_init,
                     mlx5_ib_stage_init_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
                     mlx5_ib_stage_flow_db_init,
                     mlx5_ib_stage_flow_db_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_CAPS,
                     mlx5_ib_stage_caps_init,
                     NULL),
+        STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+                     mlx5_ib_stage_non_default_cb,
+                     NULL),
        STAGE_CREATE(MLX5_IB_STAGE_ROCE,
                     mlx5_ib_stage_roce_init,
                     mlx5_ib_stage_roce_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
                     mlx5_ib_stage_dev_res_init,
                     mlx5_ib_stage_dev_res_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_ODP,
                     mlx5_ib_stage_odp_init,

--- 138 unchanged lines hidden ---
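/*
 * Hypothetical sketch, not part of this diff: a representor profile
 * would be expected to reuse the same stage list but plug in the
 * representor callbacks defined above, e.g.:
 *
 *      STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
 *                   mlx5_ib_stage_rep_non_default_cb,
 *                   NULL),
 *      STAGE_CREATE(MLX5_IB_STAGE_ROCE,
 *                   mlx5_ib_stage_rep_roce_init,
 *                   mlx5_ib_stage_rep_roce_cleanup),
 */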