qp.c: 00af6729b52ede86a08173c8d5f2c8cd9fa3390d (old) → c0a6b5ecc5b7dd028c2921415ea036074a8f8b00 (new)
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 2395 unchanged lines hidden (view full) ---

2404 struct mlx5_ib_qp *qp,
2405 struct mlx5_create_qp_params *params)
2406{
2407 struct ib_qp_init_attr *attr = params->attr;
2408 struct mlx5_ib_create_qp *ucmd = params->ucmd;
2409 u32 uidx = params->uidx;
2410 void *dctc;
2411
2412 if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct))
2413 return -EOPNOTSUPP;
2414
2415 qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
2416 if (!qp->dct.in)
2417 return -ENOMEM;
2418
2419 MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
2420 dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
2421 MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
2422 MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
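The guard added at 2412-2413 is the substantive change in this hunk: when the device's two ports are bonded (LAG) but the firmware does not advertise the lag_dct capability, DCT creation now fails up front with -EOPNOTSUPP, before the kzalloc and the firmware command are attempted. A self-contained sketch of this fail-early capability gate; struct my_dev, my_cap_supported() and MY_CAP_LAG_DCT are hypothetical stand-ins for the mdev handle and MLX5_CAP_GEN(dev->mdev, lag_dct):

    #include <errno.h>
    #include <stdbool.h>

    /* Hypothetical device handle; my_cap_supported() stands in for a
     * firmware capability query such as MLX5_CAP_GEN(mdev, lag_dct). */
    struct my_dev { bool lag_active; unsigned int caps; };
    #define MY_CAP_LAG_DCT (1u << 0)

    static bool my_cap_supported(struct my_dev *dev, unsigned int cap)
    {
            return dev->caps & cap;
    }

    static int create_dct_like(struct my_dev *dev)
    {
            if (dev->lag_active && !my_cap_supported(dev, MY_CAP_LAG_DCT))
                    return -EOPNOTSUPP;     /* fail early: nothing to unwind */

            /* ... only now allocate and program the object ... */
            return 0;
    }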

--- 663 unchanged lines hidden (view full) ---

3086}
3087
3088enum {
3089 MLX5_PATH_FLAG_FL = 1 << 0,
3090 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
3091 MLX5_PATH_FLAG_COUNTER = 1 << 2,
3092};
3093
3094static int ib_to_mlx5_rate_map(u8 rate)
3095{
3096 switch (rate) {
3097 case IB_RATE_PORT_CURRENT:
3098 return 0;
3099 case IB_RATE_56_GBPS:
3100 return 1;
3101 case IB_RATE_25_GBPS:
3102 return 2;
3103 case IB_RATE_100_GBPS:
3104 return 3;
3105 case IB_RATE_200_GBPS:
3106 return 4;
3107 case IB_RATE_50_GBPS:
3108 return 5;
3109 default:
3110 return rate + MLX5_STAT_RATE_OFFSET;
3111 };
3112
3113 return 0;
3114}
3115
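ib_to_mlx5_rate_map() is new in this revision. The IB rate enum grew non-contiguously over time: the original rates (2.5 through 120 Gb/s) occupy enum values 2-10 and translate to the device's stat-rate encoding by adding MLX5_STAT_RATE_OFFSET, while five later additions (56, 25, 100, 200 and 50 Gb/s) sit at enum values 12-20 and, judging by this hunk, are encoded by the hardware with the dedicated low codes 1-5, which the offset arithmetic can never produce. (The stray semicolon after the switch and the unreachable final return 0 are in the committed code as shown.) A self-contained sketch of the mapping, with enum values restated from include/rdma/ib_verbs.h and MLX5_STAT_RATE_OFFSET assumed to be 5 as in include/linux/mlx5/qp.h:

    #include <stdio.h>

    #define MLX5_STAT_RATE_OFFSET 5 /* as in include/linux/mlx5/qp.h */

    /* Subset of enum ib_rate, values restated from include/rdma/ib_verbs.h. */
    enum ib_rate {
            IB_RATE_PORT_CURRENT = 0,
            IB_RATE_2_5_GBPS = 2,
            IB_RATE_10_GBPS = 3,
            IB_RATE_56_GBPS = 12,
            IB_RATE_25_GBPS = 15,
            IB_RATE_100_GBPS = 16,
            IB_RATE_200_GBPS = 17,
            IB_RATE_50_GBPS = 20,
    };

    static int ib_to_mlx5_rate_map(unsigned char rate)
    {
            switch (rate) {
            case IB_RATE_PORT_CURRENT: return 0;
            case IB_RATE_56_GBPS: return 1;
            case IB_RATE_25_GBPS: return 2;
            case IB_RATE_100_GBPS: return 3;
            case IB_RATE_200_GBPS: return 4;
            case IB_RATE_50_GBPS: return 5;
            default: /* original contiguous rates keep the offset rule */
                    return rate + MLX5_STAT_RATE_OFFSET;
            }
    }

    int main(void)
    {
            /* 25 Gb/s is enum value 15; the old arithmetic would yield 20,
             * while the explicit map yields the hardware code 2. */
            printf("25G: map=%d, old offset rule=%d\n",
                   ib_to_mlx5_rate_map(IB_RATE_25_GBPS),
                   IB_RATE_25_GBPS + MLX5_STAT_RATE_OFFSET);
            printf("10G: map=%d\n", ib_to_mlx5_rate_map(IB_RATE_10_GBPS));
            return 0;
    }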
3091static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
3092{
3093 if (rate == IB_RATE_PORT_CURRENT)
3094 return 0;
3095
3096 if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
3097 return -EINVAL;
3098
3099 while (rate != IB_RATE_PORT_CURRENT &&
3100 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
3101 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
3102 --rate;
3103
3104 return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
3105}
3106
3116static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
3117{
3118 u32 stat_rate_support;
3119
3120 if (rate == IB_RATE_PORT_CURRENT)
3121 return 0;
3122
3123 if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
3124 return -EINVAL;
3125
3126 stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
3127 while (rate != IB_RATE_PORT_CURRENT &&
3128 !(1 << ib_to_mlx5_rate_map(rate) & stat_rate_support))
3129 --rate;
3130
3131 return ib_to_mlx5_rate_map(rate);
3132}
3133
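In the reworked ib_rate_to_mlx5(), the support test and the returned encoding now go through the same helper: the loop masks 1 << ib_to_mlx5_rate_map(rate) against the cached stat_rate_support capability field and walks the enum downward until it reaches a supported code (or IB_RATE_PORT_CURRENT, which always maps to 0), so the check and the final encoding can no longer disagree for the non-contiguous rates. Extending the sketch above, the fallback can be exercised in isolation; the support mask here is made up for illustration:

    /* Append to the previous sketch. Walks 'rate' down the enum until its
     * mapped mlx5 code is set in the (illustrative) support bitmask. */
    static int pick_supported_rate(unsigned char rate,
                                   unsigned int stat_rate_support)
    {
            while (rate != IB_RATE_PORT_CURRENT &&
                   !(1u << ib_to_mlx5_rate_map(rate) & stat_rate_support))
                    --rate;
            return ib_to_mlx5_rate_map(rate);
    }

Worth noting: as in the original, the walk follows enum order, and for the later non-contiguous values enum order is not strictly speed order (for example IB_RATE_25_GBPS = 15 sits above IB_RATE_168_GBPS = 14).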
3134static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
3135 struct mlx5_ib_sq *sq, u8 sl,
3136 struct ib_pd *pd)
3137{
3138 void *in;
3139 void *tisc;

--- 525 unchanged lines hidden (view full) ---

3665 tx_port_affinity = &ucontext->tx_port_affinity;
3666 else
3667 tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
3668
3669 return (unsigned int)atomic_add_return(1, tx_port_affinity) %
3670 MLX5_MAX_PORTS + 1;
3671}
3672
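get_tx_affinity_rr() (context above, 3665-3671) implements the round robin: it atomically bumps a counter, kept in the ucontext for userspace QPs or in the device's per-port roce state otherwise, and folds the result into a port number in 1..MLX5_MAX_PORTS. The hunks below widen who participates: qp_supports_affinity() now also admits DCI QPs, and further down the DCT modify path picks its port through the same counter whenever LAG is active. A standalone sketch of the arithmetic with C11 atomics, assuming MLX5_MAX_PORTS is 2 (its value for these two-port devices):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_PORTS 2     /* assumption: MLX5_MAX_PORTS == 2 */

    static atomic_uint tx_port_affinity;   /* one shared counter */

    /* Mirrors: atomic_add_return(1, counter) % MLX5_MAX_PORTS + 1.
     * atomic_fetch_add returns the old value, so add 1 to get the
     * post-increment value that the kernel's atomic_add_return yields. */
    static unsigned int next_port(void)
    {
            return (atomic_fetch_add(&tx_port_affinity, 1) + 1) % MAX_PORTS + 1;
    }

    int main(void)
    {
            for (int i = 0; i < 4; i++)
                    printf("QP %d -> port %u\n", i, next_port()); /* 2 1 2 1 */
            return 0;
    }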
3646static bool qp_supports_affinity(struct ib_qp *qp)
3647{
3648 if ((qp->qp_type == IB_QPT_RC) ||
3649 (qp->qp_type == IB_QPT_UD) ||
3650 (qp->qp_type == IB_QPT_UC) ||
3651 (qp->qp_type == IB_QPT_RAW_PACKET) ||
3652 (qp->qp_type == IB_QPT_XRC_INI) ||
3653 (qp->qp_type == IB_QPT_XRC_TGT))
3654 return true;
3655 return false;
3656}
3657
3673static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
3674{
3675 if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) ||
3676 (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) ||
3677 (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) ||
3678 (qp->type == MLX5_IB_QPT_DCI))
3679 return true;
3680 return false;
3681}
3682
3658static unsigned int get_tx_affinity(struct ib_qp *qp,
3659 const struct ib_qp_attr *attr,
3660 int attr_mask, u8 init,
3661 struct ib_udata *udata)
3662{
3663 struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
3664 udata, struct mlx5_ib_ucontext, ibucontext);
3665 struct mlx5_ib_dev *dev = to_mdev(qp->device);
3666 struct mlx5_ib_qp *mqp = to_mqp(qp);
3667 struct mlx5_ib_qp_base *qp_base;
3668 unsigned int tx_affinity;
3669
3670 if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
3671 qp_supports_affinity(qp)))
3696 qp_supports_affinity(mqp)))
3672 return 0;
3673
3674 if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
3675 tx_affinity = mqp->gsi_lag_port;
3676 else if (init)
3677 tx_affinity = get_tx_affinity_rr(dev, udata);
3678 else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
3679 tx_affinity =

--- 476 unchanged lines hidden (view full) ---

4156 atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
4157 if (atomic_mode < 0)
4158 return -EOPNOTSUPP;
4159
4160 MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
4161 MLX5_SET(dctc, dctc, rae, 1);
4162 }
4163 MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
4164 MLX5_SET(dctc, dctc, port, attr->port_num);
4189 if (mlx5_lag_is_active(dev->mdev))
4190 MLX5_SET(dctc, dctc, port,
4191 get_tx_affinity_rr(dev, udata));
4192 else
4193 MLX5_SET(dctc, dctc, port, attr->port_num);
4165
4166 set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
4167 MLX5_SET(dctc, dctc, counter_set_id, set_id);
4168 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4169 struct mlx5_ib_modify_qp_resp resp = {};
4170 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
4171 u32 min_resp_len = offsetofend(typeof(resp), dctn);
4172

--- 538 unchanged lines hidden (view full) ---

4711 struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
4712
4713 if (!MLX5_CAP_GEN(dev->mdev, xrc))
4714 return -EOPNOTSUPP;
4715
4716 return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
4717}
4718
4719void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
4720{
4721 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
4722 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
4723
4724 mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
4725}
4726
4748int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
4749{
4750 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
4751 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
4752
4753 return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
4754}
4755
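mlx5_ib_dealloc_xrcd() now returns the status of the XRCD destroy command instead of discarding it, matching the RDMA core's move to int-returning destroy callbacks: a firmware failure can be reported to the core rather than silently leaving a stale object behind. The essential contract, as a sketch with hypothetical fw_destroy() and struct my_obj names:

    #include <stdlib.h>

    struct my_obj { int hw_handle; void *host_state; };

    /* Stand-in for a firmware destroy command returning 0 or -errno. */
    static int fw_destroy(int hw_handle)
    {
            (void)hw_handle;        /* pretend the command succeeded */
            return 0;
    }

    static int my_dealloc(struct my_obj *obj)
    {
            int err;

            err = fw_destroy(obj->hw_handle);
            if (err)
                    return err;     /* caller keeps the object; nothing freed */

            free(obj->host_state);  /* safe: HW no longer references it */
            return 0;
    }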
4727static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
4728{
4729 struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
4730 struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
4731 struct ib_event event;
4732

--- 183 unchanged lines hidden (view full) ---

4916 struct ib_udata *udata,
4917 struct mlx5_ib_rwq *rwq)
4918{
4919 struct mlx5_ib_dev *dev = to_mdev(pd->device);
4920 struct mlx5_ib_create_wq ucmd = {};
4921 int err;
4922 size_t required_cmd_sz;
4923
4924 required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
4925 + sizeof(ucmd.single_stride_log_num_of_bytes);
4953 required_cmd_sz = offsetofend(struct mlx5_ib_create_wq,
4954 single_stride_log_num_of_bytes);
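From here on the patch replaces each open-coded offsetof-plus-sizeof minimum-length computation with offsetofend(); the same substitution recurs below for mlx5_ib_create_wq_resp, mlx5_ib_create_rwq_ind_tbl_resp and mlx5_ib_modify_wq. These sizes guard the inlen/outlen checks that keep the ABI compatible with older and newer userspace, and offsetofend(), from the kernel's <linux/stddef.h>, is exactly the old arithmetic with the member named once. A userspace demonstration of the equivalence, restating the kernel's definition:

    #include <stdio.h>
    #include <stddef.h>

    /* offsetofend() as defined in the kernel's <linux/stddef.h>. */
    #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

    struct cmd {
            unsigned int a;
            unsigned int b;
            unsigned long long tail;
    };

    int main(void)
    {
            /* Both expressions name the first byte past 'b'. */
            printf("%zu == %zu\n",
                   offsetof(struct cmd, b) + sizeof(((struct cmd *)0)->b),
                   (size_t)offsetofend(struct cmd, b));
            return 0;
    }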
4926 if (udata->inlen < required_cmd_sz) {
4927 mlx5_ib_dbg(dev, "invalid inlen\n");
4928 return -EINVAL;
4929 }
4930
4931 if (udata->inlen > sizeof(ucmd) &&
4932 !ib_is_udata_cleared(udata, sizeof(ucmd),
4933 udata->inlen - sizeof(ucmd))) {

--- 67 unchanged lines hidden (view full) ---

5001 struct mlx5_ib_rwq *rwq;
5002 struct mlx5_ib_create_wq_resp resp = {};
5003 size_t min_resp_len;
5004 int err;
5005
5006 if (!udata)
5007 return ERR_PTR(-ENOSYS);
5008
5009 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
5038 min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
5010 if (udata->outlen && udata->outlen < min_resp_len)
5011 return ERR_PTR(-EINVAL);
5012
5013 if (!capable(CAP_SYS_RAWIO) &&
5014 init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
5015 return ERR_PTR(-EPERM);
5016
5017 dev = to_mdev(pd->device);

--- 13 unchanged lines hidden (view full) ---

5031 mlx5_ib_dbg(dev, "unsupported wq type %d\n",
5032 init_attr->wq_type);
5033 return ERR_PTR(-EINVAL);
5034 }
5035
5036 rwq->ibwq.wq_num = rwq->core_qp.qpn;
5037 rwq->ibwq.state = IB_WQS_RESET;
5038 if (udata->outlen) {
5039 resp.response_length = offsetof(typeof(resp), response_length) +
5040 sizeof(resp.response_length);
5068 resp.response_length = offsetofend(
5069 struct mlx5_ib_create_wq_resp, response_length);
5041 err = ib_copy_to_udata(udata, &resp, resp.response_length);
5042 if (err)
5043 goto err_copy;
5044 }
5045
5046 rwq->core_qp.event = mlx5_ib_wq_event;
5047 rwq->ibwq.event_handler = init_attr->event_handler;
5048 return &rwq->ibwq;
5049
5050err_copy:
5051 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5052err_user_rq:
5053 destroy_user_rq(dev, pd, rwq, udata);
5054err:
5055 kfree(rwq);
5056 return ERR_PTR(err);
5057}
5058
5059void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
5060{
5061 struct mlx5_ib_dev *dev = to_mdev(wq->device);
5062 struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5063
5064 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5065 destroy_user_rq(dev, wq->pd, rwq, udata);
5066 kfree(rwq);
5067}
5068
5088int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
5089{
5090 struct mlx5_ib_dev *dev = to_mdev(wq->device);
5091 struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5092 int ret;
5093
5094 ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5095 if (ret)
5096 return ret;
5097 destroy_user_rq(dev, wq->pd, rwq, udata);
5098 kfree(rwq);
5099 return 0;
5100}
5101
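mlx5_ib_destroy_wq() gets the same fail-able-destroy conversion, and the ordering matters: the RQ is destroyed in firmware first, and only on success does the driver release the user buffers (destroy_user_rq) and free rwq. Bailing out before the frees means a firmware error leaves the WQ intact rather than freeing memory the device may still be writing into; the trade-off is that the object stays allocated until a later destroy succeeds.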
5069struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
5070 struct ib_rwq_ind_table_init_attr *init_attr,
5071 struct ib_udata *udata)
5072{
5073 struct mlx5_ib_dev *dev = to_mdev(device);
5074 struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
5075 int sz = 1 << init_attr->log_ind_tbl_size;
5076 struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
5077 size_t min_resp_len;
5078 int inlen;
5079 int err;
5080 int i;
5081 u32 *in;
5082 void *rqtc;
5083
5084 if (udata->inlen > 0 &&
5085 !ib_is_udata_cleared(udata, 0,
5086 udata->inlen))
5087 return ERR_PTR(-EOPNOTSUPP);
5088
5089 if (init_attr->log_ind_tbl_size >
5090 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
5091 mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
5092 init_attr->log_ind_tbl_size,
5093 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
5094 return ERR_PTR(-EINVAL);
5095 }
5096
5097 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
5098 if (udata->outlen && udata->outlen < min_resp_len)
5099 return ERR_PTR(-EINVAL);
5100
5101 rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
5102 if (!rwq_ind_tbl)
5103 return ERR_PTR(-ENOMEM);
5104
5105 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
5106 in = kvzalloc(inlen, GFP_KERNEL);
5107 if (!in) {
5108 err = -ENOMEM;
5109 goto err;
5110 }
5111
5112 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
5113
5114 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
5115 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
5116
5117 for (i = 0; i < sz; i++)
5118 MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
5119
5120 rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
5121 MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
5122
5123 err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
5124 kvfree(in);
5125
5126 if (err)
5127 goto err;
5128
5129 rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
5130 if (udata->outlen) {
5131 resp.response_length = offsetof(typeof(resp), response_length) +
5132 sizeof(resp.response_length);
5133 err = ib_copy_to_udata(udata, &resp, resp.response_length);
5134 if (err)
5135 goto err_copy;
5136 }
5137
5138 return &rwq_ind_tbl->ib_rwq_ind_tbl;
5139
5140err_copy:
5141 mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5142err:
5143 kfree(rwq_ind_tbl);
5144 return ERR_PTR(err);
5145}
5146
5102int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
5103 struct ib_rwq_ind_table_init_attr *init_attr,
5104 struct ib_udata *udata)
5105{
5106 struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
5107 to_mrwq_ind_table(ib_rwq_ind_table);
5108 struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
5109 int sz = 1 << init_attr->log_ind_tbl_size;
5110 struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
5111 size_t min_resp_len;
5112 int inlen;
5113 int err;
5114 int i;
5115 u32 *in;
5116 void *rqtc;
5117
5118 if (udata->inlen > 0 &&
5119 !ib_is_udata_cleared(udata, 0,
5120 udata->inlen))
5121 return -EOPNOTSUPP;
5122
5123 if (init_attr->log_ind_tbl_size >
5124 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
5125 mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
5126 init_attr->log_ind_tbl_size,
5127 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
5128 return -EINVAL;
5129 }
5130
5131 min_resp_len =
5132 offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
5133 if (udata->outlen && udata->outlen < min_resp_len)
5134 return -EINVAL;
5135
5136 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
5137 in = kvzalloc(inlen, GFP_KERNEL);
5138 if (!in)
5139 return -ENOMEM;
5140
5141 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
5142
5143 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
5144 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
5145
5146 for (i = 0; i < sz; i++)
5147 MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
5148
5149 rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
5150 MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
5151
5152 err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
5153 kvfree(in);
5154 if (err)
5155 return err;
5156
5157 rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
5158 if (udata->outlen) {
5159 resp.response_length =
5160 offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
5161 response_length);
5162 err = ib_copy_to_udata(udata, &resp, resp.response_length);
5163 if (err)
5164 goto err_copy;
5165 }
5166
5167 return 0;
5168
5169err_copy:
5170 mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5171 return err;
5172}
5173
5147int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
5148{
5149 struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
5150 struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
5151
5152 mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5153
5154 kfree(rwq_ind_tbl);
5155 return 0;
5156}
5157
5174int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
5175{
5176 struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
5177 struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
5178
5179 return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5180}
5181
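The create verb above has been converted from "allocate and return an ib_rwq_ind_table *" to "initialize a core-allocated object and return int": the RDMA core now allocates the driver-sized structure up front, so the kzalloc/kfree pair, the err: label and the ERR_PTR() plumbing on the old side all disappear, and every error path simply returns a code. The driver recovers its wrapper from the embedded core struct with the usual container_of() idiom behind to_mrwq_ind_table(); a compact illustration with abbreviated struct layouts (the field sets here are stand-ins, not the full driver definitions):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_rwq_ind_table { int ind_tbl_num; };

    struct mlx5_ib_rwq_ind_table {
            struct ib_rwq_ind_table ib_rwq_ind_tbl; /* embedded core object */
            unsigned int rqtn;
            unsigned short uid;
    };

    static struct mlx5_ib_rwq_ind_table *
    to_mrwq_ind_table(struct ib_rwq_ind_table *ibrwq)
    {
            return container_of(ibrwq, struct mlx5_ib_rwq_ind_table,
                                ib_rwq_ind_tbl);
    }

    int main(void)
    {
            struct mlx5_ib_rwq_ind_table tbl = { .rqtn = 7 };
            struct ib_rwq_ind_table *core = &tbl.ib_rwq_ind_tbl;

            /* Round-trip: core pointer back to the driver wrapper. */
            printf("rqtn = %u\n", to_mrwq_ind_table(core)->rqtn);
            return 0;
    }

For this to work the core has to know the driver structure's size, which the driver advertises in its ib_device_ops (via INIT_RDMA_OBJ_SIZE); that wiring lives outside this file.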
5158int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
5159 u32 wq_attr_mask, struct ib_udata *udata)
5160{
5161 struct mlx5_ib_dev *dev = to_mdev(wq->device);
5162 struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5163 struct mlx5_ib_modify_wq ucmd = {};
5164 size_t required_cmd_sz;
5165 int curr_wq_state;
5166 int wq_state;
5167 int inlen;
5168 int err;
5169 void *rqc;
5170 void *in;
5171
5172 required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
5196 required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved);
5173 if (udata->inlen < required_cmd_sz)
5174 return -EINVAL;
5175
5176 if (udata->inlen > sizeof(ucmd) &&
5177 !ib_is_udata_cleared(udata, sizeof(ucmd),
5178 udata->inlen - sizeof(ucmd)))
5179 return -EOPNOTSUPP;
5180

--- 235 unchanged lines hidden ---