1e126ba97SEli Cohen /* 26cf0a15fSSaeed Mahameed * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3e126ba97SEli Cohen * 4e126ba97SEli Cohen * This software is available to you under a choice of one of two 5e126ba97SEli Cohen * licenses. You may choose to be licensed under the terms of the GNU 6e126ba97SEli Cohen * General Public License (GPL) Version 2, available from the file 7e126ba97SEli Cohen * COPYING in the main directory of this source tree, or the 8e126ba97SEli Cohen * OpenIB.org BSD license below: 9e126ba97SEli Cohen * 10e126ba97SEli Cohen * Redistribution and use in source and binary forms, with or 11e126ba97SEli Cohen * without modification, are permitted provided that the following 12e126ba97SEli Cohen * conditions are met: 13e126ba97SEli Cohen * 14e126ba97SEli Cohen * - Redistributions of source code must retain the above 15e126ba97SEli Cohen * copyright notice, this list of conditions and the following 16e126ba97SEli Cohen * disclaimer. 17e126ba97SEli Cohen * 18e126ba97SEli Cohen * - Redistributions in binary form must reproduce the above 19e126ba97SEli Cohen * copyright notice, this list of conditions and the following 20e126ba97SEli Cohen * disclaimer in the documentation and/or other materials 21e126ba97SEli Cohen * provided with the distribution. 22e126ba97SEli Cohen * 23e126ba97SEli Cohen * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e126ba97SEli Cohen * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e126ba97SEli Cohen * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e126ba97SEli Cohen * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e126ba97SEli Cohen * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e126ba97SEli Cohen * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e126ba97SEli Cohen * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e126ba97SEli Cohen * SOFTWARE. 
31e126ba97SEli Cohen */ 32e126ba97SEli Cohen 33b6459415SJakub Kicinski #include <linux/etherdevice.h> 34e126ba97SEli Cohen #include <rdma/ib_umem.h> 352811ba51SAchiad Shochat #include <rdma/ib_cache.h> 36cfb5e088SHaggai Abramovsky #include <rdma/ib_user_verbs.h> 37d14133ddSMark Zhang #include <rdma/rdma_counter.h> 38c2e53b2cSYishai Hadas #include <linux/mlx5/fs.h> 39e126ba97SEli Cohen #include "mlx5_ib.h" 40b96c9ddeSMark Bloch #include "ib_rep.h" 4164825827SLeon Romanovsky #include "counters.h" 42443c1cf9SYishai Hadas #include "cmd.h" 438a8a5d37SAharon Landau #include "umr.h" 44333fbaa0SLeon Romanovsky #include "qp.h" 45029e88fdSLeon Romanovsky #include "wr.h" 46e126ba97SEli Cohen 47e126ba97SEli Cohen enum { 48e126ba97SEli Cohen MLX5_IB_ACK_REQ_FREQ = 8, 49e126ba97SEli Cohen }; 50e126ba97SEli Cohen 51e126ba97SEli Cohen enum { 52e126ba97SEli Cohen MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83, 53e126ba97SEli Cohen MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, 54e126ba97SEli Cohen MLX5_IB_LINK_TYPE_IB = 0, 55e126ba97SEli Cohen MLX5_IB_LINK_TYPE_ETH = 1 56e126ba97SEli Cohen }; 57e126ba97SEli Cohen 58eb49ab0cSAlex Vesker enum raw_qp_set_mask_map { 59eb49ab0cSAlex Vesker MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0, 607d29f349SBodong Wang MLX5_RAW_QP_RATE_LIMIT = 1UL << 1, 61eb49ab0cSAlex Vesker }; 62eb49ab0cSAlex Vesker 63742948ccSOr Har-Toov enum { 64742948ccSOr Har-Toov MLX5_QP_RM_GO_BACK_N = 0x1, 65742948ccSOr Har-Toov }; 66742948ccSOr Har-Toov 670680efa2SAlex Vesker struct mlx5_modify_raw_qp_param { 680680efa2SAlex Vesker u16 operation; 69eb49ab0cSAlex Vesker 70eb49ab0cSAlex Vesker u32 set_mask; /* raw_qp_set_mask_map */ 7161147f39SBodong Wang 7261147f39SBodong Wang struct mlx5_rate_limit rl; 7361147f39SBodong Wang 74eb49ab0cSAlex Vesker u8 rq_q_ctr_id; 751fb7f897SMark Bloch u32 port; 760680efa2SAlex Vesker }; 770680efa2SAlex Vesker 78312b8f79SMark Zhang struct mlx5_ib_qp_event_work { 79312b8f79SMark Zhang struct work_struct work; 80312b8f79SMark Zhang struct mlx5_core_qp *qp; 
81312b8f79SMark Zhang int type; 82312b8f79SMark Zhang }; 83312b8f79SMark Zhang 84312b8f79SMark Zhang static struct workqueue_struct *mlx5_ib_qp_event_wq; 85312b8f79SMark Zhang 8689ea94a7SMaor Gottlieb static void get_cqs(enum ib_qp_type qp_type, 8789ea94a7SMaor Gottlieb struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 8889ea94a7SMaor Gottlieb struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq); 8989ea94a7SMaor Gottlieb 90e126ba97SEli Cohen static int is_qp0(enum ib_qp_type qp_type) 91e126ba97SEli Cohen { 92e126ba97SEli Cohen return qp_type == IB_QPT_SMI; 93e126ba97SEli Cohen } 94e126ba97SEli Cohen 95e126ba97SEli Cohen static int is_sqp(enum ib_qp_type qp_type) 96e126ba97SEli Cohen { 97e126ba97SEli Cohen return is_qp0(qp_type) || is_qp1(qp_type); 98e126ba97SEli Cohen } 99e126ba97SEli Cohen 100c1395a2aSHaggai Eran /** 101fbeb4075SMoni Shoua * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of) from user WQ 102fbeb4075SMoni Shoua * to kernel buffer 103c1395a2aSHaggai Eran * 104fbeb4075SMoni Shoua * @umem: User space memory where the WQ is 105fbeb4075SMoni Shoua * @buffer: buffer to copy to 106fbeb4075SMoni Shoua * @buflen: buffer length 107fbeb4075SMoni Shoua * @wqe_index: index of WQE to copy from 108fbeb4075SMoni Shoua * @wq_offset: offset to start of WQ 109fbeb4075SMoni Shoua * @wq_wqe_cnt: number of WQEs in WQ 110fbeb4075SMoni Shoua * @wq_wqe_shift: log2 of WQE size 111fbeb4075SMoni Shoua * @bcnt: number of bytes to copy 112fbeb4075SMoni Shoua * @bytes_copied: number of bytes to copy (return value) 113c1395a2aSHaggai Eran * 114fbeb4075SMoni Shoua * Copies from start of WQE bcnt or less bytes. 115fbeb4075SMoni Shoua * Does not gurantee to copy the entire WQE. 116c1395a2aSHaggai Eran * 117fbeb4075SMoni Shoua * Return: zero on success, or an error code. 
118c1395a2aSHaggai Eran */ 119da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer, 120da9ee9d8SMoni Shoua size_t buflen, int wqe_index, 121da9ee9d8SMoni Shoua int wq_offset, int wq_wqe_cnt, 122da9ee9d8SMoni Shoua int wq_wqe_shift, int bcnt, 123fbeb4075SMoni Shoua size_t *bytes_copied) 124c1395a2aSHaggai Eran { 125fbeb4075SMoni Shoua size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift); 126fbeb4075SMoni Shoua size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift); 127fbeb4075SMoni Shoua size_t copy_length; 128c1395a2aSHaggai Eran int ret; 129c1395a2aSHaggai Eran 130fbeb4075SMoni Shoua /* don't copy more than requested, more than buffer length or 131fbeb4075SMoni Shoua * beyond WQ end 132fbeb4075SMoni Shoua */ 133fbeb4075SMoni Shoua copy_length = min_t(u32, buflen, wq_end - offset); 134fbeb4075SMoni Shoua copy_length = min_t(u32, copy_length, bcnt); 135c1395a2aSHaggai Eran 136fbeb4075SMoni Shoua ret = ib_umem_copy_from(buffer, umem, offset, copy_length); 137c1395a2aSHaggai Eran if (ret) 138c1395a2aSHaggai Eran return ret; 139c1395a2aSHaggai Eran 140fbeb4075SMoni Shoua if (!ret && bytes_copied) 141fbeb4075SMoni Shoua *bytes_copied = copy_length; 142c1395a2aSHaggai Eran 143fbeb4075SMoni Shoua return 0; 144fbeb4075SMoni Shoua } 145fbeb4075SMoni Shoua 146da9ee9d8SMoni Shoua static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, 147da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 148da9ee9d8SMoni Shoua { 149da9ee9d8SMoni Shoua struct mlx5_wqe_ctrl_seg *ctrl; 150da9ee9d8SMoni Shoua size_t bytes_copied = 0; 151da9ee9d8SMoni Shoua size_t wqe_length; 152da9ee9d8SMoni Shoua void *p; 153da9ee9d8SMoni Shoua int ds; 154da9ee9d8SMoni Shoua 155da9ee9d8SMoni Shoua wqe_index = wqe_index & qp->sq.fbc.sz_m1; 156da9ee9d8SMoni Shoua 157da9ee9d8SMoni Shoua /* read the control segment first */ 158da9ee9d8SMoni Shoua p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index); 159da9ee9d8SMoni Shoua ctrl = 
p; 160da9ee9d8SMoni Shoua ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; 161da9ee9d8SMoni Shoua wqe_length = ds * MLX5_WQE_DS_UNITS; 162da9ee9d8SMoni Shoua 163da9ee9d8SMoni Shoua /* read rest of WQE if it spreads over more than one stride */ 164da9ee9d8SMoni Shoua while (bytes_copied < wqe_length) { 165da9ee9d8SMoni Shoua size_t copy_length = 166da9ee9d8SMoni Shoua min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB); 167da9ee9d8SMoni Shoua 168da9ee9d8SMoni Shoua if (!copy_length) 169da9ee9d8SMoni Shoua break; 170da9ee9d8SMoni Shoua 171da9ee9d8SMoni Shoua memcpy(buffer + bytes_copied, p, copy_length); 172da9ee9d8SMoni Shoua bytes_copied += copy_length; 173da9ee9d8SMoni Shoua 174da9ee9d8SMoni Shoua wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1; 175da9ee9d8SMoni Shoua p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index); 176da9ee9d8SMoni Shoua } 177da9ee9d8SMoni Shoua *bc = bytes_copied; 178da9ee9d8SMoni Shoua return 0; 179da9ee9d8SMoni Shoua } 180da9ee9d8SMoni Shoua 181da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, 182da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 183fbeb4075SMoni Shoua { 184fbeb4075SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 185fbeb4075SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 186fbeb4075SMoni Shoua struct mlx5_ib_wq *wq = &qp->sq; 187fbeb4075SMoni Shoua struct mlx5_wqe_ctrl_seg *ctrl; 188fbeb4075SMoni Shoua size_t bytes_copied; 189fbeb4075SMoni Shoua size_t bytes_copied2; 190fbeb4075SMoni Shoua size_t wqe_length; 191fbeb4075SMoni Shoua int ret; 192fbeb4075SMoni Shoua int ds; 193fbeb4075SMoni Shoua 194fbeb4075SMoni Shoua /* at first read as much as possible */ 195da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 196da9ee9d8SMoni Shoua wq->offset, wq->wqe_cnt, 197da9ee9d8SMoni Shoua wq->wqe_shift, buflen, 198fbeb4075SMoni Shoua &bytes_copied); 199fbeb4075SMoni Shoua if (ret) 200fbeb4075SMoni Shoua return ret; 
201fbeb4075SMoni Shoua 202fbeb4075SMoni Shoua /* we need at least control segment size to proceed */ 203fbeb4075SMoni Shoua if (bytes_copied < sizeof(*ctrl)) 204fbeb4075SMoni Shoua return -EINVAL; 205fbeb4075SMoni Shoua 206fbeb4075SMoni Shoua ctrl = buffer; 207fbeb4075SMoni Shoua ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; 208c1395a2aSHaggai Eran wqe_length = ds * MLX5_WQE_DS_UNITS; 209fbeb4075SMoni Shoua 210fbeb4075SMoni Shoua /* if we copied enough then we are done */ 211fbeb4075SMoni Shoua if (bytes_copied >= wqe_length) { 212fbeb4075SMoni Shoua *bc = bytes_copied; 213fbeb4075SMoni Shoua return 0; 214c1395a2aSHaggai Eran } 215c1395a2aSHaggai Eran 216fbeb4075SMoni Shoua /* otherwise this a wrapped around wqe 217fbeb4075SMoni Shoua * so read the remaining bytes starting 218fbeb4075SMoni Shoua * from wqe_index 0 219fbeb4075SMoni Shoua */ 220da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied, 221da9ee9d8SMoni Shoua buflen - bytes_copied, 0, wq->offset, 222da9ee9d8SMoni Shoua wq->wqe_cnt, wq->wqe_shift, 223fbeb4075SMoni Shoua wqe_length - bytes_copied, 224fbeb4075SMoni Shoua &bytes_copied2); 225c1395a2aSHaggai Eran 226c1395a2aSHaggai Eran if (ret) 227c1395a2aSHaggai Eran return ret; 228fbeb4075SMoni Shoua *bc = bytes_copied + bytes_copied2; 229fbeb4075SMoni Shoua return 0; 230fbeb4075SMoni Shoua } 231c1395a2aSHaggai Eran 232da9ee9d8SMoni Shoua int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, 233da9ee9d8SMoni Shoua size_t buflen, size_t *bc) 234da9ee9d8SMoni Shoua { 235da9ee9d8SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 236da9ee9d8SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 237da9ee9d8SMoni Shoua 238da9ee9d8SMoni Shoua if (buflen < sizeof(struct mlx5_wqe_ctrl_seg)) 239da9ee9d8SMoni Shoua return -EINVAL; 240da9ee9d8SMoni Shoua 241da9ee9d8SMoni Shoua if (!umem) 242da9ee9d8SMoni Shoua return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer, 243da9ee9d8SMoni Shoua buflen, 
bc); 244da9ee9d8SMoni Shoua 245da9ee9d8SMoni Shoua return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc); 246da9ee9d8SMoni Shoua } 247da9ee9d8SMoni Shoua 248da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, 249da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 250fbeb4075SMoni Shoua { 251fbeb4075SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 252fbeb4075SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 253fbeb4075SMoni Shoua struct mlx5_ib_wq *wq = &qp->rq; 254fbeb4075SMoni Shoua size_t bytes_copied; 255fbeb4075SMoni Shoua int ret; 256fbeb4075SMoni Shoua 257da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 258da9ee9d8SMoni Shoua wq->offset, wq->wqe_cnt, 259da9ee9d8SMoni Shoua wq->wqe_shift, buflen, 260fbeb4075SMoni Shoua &bytes_copied); 261fbeb4075SMoni Shoua 262fbeb4075SMoni Shoua if (ret) 263fbeb4075SMoni Shoua return ret; 264fbeb4075SMoni Shoua *bc = bytes_copied; 265fbeb4075SMoni Shoua return 0; 266fbeb4075SMoni Shoua } 267fbeb4075SMoni Shoua 268da9ee9d8SMoni Shoua int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, 269da9ee9d8SMoni Shoua size_t buflen, size_t *bc) 270da9ee9d8SMoni Shoua { 271da9ee9d8SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 272da9ee9d8SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 273da9ee9d8SMoni Shoua struct mlx5_ib_wq *wq = &qp->rq; 274da9ee9d8SMoni Shoua size_t wqe_size = 1 << wq->wqe_shift; 275da9ee9d8SMoni Shoua 276da9ee9d8SMoni Shoua if (buflen < wqe_size) 277da9ee9d8SMoni Shoua return -EINVAL; 278da9ee9d8SMoni Shoua 279da9ee9d8SMoni Shoua if (!umem) 280da9ee9d8SMoni Shoua return -EOPNOTSUPP; 281da9ee9d8SMoni Shoua 282da9ee9d8SMoni Shoua return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc); 283da9ee9d8SMoni Shoua } 284da9ee9d8SMoni Shoua 285da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, 
286da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 287fbeb4075SMoni Shoua { 288fbeb4075SMoni Shoua struct ib_umem *umem = srq->umem; 289fbeb4075SMoni Shoua size_t bytes_copied; 290fbeb4075SMoni Shoua int ret; 291fbeb4075SMoni Shoua 292da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0, 293da9ee9d8SMoni Shoua srq->msrq.max, srq->msrq.wqe_shift, 294da9ee9d8SMoni Shoua buflen, &bytes_copied); 295fbeb4075SMoni Shoua 296fbeb4075SMoni Shoua if (ret) 297fbeb4075SMoni Shoua return ret; 298fbeb4075SMoni Shoua *bc = bytes_copied; 299fbeb4075SMoni Shoua return 0; 300c1395a2aSHaggai Eran } 301c1395a2aSHaggai Eran 302da9ee9d8SMoni Shoua int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer, 303da9ee9d8SMoni Shoua size_t buflen, size_t *bc) 304da9ee9d8SMoni Shoua { 305da9ee9d8SMoni Shoua struct ib_umem *umem = srq->umem; 306da9ee9d8SMoni Shoua size_t wqe_size = 1 << srq->msrq.wqe_shift; 307da9ee9d8SMoni Shoua 308da9ee9d8SMoni Shoua if (buflen < wqe_size) 309da9ee9d8SMoni Shoua return -EINVAL; 310da9ee9d8SMoni Shoua 311da9ee9d8SMoni Shoua if (!umem) 312da9ee9d8SMoni Shoua return -EOPNOTSUPP; 313da9ee9d8SMoni Shoua 314da9ee9d8SMoni Shoua return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc); 315da9ee9d8SMoni Shoua } 316da9ee9d8SMoni Shoua 3178067fd8bSPatrisious Haddad static void mlx5_ib_qp_err_syndrome(struct ib_qp *ibqp) 3188067fd8bSPatrisious Haddad { 3198067fd8bSPatrisious Haddad struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3208067fd8bSPatrisious Haddad int outlen = MLX5_ST_SZ_BYTES(query_qp_out); 3218067fd8bSPatrisious Haddad struct mlx5_ib_qp *qp = to_mqp(ibqp); 3228067fd8bSPatrisious Haddad void *pas_ext_union, *err_syn; 3238067fd8bSPatrisious Haddad u32 *outb; 3248067fd8bSPatrisious Haddad int err; 3258067fd8bSPatrisious Haddad 3268067fd8bSPatrisious Haddad if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) || 3278067fd8bSPatrisious Haddad !MLX5_CAP_GEN(dev->mdev, qp_error_syndrome)) 
3288067fd8bSPatrisious Haddad return; 3298067fd8bSPatrisious Haddad 3308067fd8bSPatrisious Haddad outb = kzalloc(outlen, GFP_KERNEL); 3318067fd8bSPatrisious Haddad if (!outb) 3328067fd8bSPatrisious Haddad return; 3338067fd8bSPatrisious Haddad 3348067fd8bSPatrisious Haddad err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen, 3358067fd8bSPatrisious Haddad true); 3368067fd8bSPatrisious Haddad if (err) 3378067fd8bSPatrisious Haddad goto out; 3388067fd8bSPatrisious Haddad 3398067fd8bSPatrisious Haddad pas_ext_union = 3408067fd8bSPatrisious Haddad MLX5_ADDR_OF(query_qp_out, outb, qp_pas_or_qpc_ext_and_pas); 3418067fd8bSPatrisious Haddad err_syn = MLX5_ADDR_OF(qpc_extension_and_pas_list_in, pas_ext_union, 3428067fd8bSPatrisious Haddad qpc_data_extension.error_syndrome); 3438067fd8bSPatrisious Haddad 3448067fd8bSPatrisious Haddad pr_err("%s/%d: QP %d error: %s (0x%x 0x%x 0x%x)\n", 3458067fd8bSPatrisious Haddad ibqp->device->name, ibqp->port, ibqp->qp_num, 3468067fd8bSPatrisious Haddad ib_wc_status_msg( 3478067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, syndrome)), 3488067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, vendor_error_syndrome), 3498067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, hw_syndrome_type), 3508067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, hw_error_syndrome)); 3518067fd8bSPatrisious Haddad out: 3528067fd8bSPatrisious Haddad kfree(outb); 3538067fd8bSPatrisious Haddad } 3548067fd8bSPatrisious Haddad 355312b8f79SMark Zhang static void mlx5_ib_handle_qp_event(struct work_struct *_work) 356e126ba97SEli Cohen { 357312b8f79SMark Zhang struct mlx5_ib_qp_event_work *qpe_work = 358312b8f79SMark Zhang container_of(_work, struct mlx5_ib_qp_event_work, work); 359312b8f79SMark Zhang struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; 360312b8f79SMark Zhang struct ib_event event = {}; 361e126ba97SEli Cohen 362e126ba97SEli Cohen event.device = ibqp->device; 363e126ba97SEli Cohen 
event.element.qp = ibqp; 364312b8f79SMark Zhang switch (qpe_work->type) { 365e126ba97SEli Cohen case MLX5_EVENT_TYPE_PATH_MIG: 366e126ba97SEli Cohen event.event = IB_EVENT_PATH_MIG; 367e126ba97SEli Cohen break; 368e126ba97SEli Cohen case MLX5_EVENT_TYPE_COMM_EST: 369e126ba97SEli Cohen event.event = IB_EVENT_COMM_EST; 370e126ba97SEli Cohen break; 371e126ba97SEli Cohen case MLX5_EVENT_TYPE_SQ_DRAINED: 372e126ba97SEli Cohen event.event = IB_EVENT_SQ_DRAINED; 373e126ba97SEli Cohen break; 374e126ba97SEli Cohen case MLX5_EVENT_TYPE_SRQ_LAST_WQE: 375e126ba97SEli Cohen event.event = IB_EVENT_QP_LAST_WQE_REACHED; 376e126ba97SEli Cohen break; 377e126ba97SEli Cohen case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: 378e126ba97SEli Cohen event.event = IB_EVENT_QP_FATAL; 379e126ba97SEli Cohen break; 380e126ba97SEli Cohen case MLX5_EVENT_TYPE_PATH_MIG_FAILED: 381e126ba97SEli Cohen event.event = IB_EVENT_PATH_MIG_ERR; 382e126ba97SEli Cohen break; 383e126ba97SEli Cohen case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: 384e126ba97SEli Cohen event.event = IB_EVENT_QP_REQ_ERR; 385e126ba97SEli Cohen break; 386e126ba97SEli Cohen case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: 387e126ba97SEli Cohen event.event = IB_EVENT_QP_ACCESS_ERR; 388e126ba97SEli Cohen break; 389e126ba97SEli Cohen default: 390312b8f79SMark Zhang pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", 391312b8f79SMark Zhang qpe_work->type, qpe_work->qp->qpn); 392312b8f79SMark Zhang goto out; 393e126ba97SEli Cohen } 394e126ba97SEli Cohen 3958067fd8bSPatrisious Haddad if ((event.event == IB_EVENT_QP_FATAL) || 3968067fd8bSPatrisious Haddad (event.event == IB_EVENT_QP_ACCESS_ERR)) 3978067fd8bSPatrisious Haddad mlx5_ib_qp_err_syndrome(ibqp); 3988067fd8bSPatrisious Haddad 399e126ba97SEli Cohen ibqp->event_handler(&event, ibqp->qp_context); 400312b8f79SMark Zhang 401312b8f79SMark Zhang out: 402312b8f79SMark Zhang mlx5_core_res_put(&qpe_work->qp->common); 403312b8f79SMark Zhang kfree(qpe_work); 404e126ba97SEli Cohen } 405312b8f79SMark Zhang 
406312b8f79SMark Zhang static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) 407312b8f79SMark Zhang { 408312b8f79SMark Zhang struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; 409312b8f79SMark Zhang struct mlx5_ib_qp_event_work *qpe_work; 410312b8f79SMark Zhang 411312b8f79SMark Zhang if (type == MLX5_EVENT_TYPE_PATH_MIG) { 412312b8f79SMark Zhang /* This event is only valid for trans_qps */ 413312b8f79SMark Zhang to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port; 414312b8f79SMark Zhang } 415312b8f79SMark Zhang 416312b8f79SMark Zhang if (!ibqp->event_handler) 417312b8f79SMark Zhang goto out_no_handler; 418312b8f79SMark Zhang 419312b8f79SMark Zhang qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC); 420312b8f79SMark Zhang if (!qpe_work) 421312b8f79SMark Zhang goto out_no_handler; 422312b8f79SMark Zhang 423312b8f79SMark Zhang qpe_work->qp = qp; 424312b8f79SMark Zhang qpe_work->type = type; 425312b8f79SMark Zhang INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event); 426312b8f79SMark Zhang queue_work(mlx5_ib_qp_event_wq, &qpe_work->work); 427312b8f79SMark Zhang return; 428312b8f79SMark Zhang 429312b8f79SMark Zhang out_no_handler: 430312b8f79SMark Zhang mlx5_core_res_put(&qp->common); 431e126ba97SEli Cohen } 432e126ba97SEli Cohen 433e126ba97SEli Cohen static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, 434e126ba97SEli Cohen int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) 435e126ba97SEli Cohen { 436e126ba97SEli Cohen int wqe_size; 437e126ba97SEli Cohen int wq_size; 438e126ba97SEli Cohen 439e126ba97SEli Cohen /* Sanity check RQ size before proceeding */ 440938fe83cSSaeed Mahameed if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) 441e126ba97SEli Cohen return -EINVAL; 442e126ba97SEli Cohen 443e126ba97SEli Cohen if (!has_rq) { 444e126ba97SEli Cohen qp->rq.max_gs = 0; 445e126ba97SEli Cohen qp->rq.wqe_cnt = 0; 446e126ba97SEli Cohen qp->rq.wqe_shift = 0; 4470540d814SNoa Osherovich cap->max_recv_wr = 0; 4480540d814SNoa 
Osherovich cap->max_recv_sge = 0; 449e126ba97SEli Cohen } else { 450c95e6d53SLeon Romanovsky int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE); 451c95e6d53SLeon Romanovsky 452e126ba97SEli Cohen if (ucmd) { 453e126ba97SEli Cohen qp->rq.wqe_cnt = ucmd->rq_wqe_count; 454002bf228SLeon Romanovsky if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) 455002bf228SLeon Romanovsky return -EINVAL; 456e126ba97SEli Cohen qp->rq.wqe_shift = ucmd->rq_wqe_shift; 457c95e6d53SLeon Romanovsky if ((1 << qp->rq.wqe_shift) / 458c95e6d53SLeon Romanovsky sizeof(struct mlx5_wqe_data_seg) < 459c95e6d53SLeon Romanovsky wq_sig) 460002bf228SLeon Romanovsky return -EINVAL; 461c95e6d53SLeon Romanovsky qp->rq.max_gs = 462c95e6d53SLeon Romanovsky (1 << qp->rq.wqe_shift) / 463c95e6d53SLeon Romanovsky sizeof(struct mlx5_wqe_data_seg) - 464c95e6d53SLeon Romanovsky wq_sig; 465e126ba97SEli Cohen qp->rq.max_post = qp->rq.wqe_cnt; 466e126ba97SEli Cohen } else { 467c95e6d53SLeon Romanovsky wqe_size = 468c95e6d53SLeon Romanovsky wq_sig ? 
sizeof(struct mlx5_wqe_signature_seg) : 469c95e6d53SLeon Romanovsky 0; 470e126ba97SEli Cohen wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); 471e126ba97SEli Cohen wqe_size = roundup_pow_of_two(wqe_size); 472e126ba97SEli Cohen wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; 473e126ba97SEli Cohen wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); 474e126ba97SEli Cohen qp->rq.wqe_cnt = wq_size / wqe_size; 475938fe83cSSaeed Mahameed if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { 476e126ba97SEli Cohen mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", 477e126ba97SEli Cohen wqe_size, 478938fe83cSSaeed Mahameed MLX5_CAP_GEN(dev->mdev, 479938fe83cSSaeed Mahameed max_wqe_sz_rq)); 480e126ba97SEli Cohen return -EINVAL; 481e126ba97SEli Cohen } 482e126ba97SEli Cohen qp->rq.wqe_shift = ilog2(wqe_size); 483c95e6d53SLeon Romanovsky qp->rq.max_gs = 484c95e6d53SLeon Romanovsky (1 << qp->rq.wqe_shift) / 485c95e6d53SLeon Romanovsky sizeof(struct mlx5_wqe_data_seg) - 486c95e6d53SLeon Romanovsky wq_sig; 487e126ba97SEli Cohen qp->rq.max_post = qp->rq.wqe_cnt; 488e126ba97SEli Cohen } 489e126ba97SEli Cohen } 490e126ba97SEli Cohen 491e126ba97SEli Cohen return 0; 492e126ba97SEli Cohen } 493e126ba97SEli Cohen 494f0313965SErez Shitrit static int sq_overhead(struct ib_qp_init_attr *attr) 495e126ba97SEli Cohen { 496618af384SAndi Shyti int size = 0; 497e126ba97SEli Cohen 498f0313965SErez Shitrit switch (attr->qp_type) { 499e126ba97SEli Cohen case IB_QPT_XRC_INI: 500b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_xrc_seg); 501df561f66SGustavo A. R. 
Silva fallthrough; 502e126ba97SEli Cohen case IB_QPT_RC: 503e126ba97SEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 50475c1657eSLeon Romanovsky max(sizeof(struct mlx5_wqe_atomic_seg) + 50575c1657eSLeon Romanovsky sizeof(struct mlx5_wqe_raddr_seg), 50675c1657eSLeon Romanovsky sizeof(struct mlx5_wqe_umr_ctrl_seg) + 507064e5262SIdan Burstein sizeof(struct mlx5_mkey_seg) + 508064e5262SIdan Burstein MLX5_IB_SQ_UMR_INLINE_THRESHOLD / 509064e5262SIdan Burstein MLX5_IB_UMR_OCTOWORD); 510e126ba97SEli Cohen break; 511e126ba97SEli Cohen 512b125a54bSEli Cohen case IB_QPT_XRC_TGT: 513b125a54bSEli Cohen return 0; 514b125a54bSEli Cohen 515e126ba97SEli Cohen case IB_QPT_UC: 516b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 51775c1657eSLeon Romanovsky max(sizeof(struct mlx5_wqe_raddr_seg), 5189e65dc37SEli Cohen sizeof(struct mlx5_wqe_umr_ctrl_seg) + 51975c1657eSLeon Romanovsky sizeof(struct mlx5_mkey_seg)); 520e126ba97SEli Cohen break; 521e126ba97SEli Cohen 522e126ba97SEli Cohen case IB_QPT_UD: 523f0313965SErez Shitrit if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) 524f0313965SErez Shitrit size += sizeof(struct mlx5_wqe_eth_pad) + 525f0313965SErez Shitrit sizeof(struct mlx5_wqe_eth_seg); 526df561f66SGustavo A. R. 
Silva fallthrough; 527e126ba97SEli Cohen case IB_QPT_SMI: 528d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: 529b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 530e126ba97SEli Cohen sizeof(struct mlx5_wqe_datagram_seg); 531e126ba97SEli Cohen break; 532e126ba97SEli Cohen 533e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: 534b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 535e126ba97SEli Cohen sizeof(struct mlx5_wqe_umr_ctrl_seg) + 536e126ba97SEli Cohen sizeof(struct mlx5_mkey_seg); 537e126ba97SEli Cohen break; 538e126ba97SEli Cohen 539e126ba97SEli Cohen default: 540e126ba97SEli Cohen return -EINVAL; 541e126ba97SEli Cohen } 542e126ba97SEli Cohen 543e126ba97SEli Cohen return size; 544e126ba97SEli Cohen } 545e126ba97SEli Cohen 546e126ba97SEli Cohen static int calc_send_wqe(struct ib_qp_init_attr *attr) 547e126ba97SEli Cohen { 548e126ba97SEli Cohen int inl_size = 0; 549e126ba97SEli Cohen int size; 550e126ba97SEli Cohen 551f0313965SErez Shitrit size = sq_overhead(attr); 552e126ba97SEli Cohen if (size < 0) 553e126ba97SEli Cohen return size; 554e126ba97SEli Cohen 555e126ba97SEli Cohen if (attr->cap.max_inline_data) { 556e126ba97SEli Cohen inl_size = size + sizeof(struct mlx5_wqe_inline_seg) + 557e126ba97SEli Cohen attr->cap.max_inline_data; 558e126ba97SEli Cohen } 559e126ba97SEli Cohen 560e126ba97SEli Cohen size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); 561c0a6cbb9SIsrael Rukshin if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN && 562e1e66cc2SSagi Grimberg ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE) 563e1e66cc2SSagi Grimberg return MLX5_SIG_WQE_SIZE; 564e1e66cc2SSagi Grimberg else 565e126ba97SEli Cohen return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); 566e126ba97SEli Cohen } 567e126ba97SEli Cohen 568288c01b7SEli Cohen static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size) 569288c01b7SEli Cohen { 570288c01b7SEli Cohen int max_sge; 571288c01b7SEli Cohen 572288c01b7SEli Cohen if 
(attr->qp_type == IB_QPT_RC) 573288c01b7SEli Cohen max_sge = (min_t(int, wqe_size, 512) - 574288c01b7SEli Cohen sizeof(struct mlx5_wqe_ctrl_seg) - 575288c01b7SEli Cohen sizeof(struct mlx5_wqe_raddr_seg)) / 576288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg); 577288c01b7SEli Cohen else if (attr->qp_type == IB_QPT_XRC_INI) 578288c01b7SEli Cohen max_sge = (min_t(int, wqe_size, 512) - 579288c01b7SEli Cohen sizeof(struct mlx5_wqe_ctrl_seg) - 580288c01b7SEli Cohen sizeof(struct mlx5_wqe_xrc_seg) - 581288c01b7SEli Cohen sizeof(struct mlx5_wqe_raddr_seg)) / 582288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg); 583288c01b7SEli Cohen else 584288c01b7SEli Cohen max_sge = (wqe_size - sq_overhead(attr)) / 585288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg); 586288c01b7SEli Cohen 587288c01b7SEli Cohen return min_t(int, max_sge, wqe_size - sq_overhead(attr) / 588288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg)); 589288c01b7SEli Cohen } 590288c01b7SEli Cohen 591e126ba97SEli Cohen static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 592e126ba97SEli Cohen struct mlx5_ib_qp *qp) 593e126ba97SEli Cohen { 594e126ba97SEli Cohen int wqe_size; 595e126ba97SEli Cohen int wq_size; 596e126ba97SEli Cohen 597e126ba97SEli Cohen if (!attr->cap.max_send_wr) 598e126ba97SEli Cohen return 0; 599e126ba97SEli Cohen 600e126ba97SEli Cohen wqe_size = calc_send_wqe(attr); 601e126ba97SEli Cohen mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size); 602e126ba97SEli Cohen if (wqe_size < 0) 603e126ba97SEli Cohen return wqe_size; 604e126ba97SEli Cohen 605938fe83cSSaeed Mahameed if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 606b125a54bSEli Cohen mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", 607938fe83cSSaeed Mahameed wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 608e126ba97SEli Cohen return -EINVAL; 609e126ba97SEli Cohen } 610e126ba97SEli Cohen 611f0313965SErez Shitrit qp->max_inline_data = wqe_size - sq_overhead(attr) - 612e126ba97SEli Cohen sizeof(struct 
mlx5_wqe_inline_seg); 613e126ba97SEli Cohen attr->cap.max_inline_data = qp->max_inline_data; 614e126ba97SEli Cohen 615e126ba97SEli Cohen wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 616e126ba97SEli Cohen qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 617938fe83cSSaeed Mahameed if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 6181974ab9dSBart Van Assche mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n", 6191974ab9dSBart Van Assche attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB, 620938fe83cSSaeed Mahameed qp->sq.wqe_cnt, 621938fe83cSSaeed Mahameed 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 622b125a54bSEli Cohen return -ENOMEM; 623b125a54bSEli Cohen } 624e126ba97SEli Cohen qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 625288c01b7SEli Cohen qp->sq.max_gs = get_send_sge(attr, wqe_size); 626288c01b7SEli Cohen if (qp->sq.max_gs < attr->cap.max_send_sge) 627288c01b7SEli Cohen return -ENOMEM; 628288c01b7SEli Cohen 629288c01b7SEli Cohen attr->cap.max_send_sge = qp->sq.max_gs; 630b125a54bSEli Cohen qp->sq.max_post = wq_size / wqe_size; 631b125a54bSEli Cohen attr->cap.max_send_wr = qp->sq.max_post; 632e126ba97SEli Cohen 633e126ba97SEli Cohen return wq_size; 634e126ba97SEli Cohen } 635e126ba97SEli Cohen 636e126ba97SEli Cohen static int set_user_buf_size(struct mlx5_ib_dev *dev, 637e126ba97SEli Cohen struct mlx5_ib_qp *qp, 63819098df2Smajd@mellanox.com struct mlx5_ib_create_qp *ucmd, 6390fb2ed66Smajd@mellanox.com struct mlx5_ib_qp_base *base, 6400fb2ed66Smajd@mellanox.com struct ib_qp_init_attr *attr) 641e126ba97SEli Cohen { 642e126ba97SEli Cohen int desc_sz = 1 << qp->sq.wqe_shift; 643e126ba97SEli Cohen 644938fe83cSSaeed Mahameed if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 645e126ba97SEli Cohen mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", 646938fe83cSSaeed Mahameed desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 647e126ba97SEli Cohen return -EINVAL; 648e126ba97SEli Cohen } 649e126ba97SEli 
Cohen 650af8b38edSGal Pressman if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) { 651af8b38edSGal Pressman mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n", 652af8b38edSGal Pressman ucmd->sq_wqe_count); 653e126ba97SEli Cohen return -EINVAL; 654e126ba97SEli Cohen } 655e126ba97SEli Cohen 656e126ba97SEli Cohen qp->sq.wqe_cnt = ucmd->sq_wqe_count; 657e126ba97SEli Cohen 658938fe83cSSaeed Mahameed if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 659e126ba97SEli Cohen mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", 660938fe83cSSaeed Mahameed qp->sq.wqe_cnt, 661938fe83cSSaeed Mahameed 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 662e126ba97SEli Cohen return -EINVAL; 663e126ba97SEli Cohen } 664e126ba97SEli Cohen 665c2e53b2cSYishai Hadas if (attr->qp_type == IB_QPT_RAW_PACKET || 6662be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 6670fb2ed66Smajd@mellanox.com base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift; 6680fb2ed66Smajd@mellanox.com qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6; 6690fb2ed66Smajd@mellanox.com } else { 67019098df2Smajd@mellanox.com base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + 671e126ba97SEli Cohen (qp->sq.wqe_cnt << 6); 6720fb2ed66Smajd@mellanox.com } 673e126ba97SEli Cohen 674e126ba97SEli Cohen return 0; 675e126ba97SEli Cohen } 676e126ba97SEli Cohen 677e126ba97SEli Cohen static int qp_has_rq(struct ib_qp_init_attr *attr) 678e126ba97SEli Cohen { 679e126ba97SEli Cohen if (attr->qp_type == IB_QPT_XRC_INI || 680e126ba97SEli Cohen attr->qp_type == IB_QPT_XRC_TGT || attr->srq || 681e126ba97SEli Cohen attr->qp_type == MLX5_IB_QPT_REG_UMR || 682e126ba97SEli Cohen !attr->cap.max_recv_wr) 683e126ba97SEli Cohen return 0; 684e126ba97SEli Cohen 685e126ba97SEli Cohen return 1; 686e126ba97SEli Cohen } 687e126ba97SEli Cohen 6880b80c14fSEli Cohen enum { 6890b80c14fSEli Cohen /* this is the first blue flame register in the array of bfregs assigned 6900b80c14fSEli Cohen * 
to a processes. Since we do not use it for blue flame but rather 6910b80c14fSEli Cohen * regular 64 bit doorbells, we do not need a lock for maintaiing 6920b80c14fSEli Cohen * "odd/even" order 6930b80c14fSEli Cohen */ 6940b80c14fSEli Cohen NUM_NON_BLUE_FLAME_BFREGS = 1, 6950b80c14fSEli Cohen }; 6960b80c14fSEli Cohen 697b037c29aSEli Cohen static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi) 698b037c29aSEli Cohen { 69984aa6c39SLeon Romanovsky return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * 70084aa6c39SLeon Romanovsky bfregi->num_static_sys_pages * MLX5_NON_FP_BFREGS_PER_UAR; 701b037c29aSEli Cohen } 702b037c29aSEli Cohen 703b037c29aSEli Cohen static int num_med_bfreg(struct mlx5_ib_dev *dev, 704b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 705c1be5232SEli Cohen { 706c1be5232SEli Cohen int n; 707c1be5232SEli Cohen 708b037c29aSEli Cohen n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs - 709b037c29aSEli Cohen NUM_NON_BLUE_FLAME_BFREGS; 710c1be5232SEli Cohen 711c1be5232SEli Cohen return n >= 0 ? n : 0; 712c1be5232SEli Cohen } 713c1be5232SEli Cohen 71418b0362eSYishai Hadas static int first_med_bfreg(struct mlx5_ib_dev *dev, 71518b0362eSYishai Hadas struct mlx5_bfreg_info *bfregi) 71618b0362eSYishai Hadas { 71718b0362eSYishai Hadas return num_med_bfreg(dev, bfregi) ? 
1 : -ENOMEM; 71818b0362eSYishai Hadas } 71918b0362eSYishai Hadas 720b037c29aSEli Cohen static int first_hi_bfreg(struct mlx5_ib_dev *dev, 721b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 722c1be5232SEli Cohen { 723c1be5232SEli Cohen int med; 724c1be5232SEli Cohen 725b037c29aSEli Cohen med = num_med_bfreg(dev, bfregi); 726b037c29aSEli Cohen return ++med; 727c1be5232SEli Cohen } 728c1be5232SEli Cohen 729b037c29aSEli Cohen static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev, 730b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 731e126ba97SEli Cohen { 732e126ba97SEli Cohen int i; 733e126ba97SEli Cohen 734b037c29aSEli Cohen for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) { 735b037c29aSEli Cohen if (!bfregi->count[i]) { 7362f5ff264SEli Cohen bfregi->count[i]++; 737e126ba97SEli Cohen return i; 738e126ba97SEli Cohen } 739e126ba97SEli Cohen } 740e126ba97SEli Cohen 741e126ba97SEli Cohen return -ENOMEM; 742e126ba97SEli Cohen } 743e126ba97SEli Cohen 744b037c29aSEli Cohen static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, 745b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 746e126ba97SEli Cohen { 74718b0362eSYishai Hadas int minidx = first_med_bfreg(dev, bfregi); 748e126ba97SEli Cohen int i; 749e126ba97SEli Cohen 75018b0362eSYishai Hadas if (minidx < 0) 75118b0362eSYishai Hadas return minidx; 75218b0362eSYishai Hadas 75318b0362eSYishai Hadas for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) { 7542f5ff264SEli Cohen if (bfregi->count[i] < bfregi->count[minidx]) 755e126ba97SEli Cohen minidx = i; 7560b80c14fSEli Cohen if (!bfregi->count[minidx]) 7570b80c14fSEli Cohen break; 758e126ba97SEli Cohen } 759e126ba97SEli Cohen 7602f5ff264SEli Cohen bfregi->count[minidx]++; 761e126ba97SEli Cohen return minidx; 762e126ba97SEli Cohen } 763e126ba97SEli Cohen 764b037c29aSEli Cohen static int alloc_bfreg(struct mlx5_ib_dev *dev, 765ffaf58deSLeon Romanovsky struct mlx5_bfreg_info *bfregi) 766e126ba97SEli Cohen { 767ffaf58deSLeon Romanovsky int 
bfregn = -ENOMEM; 768e126ba97SEli Cohen 7690a2fd01cSYishai Hadas if (bfregi->lib_uar_dyn) 7700a2fd01cSYishai Hadas return -EINVAL; 7710a2fd01cSYishai Hadas 7722f5ff264SEli Cohen mutex_lock(&bfregi->lock); 773ffaf58deSLeon Romanovsky if (bfregi->ver >= 2) { 774ffaf58deSLeon Romanovsky bfregn = alloc_high_class_bfreg(dev, bfregi); 775ffaf58deSLeon Romanovsky if (bfregn < 0) 776ffaf58deSLeon Romanovsky bfregn = alloc_med_class_bfreg(dev, bfregi); 777ffaf58deSLeon Romanovsky } 778ffaf58deSLeon Romanovsky 779ffaf58deSLeon Romanovsky if (bfregn < 0) { 7800b80c14fSEli Cohen BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1); 7812f5ff264SEli Cohen bfregn = 0; 7822f5ff264SEli Cohen bfregi->count[bfregn]++; 783e126ba97SEli Cohen } 7842f5ff264SEli Cohen mutex_unlock(&bfregi->lock); 785e126ba97SEli Cohen 7862f5ff264SEli Cohen return bfregn; 787e126ba97SEli Cohen } 788e126ba97SEli Cohen 7894ed131d0SYishai Hadas void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn) 790e126ba97SEli Cohen { 7912f5ff264SEli Cohen mutex_lock(&bfregi->lock); 792b037c29aSEli Cohen bfregi->count[bfregn]--; 7932f5ff264SEli Cohen mutex_unlock(&bfregi->lock); 794e126ba97SEli Cohen } 795e126ba97SEli Cohen 796e126ba97SEli Cohen static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) 797e126ba97SEli Cohen { 798e126ba97SEli Cohen switch (state) { 799e126ba97SEli Cohen case IB_QPS_RESET: return MLX5_QP_STATE_RST; 800e126ba97SEli Cohen case IB_QPS_INIT: return MLX5_QP_STATE_INIT; 801e126ba97SEli Cohen case IB_QPS_RTR: return MLX5_QP_STATE_RTR; 802e126ba97SEli Cohen case IB_QPS_RTS: return MLX5_QP_STATE_RTS; 803e126ba97SEli Cohen case IB_QPS_SQD: return MLX5_QP_STATE_SQD; 804e126ba97SEli Cohen case IB_QPS_SQE: return MLX5_QP_STATE_SQER; 805e126ba97SEli Cohen case IB_QPS_ERR: return MLX5_QP_STATE_ERR; 806e126ba97SEli Cohen default: return -1; 807e126ba97SEli Cohen } 808e126ba97SEli Cohen } 809e126ba97SEli Cohen 810e126ba97SEli Cohen static int to_mlx5_st(enum ib_qp_type 
type) 811e126ba97SEli Cohen { 812e126ba97SEli Cohen switch (type) { 813e126ba97SEli Cohen case IB_QPT_RC: return MLX5_QP_ST_RC; 814e126ba97SEli Cohen case IB_QPT_UC: return MLX5_QP_ST_UC; 815e126ba97SEli Cohen case IB_QPT_UD: return MLX5_QP_ST_UD; 816e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR; 817e126ba97SEli Cohen case IB_QPT_XRC_INI: 818e126ba97SEli Cohen case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; 819e126ba97SEli Cohen case IB_QPT_SMI: return MLX5_QP_ST_QP0; 820d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1; 821c32a4f29SMoni Shoua case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI; 8223ae7e66aSLeon Romanovsky case IB_QPT_RAW_PACKET: return MLX5_QP_ST_RAW_ETHERTYPE; 823e126ba97SEli Cohen default: return -EINVAL; 824e126ba97SEli Cohen } 825e126ba97SEli Cohen } 826e126ba97SEli Cohen 82789ea94a7SMaor Gottlieb static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, 82889ea94a7SMaor Gottlieb struct mlx5_ib_cq *recv_cq); 82989ea94a7SMaor Gottlieb static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, 83089ea94a7SMaor Gottlieb struct mlx5_ib_cq *recv_cq); 83189ea94a7SMaor Gottlieb 8327c043e90SYishai Hadas int bfregn_to_uar_index(struct mlx5_ib_dev *dev, 83305f58cebSLeon Romanovsky struct mlx5_bfreg_info *bfregi, u32 bfregn, 8341ee47ab3SYishai Hadas bool dyn_bfreg) 835e126ba97SEli Cohen { 83605f58cebSLeon Romanovsky unsigned int bfregs_per_sys_page; 83705f58cebSLeon Romanovsky u32 index_of_sys_page; 83805f58cebSLeon Romanovsky u32 offset; 839b037c29aSEli Cohen 8400a2fd01cSYishai Hadas if (bfregi->lib_uar_dyn) 8410a2fd01cSYishai Hadas return -EINVAL; 8420a2fd01cSYishai Hadas 843b037c29aSEli Cohen bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * 844b037c29aSEli Cohen MLX5_NON_FP_BFREGS_PER_UAR; 845b037c29aSEli Cohen index_of_sys_page = bfregn / bfregs_per_sys_page; 846b037c29aSEli Cohen 84705f58cebSLeon Romanovsky if (dyn_bfreg) { 84805f58cebSLeon Romanovsky index_of_sys_page += 
bfregi->num_static_sys_pages; 84905f58cebSLeon Romanovsky 8507c043e90SYishai Hadas if (index_of_sys_page >= bfregi->num_sys_pages) 8517c043e90SYishai Hadas return -EINVAL; 8527c043e90SYishai Hadas 8531ee47ab3SYishai Hadas if (bfregn > bfregi->num_dyn_bfregs || 8541ee47ab3SYishai Hadas bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) { 8551ee47ab3SYishai Hadas mlx5_ib_dbg(dev, "Invalid dynamic uar index\n"); 8561ee47ab3SYishai Hadas return -EINVAL; 8571ee47ab3SYishai Hadas } 8581ee47ab3SYishai Hadas } 859b037c29aSEli Cohen 8601ee47ab3SYishai Hadas offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR; 861b037c29aSEli Cohen return bfregi->sys_pages[index_of_sys_page] + offset; 862e126ba97SEli Cohen } 863e126ba97SEli Cohen 864fe248c3aSMaor Gottlieb static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, 865bdeacabdSShamir Rabinovitch struct mlx5_ib_rwq *rwq, struct ib_udata *udata) 86679b20a6cSYishai Hadas { 867bdeacabdSShamir Rabinovitch struct mlx5_ib_ucontext *context = 868bdeacabdSShamir Rabinovitch rdma_udata_to_drv_context( 869bdeacabdSShamir Rabinovitch udata, 870bdeacabdSShamir Rabinovitch struct mlx5_ib_ucontext, 871bdeacabdSShamir Rabinovitch ibucontext); 87279b20a6cSYishai Hadas 873fe248c3aSMaor Gottlieb if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP) 874fe248c3aSMaor Gottlieb atomic_dec(&dev->delay_drop.rqs_cnt); 875fe248c3aSMaor Gottlieb 87679b20a6cSYishai Hadas mlx5_ib_db_unmap_user(context, &rwq->db); 87779b20a6cSYishai Hadas ib_umem_release(rwq->umem); 87879b20a6cSYishai Hadas } 87979b20a6cSYishai Hadas 88079b20a6cSYishai Hadas static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, 881b0ea0fa5SJason Gunthorpe struct ib_udata *udata, struct mlx5_ib_rwq *rwq, 88279b20a6cSYishai Hadas struct mlx5_ib_create_wq *ucmd) 88379b20a6cSYishai Hadas { 88489944450SShamir Rabinovitch struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 88589944450SShamir Rabinovitch udata, struct 
mlx5_ib_ucontext, ibucontext); 886ad480ea5SJason Gunthorpe unsigned long page_size = 0; 88779b20a6cSYishai Hadas u32 offset = 0; 88879b20a6cSYishai Hadas int err; 88979b20a6cSYishai Hadas 89079b20a6cSYishai Hadas if (!ucmd->buf_addr) 89179b20a6cSYishai Hadas return -EINVAL; 89279b20a6cSYishai Hadas 893c320e527SMoni Shoua rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0); 89479b20a6cSYishai Hadas if (IS_ERR(rwq->umem)) { 89579b20a6cSYishai Hadas mlx5_ib_dbg(dev, "umem_get failed\n"); 89679b20a6cSYishai Hadas err = PTR_ERR(rwq->umem); 89779b20a6cSYishai Hadas return err; 89879b20a6cSYishai Hadas } 89979b20a6cSYishai Hadas 900ad480ea5SJason Gunthorpe page_size = mlx5_umem_find_best_quantized_pgoff( 901ad480ea5SJason Gunthorpe rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT, 902ad480ea5SJason Gunthorpe page_offset, 64, &rwq->rq_page_offset); 903ad480ea5SJason Gunthorpe if (!page_size) { 90479b20a6cSYishai Hadas mlx5_ib_warn(dev, "bad offset\n"); 905ad480ea5SJason Gunthorpe err = -EINVAL; 90679b20a6cSYishai Hadas goto err_umem; 90779b20a6cSYishai Hadas } 90879b20a6cSYishai Hadas 909ad480ea5SJason Gunthorpe rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size); 910ad480ea5SJason Gunthorpe rwq->page_shift = order_base_2(page_size); 911ad480ea5SJason Gunthorpe rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT; 91279b20a6cSYishai Hadas rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE); 91379b20a6cSYishai Hadas 914f8fb3110SJason Gunthorpe mlx5_ib_dbg( 915f8fb3110SJason Gunthorpe dev, 916ad480ea5SJason Gunthorpe "addr 0x%llx, size %zd, npages %zu, page_size %ld, ncont %d, offset %d\n", 91779b20a6cSYishai Hadas (unsigned long long)ucmd->buf_addr, rwq->buf_size, 918ad480ea5SJason Gunthorpe ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas, 919f8fb3110SJason Gunthorpe offset); 92079b20a6cSYishai Hadas 9210bedd3d0SLang Cheng err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db); 92279b20a6cSYishai Hadas if (err) 
{ 92379b20a6cSYishai Hadas mlx5_ib_dbg(dev, "map failed\n"); 92479b20a6cSYishai Hadas goto err_umem; 92579b20a6cSYishai Hadas } 92679b20a6cSYishai Hadas 92779b20a6cSYishai Hadas return 0; 92879b20a6cSYishai Hadas 92979b20a6cSYishai Hadas err_umem: 93079b20a6cSYishai Hadas ib_umem_release(rwq->umem); 93179b20a6cSYishai Hadas return err; 93279b20a6cSYishai Hadas } 93379b20a6cSYishai Hadas 934b037c29aSEli Cohen static int adjust_bfregn(struct mlx5_ib_dev *dev, 935b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi, int bfregn) 936b037c29aSEli Cohen { 937b037c29aSEli Cohen return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR + 938b037c29aSEli Cohen bfregn % MLX5_NON_FP_BFREGS_PER_UAR; 939b037c29aSEli Cohen } 940b037c29aSEli Cohen 94198fc1126SLeon Romanovsky static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 942e126ba97SEli Cohen struct mlx5_ib_qp *qp, struct ib_udata *udata, 94376883a6cSLeon Romanovsky struct ib_qp_init_attr *attr, u32 **in, 94419098df2Smajd@mellanox.com struct mlx5_ib_create_qp_resp *resp, int *inlen, 94576883a6cSLeon Romanovsky struct mlx5_ib_qp_base *base, 94676883a6cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd) 947e126ba97SEli Cohen { 948e126ba97SEli Cohen struct mlx5_ib_ucontext *context; 94919098df2Smajd@mellanox.com struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer; 950a59b7b05SJason Gunthorpe unsigned int page_offset_quantized = 0; 951a59b7b05SJason Gunthorpe unsigned long page_size = 0; 9521ee47ab3SYishai Hadas int uar_index = 0; 9532f5ff264SEli Cohen int bfregn; 9549e9c47d0SEli Cohen int ncont = 0; 95509a7d9ecSSaeed Mahameed __be64 *pas; 95609a7d9ecSSaeed Mahameed void *qpc; 957e126ba97SEli Cohen int err; 9585aa3771dSYishai Hadas u16 uid; 959ac42a5eeSYishai Hadas u32 uar_flags; 960e126ba97SEli Cohen 96189944450SShamir Rabinovitch context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext, 96289944450SShamir Rabinovitch ibucontext); 96376883a6cSLeon Romanovsky uar_flags = qp->flags_en & 96476883a6cSLeon 
Romanovsky (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX); 965ac42a5eeSYishai Hadas switch (uar_flags) { 966ac42a5eeSYishai Hadas case MLX5_QP_FLAG_UAR_PAGE_INDEX: 96776883a6cSLeon Romanovsky uar_index = ucmd->bfreg_index; 968ac42a5eeSYishai Hadas bfregn = MLX5_IB_INVALID_BFREG; 969ac42a5eeSYishai Hadas break; 970ac42a5eeSYishai Hadas case MLX5_QP_FLAG_BFREG_INDEX: 9711ee47ab3SYishai Hadas uar_index = bfregn_to_uar_index(dev, &context->bfregi, 97276883a6cSLeon Romanovsky ucmd->bfreg_index, true); 9731ee47ab3SYishai Hadas if (uar_index < 0) 9741ee47ab3SYishai Hadas return uar_index; 9751ee47ab3SYishai Hadas bfregn = MLX5_IB_INVALID_BFREG; 976ac42a5eeSYishai Hadas break; 977ac42a5eeSYishai Hadas case 0: 9782be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 979ac42a5eeSYishai Hadas return -EINVAL; 980ffaf58deSLeon Romanovsky bfregn = alloc_bfreg(dev, &context->bfregi); 981ffaf58deSLeon Romanovsky if (bfregn < 0) 9822f5ff264SEli Cohen return bfregn; 983ac42a5eeSYishai Hadas break; 984ac42a5eeSYishai Hadas default: 985ac42a5eeSYishai Hadas return -EINVAL; 986e126ba97SEli Cohen } 987e126ba97SEli Cohen 9882f5ff264SEli Cohen mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index); 9891ee47ab3SYishai Hadas if (bfregn != MLX5_IB_INVALID_BFREG) 9901ee47ab3SYishai Hadas uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn, 9911ee47ab3SYishai Hadas false); 992e126ba97SEli Cohen 99348fea837SHaggai Eran qp->rq.offset = 0; 99448fea837SHaggai Eran qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 99548fea837SHaggai Eran qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 99648fea837SHaggai Eran 99776883a6cSLeon Romanovsky err = set_user_buf_size(dev, qp, ucmd, base, attr); 998e126ba97SEli Cohen if (err) 9992f5ff264SEli Cohen goto err_bfreg; 1000e126ba97SEli Cohen 100176883a6cSLeon Romanovsky if (ucmd->buf_addr && ubuffer->buf_size) { 100276883a6cSLeon Romanovsky ubuffer->buf_addr = ucmd->buf_addr; 1003a59b7b05SJason Gunthorpe 
ubuffer->umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr, 1004a59b7b05SJason Gunthorpe ubuffer->buf_size, 0); 1005a59b7b05SJason Gunthorpe if (IS_ERR(ubuffer->umem)) { 1006a59b7b05SJason Gunthorpe err = PTR_ERR(ubuffer->umem); 10072f5ff264SEli Cohen goto err_bfreg; 1008a59b7b05SJason Gunthorpe } 1009a59b7b05SJason Gunthorpe page_size = mlx5_umem_find_best_quantized_pgoff( 1010a59b7b05SJason Gunthorpe ubuffer->umem, qpc, log_page_size, 1011a59b7b05SJason Gunthorpe MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64, 1012a59b7b05SJason Gunthorpe &page_offset_quantized); 1013a59b7b05SJason Gunthorpe if (!page_size) { 1014a59b7b05SJason Gunthorpe err = -EINVAL; 1015a59b7b05SJason Gunthorpe goto err_umem; 1016a59b7b05SJason Gunthorpe } 1017a59b7b05SJason Gunthorpe ncont = ib_umem_num_dma_blocks(ubuffer->umem, page_size); 10189e9c47d0SEli Cohen } else { 101919098df2Smajd@mellanox.com ubuffer->umem = NULL; 10209e9c47d0SEli Cohen } 1021e126ba97SEli Cohen 102209a7d9ecSSaeed Mahameed *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 102309a7d9ecSSaeed Mahameed MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; 10241b9a07eeSLeon Romanovsky *in = kvzalloc(*inlen, GFP_KERNEL); 1025e126ba97SEli Cohen if (!*in) { 1026e126ba97SEli Cohen err = -ENOMEM; 1027e126ba97SEli Cohen goto err_umem; 1028e126ba97SEli Cohen } 1029e126ba97SEli Cohen 103004bcc1c2SLeon Romanovsky uid = (attr->qp_type != IB_QPT_XRC_INI) ? 
to_mpd(pd)->uid : 0; 10315aa3771dSYishai Hadas MLX5_SET(create_qp_in, *in, uid, uid); 103209a7d9ecSSaeed Mahameed qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 1033a59b7b05SJason Gunthorpe pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas); 1034a59b7b05SJason Gunthorpe if (ubuffer->umem) { 1035a59b7b05SJason Gunthorpe mlx5_ib_populate_pas(ubuffer->umem, page_size, pas, 0); 1036a59b7b05SJason Gunthorpe MLX5_SET(qpc, qpc, log_page_size, 1037a59b7b05SJason Gunthorpe order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT); 1038a59b7b05SJason Gunthorpe MLX5_SET(qpc, qpc, page_offset, page_offset_quantized); 1039a59b7b05SJason Gunthorpe } 104009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, uar_page, uar_index); 10411ee47ab3SYishai Hadas if (bfregn != MLX5_IB_INVALID_BFREG) 1042b037c29aSEli Cohen resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn); 10431ee47ab3SYishai Hadas else 10441ee47ab3SYishai Hadas resp->bfreg_index = MLX5_IB_INVALID_BFREG; 10452f5ff264SEli Cohen qp->bfregn = bfregn; 1046e126ba97SEli Cohen 10470bedd3d0SLang Cheng err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db); 1048e126ba97SEli Cohen if (err) { 1049e126ba97SEli Cohen mlx5_ib_dbg(dev, "map failed\n"); 1050e126ba97SEli Cohen goto err_free; 1051e126ba97SEli Cohen } 1052e126ba97SEli Cohen 1053e126ba97SEli Cohen return 0; 1054e126ba97SEli Cohen 1055e126ba97SEli Cohen err_free: 1056479163f4SAl Viro kvfree(*in); 1057e126ba97SEli Cohen 1058e126ba97SEli Cohen err_umem: 105919098df2Smajd@mellanox.com ib_umem_release(ubuffer->umem); 1060e126ba97SEli Cohen 10612f5ff264SEli Cohen err_bfreg: 10621ee47ab3SYishai Hadas if (bfregn != MLX5_IB_INVALID_BFREG) 10634ed131d0SYishai Hadas mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn); 1064e126ba97SEli Cohen return err; 1065e126ba97SEli Cohen } 1066e126ba97SEli Cohen 1067747c519cSLeon Romanovsky static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1068747c519cSLeon Romanovsky struct mlx5_ib_qp_base *base, struct ib_udata *udata) 
1069e126ba97SEli Cohen { 1070747c519cSLeon Romanovsky struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( 1071747c519cSLeon Romanovsky udata, struct mlx5_ib_ucontext, ibucontext); 1072e126ba97SEli Cohen 1073747c519cSLeon Romanovsky if (udata) { 1074747c519cSLeon Romanovsky /* User QP */ 1075e126ba97SEli Cohen mlx5_ib_db_unmap_user(context, &qp->db); 107619098df2Smajd@mellanox.com ib_umem_release(base->ubuffer.umem); 10771ee47ab3SYishai Hadas 10781ee47ab3SYishai Hadas /* 10791ee47ab3SYishai Hadas * Free only the BFREGs which are handled by the kernel. 10801ee47ab3SYishai Hadas * BFREGs of UARs allocated dynamically are handled by user. 10811ee47ab3SYishai Hadas */ 10821ee47ab3SYishai Hadas if (qp->bfregn != MLX5_IB_INVALID_BFREG) 10834ed131d0SYishai Hadas mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn); 1084747c519cSLeon Romanovsky return; 1085747c519cSLeon Romanovsky } 1086747c519cSLeon Romanovsky 1087747c519cSLeon Romanovsky /* Kernel QP */ 1088747c519cSLeon Romanovsky kvfree(qp->sq.wqe_head); 1089747c519cSLeon Romanovsky kvfree(qp->sq.w_list); 1090747c519cSLeon Romanovsky kvfree(qp->sq.wrid); 1091747c519cSLeon Romanovsky kvfree(qp->sq.wr_data); 1092747c519cSLeon Romanovsky kvfree(qp->rq.wrid); 1093747c519cSLeon Romanovsky if (qp->db.db) 1094747c519cSLeon Romanovsky mlx5_db_free(dev->mdev, &qp->db); 1095747c519cSLeon Romanovsky if (qp->buf.frags) 1096747c519cSLeon Romanovsky mlx5_frag_buf_free(dev->mdev, &qp->buf); 1097e126ba97SEli Cohen } 1098e126ba97SEli Cohen 109998fc1126SLeon Romanovsky static int _create_kernel_qp(struct mlx5_ib_dev *dev, 1100e126ba97SEli Cohen struct ib_qp_init_attr *init_attr, 110198fc1126SLeon Romanovsky struct mlx5_ib_qp *qp, u32 **in, int *inlen, 110219098df2Smajd@mellanox.com struct mlx5_ib_qp_base *base) 1103e126ba97SEli Cohen { 1104e126ba97SEli Cohen int uar_index; 110509a7d9ecSSaeed Mahameed void *qpc; 1106e126ba97SEli Cohen int err; 1107e126ba97SEli Cohen 1108e126ba97SEli Cohen if (init_attr->qp_type == 
MLX5_IB_QPT_REG_UMR) 11095fe9dec0SEli Cohen qp->bf.bfreg = &dev->fp_bfreg; 11102978975cSLeon Romanovsky else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST) 111111f552e2SMichael Guralnik qp->bf.bfreg = &dev->wc_bfreg; 11125fe9dec0SEli Cohen else 11135fe9dec0SEli Cohen qp->bf.bfreg = &dev->bfreg; 1114e126ba97SEli Cohen 1115d8030b0dSEli Cohen /* We need to divide by two since each register is comprised of 1116d8030b0dSEli Cohen * two buffers of identical size, namely odd and even 1117d8030b0dSEli Cohen */ 1118d8030b0dSEli Cohen qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2; 11195fe9dec0SEli Cohen uar_index = qp->bf.bfreg->index; 1120e126ba97SEli Cohen 1121e126ba97SEli Cohen err = calc_sq_size(dev, init_attr, qp); 1122e126ba97SEli Cohen if (err < 0) { 1123e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 11245fe9dec0SEli Cohen return err; 1125e126ba97SEli Cohen } 1126e126ba97SEli Cohen 1127e126ba97SEli Cohen qp->rq.offset = 0; 1128e126ba97SEli Cohen qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 112919098df2Smajd@mellanox.com base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); 1130e126ba97SEli Cohen 113134f4c955SGuy Levi err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size, 113234f4c955SGuy Levi &qp->buf, dev->mdev->priv.numa_node); 1133e126ba97SEli Cohen if (err) { 1134e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 11355fe9dec0SEli Cohen return err; 1136e126ba97SEli Cohen } 1137e126ba97SEli Cohen 113834f4c955SGuy Levi if (qp->rq.wqe_cnt) 113934f4c955SGuy Levi mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift, 114034f4c955SGuy Levi ilog2(qp->rq.wqe_cnt), &qp->rq.fbc); 114134f4c955SGuy Levi 114234f4c955SGuy Levi if (qp->sq.wqe_cnt) { 114334f4c955SGuy Levi int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) / 114434f4c955SGuy Levi MLX5_SEND_WQE_BB; 114534f4c955SGuy Levi mlx5_init_fbc_offset(qp->buf.frags + 114634f4c955SGuy Levi (qp->sq.offset / PAGE_SIZE), 114734f4c955SGuy Levi ilog2(MLX5_SEND_WQE_BB), 
114834f4c955SGuy Levi ilog2(qp->sq.wqe_cnt), 114934f4c955SGuy Levi sq_strides_offset, &qp->sq.fbc); 115034f4c955SGuy Levi 115134f4c955SGuy Levi qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); 115234f4c955SGuy Levi } 115334f4c955SGuy Levi 115409a7d9ecSSaeed Mahameed *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 115509a7d9ecSSaeed Mahameed MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; 11561b9a07eeSLeon Romanovsky *in = kvzalloc(*inlen, GFP_KERNEL); 1157e126ba97SEli Cohen if (!*in) { 1158e126ba97SEli Cohen err = -ENOMEM; 1159e126ba97SEli Cohen goto err_buf; 1160e126ba97SEli Cohen } 116109a7d9ecSSaeed Mahameed 116209a7d9ecSSaeed Mahameed qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 116309a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, uar_page, uar_index); 11648256c69bSMaor Gottlieb MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); 116509a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); 116609a7d9ecSSaeed Mahameed 1167e126ba97SEli Cohen /* Set "fast registration enabled" for all kernel QPs */ 116809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, fre, 1); 116909a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, rlky, 1); 1170e126ba97SEli Cohen 11712978975cSLeon Romanovsky if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) 117209a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, deth_sqpn, 1); 1173b11a4f9cSHaggai Eran 117434f4c955SGuy Levi mlx5_fill_page_frag_array(&qp->buf, 117534f4c955SGuy Levi (__be64 *)MLX5_ADDR_OF(create_qp_in, 117634f4c955SGuy Levi *in, pas)); 1177e126ba97SEli Cohen 11789603b61dSJack Morgenstein err = mlx5_db_alloc(dev->mdev, &qp->db); 1179e126ba97SEli Cohen if (err) { 1180e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 1181e126ba97SEli Cohen goto err_free; 1182e126ba97SEli Cohen } 1183e126ba97SEli Cohen 1184b5883008SLi Dongyang qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, 1185b5883008SLi Dongyang sizeof(*qp->sq.wrid), GFP_KERNEL); 1186b5883008SLi Dongyang qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt, 
1187b5883008SLi Dongyang sizeof(*qp->sq.wr_data), GFP_KERNEL); 1188b5883008SLi Dongyang qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, 1189b5883008SLi Dongyang sizeof(*qp->rq.wrid), GFP_KERNEL); 1190b5883008SLi Dongyang qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt, 1191b5883008SLi Dongyang sizeof(*qp->sq.w_list), GFP_KERNEL); 1192b5883008SLi Dongyang qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt, 1193b5883008SLi Dongyang sizeof(*qp->sq.wqe_head), GFP_KERNEL); 1194e126ba97SEli Cohen 1195e126ba97SEli Cohen if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || 1196e126ba97SEli Cohen !qp->sq.w_list || !qp->sq.wqe_head) { 1197e126ba97SEli Cohen err = -ENOMEM; 1198e126ba97SEli Cohen goto err_wrid; 1199e126ba97SEli Cohen } 1200e126ba97SEli Cohen 1201e126ba97SEli Cohen return 0; 1202e126ba97SEli Cohen 1203e126ba97SEli Cohen err_wrid: 1204b5883008SLi Dongyang kvfree(qp->sq.wqe_head); 1205b5883008SLi Dongyang kvfree(qp->sq.w_list); 1206b5883008SLi Dongyang kvfree(qp->sq.wrid); 1207b5883008SLi Dongyang kvfree(qp->sq.wr_data); 1208b5883008SLi Dongyang kvfree(qp->rq.wrid); 1209f4044dacSEli Cohen mlx5_db_free(dev->mdev, &qp->db); 1210e126ba97SEli Cohen 1211e126ba97SEli Cohen err_free: 1212479163f4SAl Viro kvfree(*in); 1213e126ba97SEli Cohen 1214e126ba97SEli Cohen err_buf: 121534f4c955SGuy Levi mlx5_frag_buf_free(dev->mdev, &qp->buf); 1216e126ba97SEli Cohen return err; 1217e126ba97SEli Cohen } 1218e126ba97SEli Cohen 121909a7d9ecSSaeed Mahameed static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) 1220e126ba97SEli Cohen { 12217aede1a2SLeon Romanovsky if (attr->srq || (qp->type == IB_QPT_XRC_TGT) || 12227aede1a2SLeon Romanovsky (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI)) 122309a7d9ecSSaeed Mahameed return MLX5_SRQ_RQ; 1224e126ba97SEli Cohen else if (!qp->has_rq) 122509a7d9ecSSaeed Mahameed return MLX5_ZERO_LEN_RQ; 12267aede1a2SLeon Romanovsky 122709a7d9ecSSaeed Mahameed return MLX5_NON_ZERO_RQ; 1228e126ba97SEli Cohen } 1229e126ba97SEli 
Cohen 12300fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 1231c2e53b2cSYishai Hadas struct mlx5_ib_qp *qp, 12321cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u32 tdn, 12331cd6dbd3SYishai Hadas struct ib_pd *pd) 12340fb2ed66Smajd@mellanox.com { 1235e0b4b472SLeon Romanovsky u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 12360fb2ed66Smajd@mellanox.com void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 12370fb2ed66Smajd@mellanox.com 12381cd6dbd3SYishai Hadas MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid); 12390fb2ed66Smajd@mellanox.com MLX5_SET(tisc, tisc, transport_domain, tdn); 12402be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 1241c2e53b2cSYishai Hadas MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); 1242c2e53b2cSYishai Hadas 1243e0b4b472SLeon Romanovsky return mlx5_core_create_tis(dev->mdev, in, &sq->tisn); 12440fb2ed66Smajd@mellanox.com } 12450fb2ed66Smajd@mellanox.com 12460fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 12471cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, struct ib_pd *pd) 12480fb2ed66Smajd@mellanox.com { 12491cd6dbd3SYishai Hadas mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid); 12500fb2ed66Smajd@mellanox.com } 12510fb2ed66Smajd@mellanox.com 1252d5ed8ac3SMark Bloch static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq) 1253b96c9ddeSMark Bloch { 1254b96c9ddeSMark Bloch if (sq->flow_rule) 1255b96c9ddeSMark Bloch mlx5_del_flow_rules(sq->flow_rule); 1256d5ed8ac3SMark Bloch sq->flow_rule = NULL; 1257b96c9ddeSMark Bloch } 1258b96c9ddeSMark Bloch 12599a1ac95aSAharon Landau static bool fr_supported(int ts_cap) 12602fe8d4b8SAharon Landau { 12619a1ac95aSAharon Landau return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING || 12629a1ac95aSAharon Landau ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; 12639a1ac95aSAharon Landau } 12642fe8d4b8SAharon Landau 12659a1ac95aSAharon Landau static int get_ts_format(struct mlx5_ib_dev 
*dev, struct mlx5_ib_cq *cq, 126633652951SAharon Landau bool fr_sup, bool rt_sup) 12679a1ac95aSAharon Landau { 126833652951SAharon Landau if (cq->private_flags & MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS) { 126933652951SAharon Landau if (!rt_sup) { 127033652951SAharon Landau mlx5_ib_dbg(dev, 127133652951SAharon Landau "Real time TS format is not supported\n"); 12722fe8d4b8SAharon Landau return -EOPNOTSUPP; 12732fe8d4b8SAharon Landau } 127433652951SAharon Landau return MLX5_TIMESTAMP_FORMAT_REAL_TIME; 12752fe8d4b8SAharon Landau } 12769a1ac95aSAharon Landau if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) { 12779a1ac95aSAharon Landau if (!fr_sup) { 12789a1ac95aSAharon Landau mlx5_ib_dbg(dev, 12799a1ac95aSAharon Landau "Free running TS format is not supported\n"); 12802fe8d4b8SAharon Landau return -EOPNOTSUPP; 12812fe8d4b8SAharon Landau } 12829a1ac95aSAharon Landau return MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; 12832fe8d4b8SAharon Landau } 12849a1ac95aSAharon Landau return fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : 12859a1ac95aSAharon Landau MLX5_TIMESTAMP_FORMAT_DEFAULT; 12869a1ac95aSAharon Landau } 12879a1ac95aSAharon Landau 12889a1ac95aSAharon Landau static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq) 12899a1ac95aSAharon Landau { 12909a1ac95aSAharon Landau u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format); 12919a1ac95aSAharon Landau 129233652951SAharon Landau return get_ts_format(dev, recv_cq, fr_supported(ts_cap), 129333652951SAharon Landau rt_supported(ts_cap)); 12942fe8d4b8SAharon Landau } 12952fe8d4b8SAharon Landau 12962fe8d4b8SAharon Landau static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) 12972fe8d4b8SAharon Landau { 12989a1ac95aSAharon Landau u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format); 12992fe8d4b8SAharon Landau 130033652951SAharon Landau return get_ts_format(dev, send_cq, fr_supported(ts_cap), 130133652951SAharon Landau rt_supported(ts_cap)); 13022fe8d4b8SAharon Landau } 
13032fe8d4b8SAharon Landau 13042fe8d4b8SAharon Landau static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, 13052fe8d4b8SAharon Landau struct mlx5_ib_cq *recv_cq) 13062fe8d4b8SAharon Landau { 13079a1ac95aSAharon Landau u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format); 13089a1ac95aSAharon Landau bool fr_sup = fr_supported(ts_cap); 130933652951SAharon Landau bool rt_sup = rt_supported(ts_cap); 13109a1ac95aSAharon Landau u8 default_ts = fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : 13119a1ac95aSAharon Landau MLX5_TIMESTAMP_FORMAT_DEFAULT; 13129a1ac95aSAharon Landau int send_ts_format = 131333652951SAharon Landau send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) : 13149a1ac95aSAharon Landau default_ts; 13159a1ac95aSAharon Landau int recv_ts_format = 131633652951SAharon Landau recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) : 13179a1ac95aSAharon Landau default_ts; 13182fe8d4b8SAharon Landau 13199a1ac95aSAharon Landau if (send_ts_format < 0 || recv_ts_format < 0) 13202fe8d4b8SAharon Landau return -EOPNOTSUPP; 13212fe8d4b8SAharon Landau 132233652951SAharon Landau if (send_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT && 132333652951SAharon Landau recv_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT && 132433652951SAharon Landau send_ts_format != recv_ts_format) { 132533652951SAharon Landau mlx5_ib_dbg( 132633652951SAharon Landau dev, 132733652951SAharon Landau "The send ts_format does not match the receive ts_format\n"); 13282fe8d4b8SAharon Landau return -EOPNOTSUPP; 13292fe8d4b8SAharon Landau } 133033652951SAharon Landau 13319a1ac95aSAharon Landau return send_ts_format == default_ts ? 
/*
 * Create the SQ object backing the send side of a raw packet QP.
 *
 * Pins the user buffer described by sq->ubuffer, derives the best WQ page
 * size/offset for it, and issues a CREATE_SQ command whose context is
 * copied from the already-prepared QP context in @qpin.
 *
 * On success the umem stays attached to sq->ubuffer (released later by
 * destroy_raw_packet_qp_sq()); on failure the umem is released here and
 * the pointer is cleared.
 */
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				   struct ib_udata *udata,
				   struct mlx5_ib_sq *sq, void *qpin,
				   struct ib_pd *pd, struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
	__be64 *pas;
	void *in;
	void *sqc;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	void *wq;
	int inlen;
	int err;
	unsigned int page_offset_quantized;
	unsigned long page_size;
	int ts_format;

	/* Fail early if the send CQ's timestamp format can't be honoured. */
	ts_format = get_sq_ts_format(dev, cq);
	if (ts_format < 0)
		return ts_format;

	sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
				       ubuffer->buf_size, 0);
	if (IS_ERR(sq->ubuffer.umem))
		return PTR_ERR(sq->ubuffer.umem);
	/*
	 * "wq"/"log_wq_pg_sz"/"page_offset" here name mlx5_ifc fields for
	 * the macro, not the local variable below.
	 */
	page_size = mlx5_umem_find_best_quantized_pgoff(
		ubuffer->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &page_offset_quantized);
	if (!page_size) {
		err = -EINVAL;
		goto err_umem;
	}

	/* Command layout: fixed create_sq_in header + one PA per DMA block. */
	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) *
			ib_umem_num_dma_blocks(sq->ubuffer.umem, page_size);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_umem;
	}

	MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, ts_format, ts_format);
	/* Mirror the relevant fields from the QP context into the SQ context. */
	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, swp))
		MLX5_SET(sqc, sqc, allow_swp, 1);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
	MLX5_SET(wq, wq, log_wq_pg_sz,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, page_offset, page_offset_quantized);

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(sq->ubuffer.umem, page_size, pas, 0);

	err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);

	kvfree(in);

	if (err)
		goto err_umem;

	return 0;

err_umem:
	ib_umem_release(sq->ubuffer.umem);
	sq->ubuffer.umem = NULL;

	return err;
}

/* Tear down an SQ created by create_raw_packet_qp_sq(), releasing its umem. */
static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_sq *sq)
{
	destroy_flow_rule_vport_sq(sq);
	mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
	ib_umem_release(sq->ubuffer.umem);
}
/*
 * Create the RQ object backing the receive side of a raw packet QP.
 *
 * Unlike the SQ path, the umem (rq->base.ubuffer.umem) is pinned by the
 * caller; this function only computes page layout, fills the RQ context
 * from the prepared QP context in @qpin, and issues CREATE_RQ.
 */
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, void *qpin,
				   struct ib_pd *pd, struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
	__be64 *pas;
	void *in;
	void *rqc;
	void *wq;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	struct ib_umem *umem = rq->base.ubuffer.umem;
	unsigned int page_offset_quantized;
	unsigned long page_size = 0;
	int ts_format;
	size_t inlen;
	int err;

	/* Fail early if the recv CQ's timestamp format can't be honoured. */
	ts_format = get_rq_ts_format(dev, cq);
	if (ts_format < 0)
		return ts_format;

	/* "wq"/"log_wq_pg_sz"/"page_offset" name mlx5_ifc fields for the macro. */
	page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
							MLX5_ADAPTER_PAGE_SHIFT,
							page_offset, 64,
							&page_offset_quantized);
	if (!page_size)
		return -EINVAL;

	/* Command layout: fixed create_rq_in header + one PA per DMA block. */
	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	/* vsd (VLAN stripping disable) is set unless CVLAN stripping was requested. */
	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
		MLX5_SET(rqc, rqc, vsd, 1);
	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, ts_format, ts_format);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

	if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
		MLX5_SET(rqc, rqc, scatter_fcs, 1);

	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, page_offset, page_offset_quantized);
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	/* RQ stride is stored log2-biased by 4 in the QP context. */
	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
	MLX5_SET(wq, wq, log_wq_pg_sz,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(umem, page_size, pas, 0);

	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);

	kvfree(in);

	return err;
}
/* Destroy the RQ created by create_raw_packet_qp_rq(). The umem is owned
 * and released by the caller, not here.
 */
static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
}

/*
 * Destroy the TIR attached to a raw packet QP's RQ.
 *
 * If the QP had either self-loopback-block flag enabled, the TIR was
 * counted as a loopback blocker at create time; drop that reference
 * before destroying the TIR itself.
 */
static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_rq *rq,
				      u32 qp_flags_en,
				      struct ib_pd *pd)
{
	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
		mlx5_ib_disable_lb(dev, false, true);
	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
}
/*
 * Create a direct-dispatch TIR pointing at the raw packet QP's RQ.
 *
 * @qp_flags_en is in/out: for E-Switch representors (dev->is_rep) unicast
 * self-loopback blocking is forced on and the flag is recorded so teardown
 * knows to undo the loopback accounting. On success with any self_lb_block
 * bits set, the device-wide loopback blocker count is bumped via
 * mlx5_ib_enable_lb(); if that fails the TIR is destroyed again (with
 * qp_flags_en 0 so the not-yet-taken loopback reference isn't dropped).
 */
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_rq *rq, u32 tdn,
				    u32 *qp_flags_en, struct ib_pd *pd,
				    u32 *out)
{
	u8 lb_flag = 0;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
	MLX5_SET(tirc, tirc, transport_domain, tdn);
	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;

	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;

	/* Representors always block unicast self-loopback. */
	if (dev->is_rep) {
		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
	}

	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
	rq->tirn = MLX5_GET(create_tir_out, out, tirn);
	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
		err = mlx5_ib_enable_lb(dev, false, true);

		if (err)
			destroy_raw_packet_qp_tir(dev, rq, 0, pd);
	}
	kvfree(in);

	return err;
}
/*
 * Create the full raw packet QP: optional TIS+SQ pair (when the QP has a
 * send queue) and optional RQ+TIR pair (when it has a receive queue).
 * At least one of the two queues must be present.
 *
 * For DEVX-capable users (uid != 0), the object numbers (and the TIR ICM
 * address when the flow tables are SW-owned) are reported back in @resp.
 * Errors unwind in reverse creation order via the goto ladder below.
 */
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				u32 *in, size_t inlen, struct ib_pd *pd,
				struct ib_udata *udata,
				struct mlx5_ib_create_qp_resp *resp,
				struct ib_qp_init_attr *init_attr)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	int err;
	u32 tdn = mucontext->tdn;
	u16 uid = to_mpd(pd)->uid;
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};

	/* A raw packet QP needs at least one of SQ/RQ. */
	if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
		return -EINVAL;
	if (qp->sq.wqe_cnt) {
		err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
		if (err)
			return err;

		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
					      to_mcq(init_attr->send_cq));
		if (err)
			goto err_destroy_tis;

		if (uid) {
			resp->tisn = sq->tisn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
			resp->sqn = sq->base.mqp.qpn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
		}

		sq->base.container_mibqp = qp;
		sq->base.mqp.event = mlx5_ib_qp_event;
	}

	if (qp->rq.wqe_cnt) {
		/* Must be set before create_raw_packet_qp_rq() reads it. */
		rq->base.container_mibqp = qp;

		if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
		if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
		err = create_raw_packet_qp_rq(dev, rq, in, pd,
					      to_mcq(init_attr->recv_cq));
		if (err)
			goto err_destroy_sq;

		err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
					       out);
		if (err)
			goto err_destroy_rq;

		if (uid) {
			resp->rqn = rq->base.mqp.qpn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
			resp->tirn = rq->tirn;
			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
			if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
				/* Assemble the 64-bit ICM address from its
				 * three CREATE_TIR output fields.
				 */
				resp->tir_icm_addr = MLX5_GET(
					create_tir_out, out, icm_address_31_0);
				resp->tir_icm_addr |=
					(u64)MLX5_GET(create_tir_out, out,
						      icm_address_39_32)
					<< 32;
				resp->tir_icm_addr |=
					(u64)MLX5_GET(create_tir_out, out,
						      icm_address_63_40)
					<< 40;
				resp->comp_mask |=
					MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
			}
		}
	}

	/* The QP is identified by the SQ number when it has one, else the RQ. */
	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
						     rq->base.mqp.qpn;
	return 0;

err_destroy_rq:
	destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
	if (!qp->sq.wqe_cnt)
		return err;
	destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
	destroy_raw_packet_qp_tis(dev, sq, pd);

	return err;
}

/* Tear down both halves of a raw packet QP, in reverse creation order. */
static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	if (qp->rq.wqe_cnt) {
		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
		destroy_raw_packet_qp_rq(dev, rq);
	}

	if (qp->sq.wqe_cnt) {
		destroy_raw_packet_qp_sq(dev, sq);
		destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
	}
}
/* Wire the raw packet QP's SQ/RQ sub-objects to the QP's shared WQ state
 * and doorbell record, so both halves use the same db page.
 */
static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	sq->sq = &qp->sq;
	rq->rq = &qp->rq;
	sq->doorbell = &qp->db;
	rq->doorbell = &qp->db;
}

/* Destroy the TIR of an RSS raw QP, undoing the loopback-blocker count
 * taken at create time when either self-loopback-block flag was enabled.
 */
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
		mlx5_ib_disable_lb(dev, false, true);
	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
			     to_mpd(qp->ibqp.pd)->uid);
}

/* Aggregated parameters threaded through the QP creation paths. */
struct mlx5_create_qp_params {
	struct ib_udata *udata;
	size_t inlen;		/* size of the inbound user command buffer */
	size_t outlen;		/* size of the user response buffer */
	size_t ucmd_size;	/* expected size of *ucmd for this QP type */
	void *ucmd;		/* copied-in user command (type-specific layout) */
	u8 is_rss_raw : 1;	/* QP created through the RSS raw path */
	struct ib_qp_init_attr *attr;
	u32 uidx;		/* user index — presumably from the driver udata; verify at caller */
	struct mlx5_ib_create_qp_resp resp;
};
mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context( 172089944450SShamir Rabinovitch udata, struct mlx5_ib_ucontext, ibucontext); 172128d61370SYishai Hadas int inlen; 17221f1d6abbSAriel Levkovich int outlen; 172328d61370SYishai Hadas int err; 172428d61370SYishai Hadas u32 *in; 17251f1d6abbSAriel Levkovich u32 *out; 172628d61370SYishai Hadas void *tirc; 172728d61370SYishai Hadas void *hfso; 172828d61370SYishai Hadas u32 selected_fields = 0; 17292d93fc85SMatan Barak u32 outer_l4; 173028d61370SYishai Hadas u32 tdn = mucontext->tdn; 1731175edba8SMark Bloch u8 lb_flag = 0; 173228d61370SYishai Hadas 17335ce0592bSLeon Romanovsky if (ucmd->comp_mask) { 173428d61370SYishai Hadas mlx5_ib_dbg(dev, "invalid comp mask\n"); 173528d61370SYishai Hadas return -EOPNOTSUPP; 173628d61370SYishai Hadas } 173728d61370SYishai Hadas 17385ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER && 17395ce0592bSLeon Romanovsky !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) { 1740309fa347SMaor Gottlieb mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n"); 1741309fa347SMaor Gottlieb return -EOPNOTSUPP; 1742309fa347SMaor Gottlieb } 1743309fa347SMaor Gottlieb 174437518fa4SLeon Romanovsky if (dev->is_rep) 1745175edba8SMark Bloch qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC; 1746175edba8SMark Bloch 174737518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) 174837518fa4SLeon Romanovsky lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 174937518fa4SLeon Romanovsky 175037518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) 1751175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; 1752175edba8SMark Bloch 175328d61370SYishai Hadas inlen = MLX5_ST_SZ_BYTES(create_tir_in); 17541f1d6abbSAriel Levkovich outlen = MLX5_ST_SZ_BYTES(create_tir_out); 17551f1d6abbSAriel Levkovich in = kvzalloc(inlen + outlen, GFP_KERNEL); 175628d61370SYishai Hadas if (!in) 175728d61370SYishai Hadas return -ENOMEM; 
175828d61370SYishai Hadas 17591f1d6abbSAriel Levkovich out = in + MLX5_ST_SZ_DW(create_tir_in); 1760443c1cf9SYishai Hadas MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid); 176128d61370SYishai Hadas tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 176228d61370SYishai Hadas MLX5_SET(tirc, tirc, disp_type, 176328d61370SYishai Hadas MLX5_TIRC_DISP_TYPE_INDIRECT); 176428d61370SYishai Hadas MLX5_SET(tirc, tirc, indirect_table, 176528d61370SYishai Hadas init_attr->rwq_ind_tbl->ind_tbl_num); 176628d61370SYishai Hadas MLX5_SET(tirc, tirc, transport_domain, tdn); 176728d61370SYishai Hadas 176828d61370SYishai Hadas hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1769f95ef6cbSMaor Gottlieb 17705ce0592bSLeon Romanovsky if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) 1771f95ef6cbSMaor Gottlieb MLX5_SET(tirc, tirc, tunneled_offload_en, 1); 1772f95ef6cbSMaor Gottlieb 1773175edba8SMark Bloch MLX5_SET(tirc, tirc, self_lb_block, lb_flag); 1774175edba8SMark Bloch 17755ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER) 1776309fa347SMaor Gottlieb hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner); 1777309fa347SMaor Gottlieb else 1778309fa347SMaor Gottlieb hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1779309fa347SMaor Gottlieb 17805ce0592bSLeon Romanovsky switch (ucmd->rx_hash_function) { 178128d61370SYishai Hadas case MLX5_RX_HASH_FUNC_TOEPLITZ: 178228d61370SYishai Hadas { 178328d61370SYishai Hadas void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 178428d61370SYishai Hadas size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); 178528d61370SYishai Hadas 17865ce0592bSLeon Romanovsky if (len != ucmd->rx_key_len) { 178728d61370SYishai Hadas err = -EINVAL; 178828d61370SYishai Hadas goto err; 178928d61370SYishai Hadas } 179028d61370SYishai Hadas 179128d61370SYishai Hadas MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); 17925ce0592bSLeon Romanovsky memcpy(rss_key, ucmd->rx_hash_key, len); 
179328d61370SYishai Hadas break; 179428d61370SYishai Hadas } 179528d61370SYishai Hadas default: 179628d61370SYishai Hadas err = -EOPNOTSUPP; 179728d61370SYishai Hadas goto err; 179828d61370SYishai Hadas } 179928d61370SYishai Hadas 18005ce0592bSLeon Romanovsky if (!ucmd->rx_hash_fields_mask) { 180128d61370SYishai Hadas /* special case when this TIR serves as steering entry without hashing */ 180228d61370SYishai Hadas if (!init_attr->rwq_ind_tbl->log_ind_tbl_size) 180328d61370SYishai Hadas goto create_tir; 180428d61370SYishai Hadas err = -EINVAL; 180528d61370SYishai Hadas goto err; 180628d61370SYishai Hadas } 180728d61370SYishai Hadas 18085ce0592bSLeon Romanovsky if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 18095ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) && 18105ce0592bSLeon Romanovsky ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 18115ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) { 181228d61370SYishai Hadas err = -EINVAL; 181328d61370SYishai Hadas goto err; 181428d61370SYishai Hadas } 181528d61370SYishai Hadas 181628d61370SYishai Hadas /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */ 18175ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 18185ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) 181928d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 182028d61370SYishai Hadas MLX5_L3_PROT_TYPE_IPV4); 18215ce0592bSLeon Romanovsky else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 18225ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 182328d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 182428d61370SYishai Hadas MLX5_L3_PROT_TYPE_IPV6); 182528d61370SYishai Hadas 18265ce0592bSLeon Romanovsky outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 18275ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & 
MLX5_RX_HASH_DST_PORT_TCP)) 18285ce0592bSLeon Romanovsky << 0 | 18295ce0592bSLeon Romanovsky ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 18305ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 18315ce0592bSLeon Romanovsky << 1 | 18325ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2; 18332d93fc85SMatan Barak 18342d93fc85SMatan Barak /* Check that only one l4 protocol is set */ 18352d93fc85SMatan Barak if (outer_l4 & (outer_l4 - 1)) { 183628d61370SYishai Hadas err = -EINVAL; 183728d61370SYishai Hadas goto err; 183828d61370SYishai Hadas } 183928d61370SYishai Hadas 184028d61370SYishai Hadas /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */ 18415ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 18425ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) 184328d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 184428d61370SYishai Hadas MLX5_L4_PROT_TYPE_TCP); 18455ce0592bSLeon Romanovsky else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 18465ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 184728d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 184828d61370SYishai Hadas MLX5_L4_PROT_TYPE_UDP); 184928d61370SYishai Hadas 18505ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 18515ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6)) 185228d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP; 185328d61370SYishai Hadas 18545ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) || 18555ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 185628d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP; 185728d61370SYishai Hadas 18585ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & 
MLX5_RX_HASH_SRC_PORT_TCP) || 18595ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP)) 186028d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT; 186128d61370SYishai Hadas 18625ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) || 18635ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 186428d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT; 186528d61370SYishai Hadas 18665ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) 18672d93fc85SMatan Barak selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI; 18682d93fc85SMatan Barak 186928d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields); 187028d61370SYishai Hadas 187128d61370SYishai Hadas create_tir: 1872e0b4b472SLeon Romanovsky MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 1873e0b4b472SLeon Romanovsky err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); 187428d61370SYishai Hadas 18751f1d6abbSAriel Levkovich qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn); 18760042f9e4SMark Bloch if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { 18770042f9e4SMark Bloch err = mlx5_ib_enable_lb(dev, false, true); 18780042f9e4SMark Bloch 18790042f9e4SMark Bloch if (err) 1880443c1cf9SYishai Hadas mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, 1881443c1cf9SYishai Hadas to_mpd(pd)->uid); 18820042f9e4SMark Bloch } 18830042f9e4SMark Bloch 188428d61370SYishai Hadas if (err) 188528d61370SYishai Hadas goto err; 188628d61370SYishai Hadas 18877f72052cSYishai Hadas if (mucontext->devx_uid) { 188808d53976SLeon Romanovsky params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN; 188908d53976SLeon Romanovsky params->resp.tirn = qp->rss_qp.tirn; 189054a38b66SAlex Vesker if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || 189154a38b66SAlex Vesker MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) { 189208d53976SLeon Romanovsky 
params->resp.tir_icm_addr = 18931f1d6abbSAriel Levkovich MLX5_GET(create_tir_out, out, icm_address_31_0); 189408d53976SLeon Romanovsky params->resp.tir_icm_addr |= 189508d53976SLeon Romanovsky (u64)MLX5_GET(create_tir_out, out, 18961f1d6abbSAriel Levkovich icm_address_39_32) 18971f1d6abbSAriel Levkovich << 32; 189808d53976SLeon Romanovsky params->resp.tir_icm_addr |= 189908d53976SLeon Romanovsky (u64)MLX5_GET(create_tir_out, out, 19001f1d6abbSAriel Levkovich icm_address_63_40) 19011f1d6abbSAriel Levkovich << 40; 190208d53976SLeon Romanovsky params->resp.comp_mask |= 19031f1d6abbSAriel Levkovich MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR; 19041f1d6abbSAriel Levkovich } 19057f72052cSYishai Hadas } 19067f72052cSYishai Hadas 190728d61370SYishai Hadas kvfree(in); 190828d61370SYishai Hadas /* qpn is reserved for that QP */ 190928d61370SYishai Hadas qp->trans_qp.base.mqp.qpn = 0; 19102be08c30SLeon Romanovsky qp->is_rss = true; 191128d61370SYishai Hadas return 0; 191228d61370SYishai Hadas 191328d61370SYishai Hadas err: 191428d61370SYishai Hadas kvfree(in); 191528d61370SYishai Hadas return err; 191628d61370SYishai Hadas } 191728d61370SYishai Hadas 19185d6ff1baSYonatan Cohen static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, 191981530ab0SLeon Romanovsky struct mlx5_ib_qp *qp, 19205d6ff1baSYonatan Cohen struct ib_qp_init_attr *init_attr, 19215d6ff1baSYonatan Cohen void *qpc) 19225d6ff1baSYonatan Cohen { 19235d6ff1baSYonatan Cohen int scqe_sz; 19242ab367a7Szhengbin bool allow_scat_cqe = false; 19255d6ff1baSYonatan Cohen 192681530ab0SLeon Romanovsky allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; 19276f4bc0eaSYonatan Cohen 19286f4bc0eaSYonatan Cohen if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) 19295d6ff1baSYonatan Cohen return; 19305d6ff1baSYonatan Cohen 19315d6ff1baSYonatan Cohen scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq); 19325d6ff1baSYonatan Cohen if (scqe_sz == 128) { 19335d6ff1baSYonatan Cohen MLX5_SET(qpc, qpc, 
cs_req, MLX5_REQ_SCAT_DATA64_CQE); 19345d6ff1baSYonatan Cohen return; 19355d6ff1baSYonatan Cohen } 19365d6ff1baSYonatan Cohen 19375d6ff1baSYonatan Cohen if (init_attr->qp_type != MLX5_IB_QPT_DCI || 19385d6ff1baSYonatan Cohen MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe)) 19395d6ff1baSYonatan Cohen MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE); 19405d6ff1baSYonatan Cohen } 19415d6ff1baSYonatan Cohen 1942a60109dcSYonatan Cohen static int atomic_size_to_mode(int size_mask) 1943a60109dcSYonatan Cohen { 1944a60109dcSYonatan Cohen /* driver does not support atomic_size > 256B 1945a60109dcSYonatan Cohen * and does not know how to translate bigger sizes 1946a60109dcSYonatan Cohen */ 1947a60109dcSYonatan Cohen int supported_size_mask = size_mask & 0x1ff; 1948a60109dcSYonatan Cohen int log_max_size; 1949a60109dcSYonatan Cohen 1950a60109dcSYonatan Cohen if (!supported_size_mask) 1951a60109dcSYonatan Cohen return -EOPNOTSUPP; 1952a60109dcSYonatan Cohen 1953a60109dcSYonatan Cohen log_max_size = __fls(supported_size_mask); 1954a60109dcSYonatan Cohen 1955a60109dcSYonatan Cohen if (log_max_size > 3) 1956a60109dcSYonatan Cohen return log_max_size; 1957a60109dcSYonatan Cohen 1958a60109dcSYonatan Cohen return MLX5_ATOMIC_MODE_8B; 1959a60109dcSYonatan Cohen } 1960a60109dcSYonatan Cohen 1961a60109dcSYonatan Cohen static int get_atomic_mode(struct mlx5_ib_dev *dev, 1962a60109dcSYonatan Cohen enum ib_qp_type qp_type) 1963a60109dcSYonatan Cohen { 1964a60109dcSYonatan Cohen u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); 1965a60109dcSYonatan Cohen u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic); 1966a60109dcSYonatan Cohen int atomic_mode = -EOPNOTSUPP; 1967a60109dcSYonatan Cohen int atomic_size_mask; 1968a60109dcSYonatan Cohen 1969a60109dcSYonatan Cohen if (!atomic) 1970a60109dcSYonatan Cohen return -EOPNOTSUPP; 1971a60109dcSYonatan Cohen 1972a60109dcSYonatan Cohen if (qp_type == MLX5_IB_QPT_DCT) 1973a60109dcSYonatan Cohen atomic_size_mask = 
MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc); 1974a60109dcSYonatan Cohen else 1975a60109dcSYonatan Cohen atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); 1976a60109dcSYonatan Cohen 1977a60109dcSYonatan Cohen if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) || 1978a60109dcSYonatan Cohen (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD)) 1979a60109dcSYonatan Cohen atomic_mode = atomic_size_to_mode(atomic_size_mask); 1980a60109dcSYonatan Cohen 1981a60109dcSYonatan Cohen if (atomic_mode <= 0 && 1982a60109dcSYonatan Cohen (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP && 1983a60109dcSYonatan Cohen atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD)) 1984a60109dcSYonatan Cohen atomic_mode = MLX5_ATOMIC_MODE_IB_COMP; 1985a60109dcSYonatan Cohen 1986a60109dcSYonatan Cohen return atomic_mode; 1987a60109dcSYonatan Cohen } 1988a60109dcSYonatan Cohen 1989f78d358cSLeon Romanovsky static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1990f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 199104bcc1c2SLeon Romanovsky { 1992f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 1993f78d358cSLeon Romanovsky u32 uidx = params->uidx; 199404bcc1c2SLeon Romanovsky struct mlx5_ib_resources *devr = &dev->devr; 19953e09a427SLeon Romanovsky u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 199604bcc1c2SLeon Romanovsky int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 199704bcc1c2SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 199804bcc1c2SLeon Romanovsky struct mlx5_ib_qp_base *base; 199904bcc1c2SLeon Romanovsky unsigned long flags; 200004bcc1c2SLeon Romanovsky void *qpc; 200104bcc1c2SLeon Romanovsky u32 *in; 200204bcc1c2SLeon Romanovsky int err; 200304bcc1c2SLeon Romanovsky 200404bcc1c2SLeon Romanovsky if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) 200504bcc1c2SLeon Romanovsky qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 200604bcc1c2SLeon Romanovsky 200704bcc1c2SLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 
200804bcc1c2SLeon Romanovsky if (!in) 200904bcc1c2SLeon Romanovsky return -ENOMEM; 201004bcc1c2SLeon Romanovsky 201104bcc1c2SLeon Romanovsky qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 201204bcc1c2SLeon Romanovsky 201304bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC); 201404bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 201504bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn); 201604bcc1c2SLeon Romanovsky 201704bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 201804bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, block_lb_mc, 1); 201904bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 202004bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_master, 1); 202104bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_SEND) 202204bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_slave_send, 1); 202304bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_RECV) 202404bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_slave_receive, 1); 202504bcc1c2SLeon Romanovsky 20268256c69bSMaor Gottlieb MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); 202704bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ); 202804bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, no_sq, 1); 202904bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 203004bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); 203104bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 203204bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn); 203304bcc1c2SLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 203404bcc1c2SLeon Romanovsky 203504bcc1c2SLeon Romanovsky /* 0xffffff means we ask to work with cqe version 0 */ 203604bcc1c2SLeon Romanovsky if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 203704bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, user_index, uidx); 203804bcc1c2SLeon 
Romanovsky 203904bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { 204004bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, end_padding_mode, 204104bcc1c2SLeon Romanovsky MLX5_WQ_END_PAD_MODE_ALIGN); 204204bcc1c2SLeon Romanovsky /* Special case to clean flag */ 204304bcc1c2SLeon Romanovsky qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 204404bcc1c2SLeon Romanovsky } 204504bcc1c2SLeon Romanovsky 204604bcc1c2SLeon Romanovsky base = &qp->trans_qp.base; 20473e09a427SLeon Romanovsky err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 204804bcc1c2SLeon Romanovsky kvfree(in); 20496367da46SLeon Romanovsky if (err) 205004bcc1c2SLeon Romanovsky return err; 205104bcc1c2SLeon Romanovsky 205204bcc1c2SLeon Romanovsky base->container_mibqp = qp; 205304bcc1c2SLeon Romanovsky base->mqp.event = mlx5_ib_qp_event; 205492cd667cSLeon Romanovsky if (MLX5_CAP_GEN(mdev, ece_support)) 20553e09a427SLeon Romanovsky params->resp.ece_options = MLX5_GET(create_qp_out, out, ece); 205604bcc1c2SLeon Romanovsky 205704bcc1c2SLeon Romanovsky spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 205804bcc1c2SLeon Romanovsky list_add_tail(&qp->qps_list, &dev->qp_list); 205904bcc1c2SLeon Romanovsky spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 206004bcc1c2SLeon Romanovsky 2061968f0b6fSLeon Romanovsky qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn; 206204bcc1c2SLeon Romanovsky return 0; 206304bcc1c2SLeon Romanovsky } 206404bcc1c2SLeon Romanovsky 20652013b4d5SLior Nahmanson static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd, 20662013b4d5SLior Nahmanson struct mlx5_ib_qp *qp, 20672013b4d5SLior Nahmanson struct mlx5_create_qp_params *params) 20682013b4d5SLior Nahmanson { 20692013b4d5SLior Nahmanson struct ib_qp_init_attr *init_attr = params->attr; 20702013b4d5SLior Nahmanson struct mlx5_ib_create_qp *ucmd = params->ucmd; 20712013b4d5SLior Nahmanson u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 20722013b4d5SLior Nahmanson struct ib_udata *udata = 
params->udata; 20732013b4d5SLior Nahmanson u32 uidx = params->uidx; 20742013b4d5SLior Nahmanson struct mlx5_ib_resources *devr = &dev->devr; 20752013b4d5SLior Nahmanson int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 20762013b4d5SLior Nahmanson struct mlx5_core_dev *mdev = dev->mdev; 20772013b4d5SLior Nahmanson struct mlx5_ib_cq *send_cq; 20782013b4d5SLior Nahmanson struct mlx5_ib_cq *recv_cq; 20792013b4d5SLior Nahmanson unsigned long flags; 20802013b4d5SLior Nahmanson struct mlx5_ib_qp_base *base; 20812013b4d5SLior Nahmanson int ts_format; 20822013b4d5SLior Nahmanson int mlx5_st; 20832013b4d5SLior Nahmanson void *qpc; 20842013b4d5SLior Nahmanson u32 *in; 20852013b4d5SLior Nahmanson int err; 20862013b4d5SLior Nahmanson 20872013b4d5SLior Nahmanson spin_lock_init(&qp->sq.lock); 20882013b4d5SLior Nahmanson spin_lock_init(&qp->rq.lock); 20892013b4d5SLior Nahmanson 20902013b4d5SLior Nahmanson mlx5_st = to_mlx5_st(qp->type); 20912013b4d5SLior Nahmanson if (mlx5_st < 0) 20922013b4d5SLior Nahmanson return -EINVAL; 20932013b4d5SLior Nahmanson 20942013b4d5SLior Nahmanson if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 20952013b4d5SLior Nahmanson qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 20962013b4d5SLior Nahmanson 20972013b4d5SLior Nahmanson base = &qp->trans_qp.base; 20982013b4d5SLior Nahmanson 20992013b4d5SLior Nahmanson qp->has_rq = qp_has_rq(init_attr); 21002013b4d5SLior Nahmanson err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); 21012013b4d5SLior Nahmanson if (err) { 21022013b4d5SLior Nahmanson mlx5_ib_dbg(dev, "err %d\n", err); 21032013b4d5SLior Nahmanson return err; 21042013b4d5SLior Nahmanson } 21052013b4d5SLior Nahmanson 21062013b4d5SLior Nahmanson if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || 21072013b4d5SLior Nahmanson ucmd->rq_wqe_count != qp->rq.wqe_cnt) 21082013b4d5SLior Nahmanson return -EINVAL; 21092013b4d5SLior Nahmanson 21102013b4d5SLior Nahmanson if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) 21112013b4d5SLior Nahmanson 
return -EINVAL; 21122013b4d5SLior Nahmanson 21132013b4d5SLior Nahmanson ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq), 21142013b4d5SLior Nahmanson to_mcq(init_attr->recv_cq)); 21152013b4d5SLior Nahmanson 21162013b4d5SLior Nahmanson if (ts_format < 0) 21172013b4d5SLior Nahmanson return ts_format; 21182013b4d5SLior Nahmanson 21192013b4d5SLior Nahmanson err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, ¶ms->resp, 21202013b4d5SLior Nahmanson &inlen, base, ucmd); 21212013b4d5SLior Nahmanson if (err) 21222013b4d5SLior Nahmanson return err; 21232013b4d5SLior Nahmanson 21242013b4d5SLior Nahmanson if (MLX5_CAP_GEN(mdev, ece_support)) 21252013b4d5SLior Nahmanson MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); 21262013b4d5SLior Nahmanson qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 21272013b4d5SLior Nahmanson 21282013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, st, mlx5_st); 21292013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 21302013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn); 21312013b4d5SLior Nahmanson 21322013b4d5SLior Nahmanson if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) 21332013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, wq_signature, 1); 21342013b4d5SLior Nahmanson 21352013b4d5SLior Nahmanson if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 21362013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cd_master, 1); 21372013b4d5SLior Nahmanson if (qp->flags & IB_QP_CREATE_MANAGED_SEND) 21382013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cd_slave_send, 1); 21392013b4d5SLior Nahmanson if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) 21402013b4d5SLior Nahmanson configure_requester_scat_cqe(dev, qp, init_attr, qpc); 21412013b4d5SLior Nahmanson 21422013b4d5SLior Nahmanson if (qp->rq.wqe_cnt) { 21432013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 21442013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 21452013b4d5SLior Nahmanson } 21462013b4d5SLior Nahmanson 214711656f59SLior Nahmanson if 
(qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) { 214811656f59SLior Nahmanson MLX5_SET(qpc, qpc, log_num_dci_stream_channels, 214911656f59SLior Nahmanson ucmd->dci_streams.log_num_concurent); 215011656f59SLior Nahmanson MLX5_SET(qpc, qpc, log_num_dci_errored_streams, 215111656f59SLior Nahmanson ucmd->dci_streams.log_num_errored); 215211656f59SLior Nahmanson } 215311656f59SLior Nahmanson 21542013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, ts_format, ts_format); 21552013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); 21562013b4d5SLior Nahmanson 21572013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 21582013b4d5SLior Nahmanson 21592013b4d5SLior Nahmanson /* Set default resources */ 21602013b4d5SLior Nahmanson if (init_attr->srq) { 21612013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0); 21622013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 21632013b4d5SLior Nahmanson to_msrq(init_attr->srq)->msrq.srqn); 21642013b4d5SLior Nahmanson } else { 21652013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 21662013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 21672013b4d5SLior Nahmanson to_msrq(devr->s1)->msrq.srqn); 21682013b4d5SLior Nahmanson } 21692013b4d5SLior Nahmanson 21702013b4d5SLior Nahmanson if (init_attr->send_cq) 21712013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cqn_snd, 21722013b4d5SLior Nahmanson to_mcq(init_attr->send_cq)->mcq.cqn); 21732013b4d5SLior Nahmanson 21742013b4d5SLior Nahmanson if (init_attr->recv_cq) 21752013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cqn_rcv, 21762013b4d5SLior Nahmanson to_mcq(init_attr->recv_cq)->mcq.cqn); 21772013b4d5SLior Nahmanson 21782013b4d5SLior Nahmanson MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 21792013b4d5SLior Nahmanson 21802013b4d5SLior Nahmanson /* 0xffffff means we ask to work with cqe version 0 */ 21812013b4d5SLior Nahmanson if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 21822013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, user_index, 
uidx); 21832013b4d5SLior Nahmanson 21842013b4d5SLior Nahmanson if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { 21852013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, end_padding_mode, 21862013b4d5SLior Nahmanson MLX5_WQ_END_PAD_MODE_ALIGN); 21872013b4d5SLior Nahmanson /* Special case to clean flag */ 21882013b4d5SLior Nahmanson qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 21892013b4d5SLior Nahmanson } 21902013b4d5SLior Nahmanson 21912013b4d5SLior Nahmanson err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 21922013b4d5SLior Nahmanson 21932013b4d5SLior Nahmanson kvfree(in); 21942013b4d5SLior Nahmanson if (err) 21952013b4d5SLior Nahmanson goto err_create; 21962013b4d5SLior Nahmanson 21972013b4d5SLior Nahmanson base->container_mibqp = qp; 21982013b4d5SLior Nahmanson base->mqp.event = mlx5_ib_qp_event; 21992013b4d5SLior Nahmanson if (MLX5_CAP_GEN(mdev, ece_support)) 22002013b4d5SLior Nahmanson params->resp.ece_options = MLX5_GET(create_qp_out, out, ece); 22012013b4d5SLior Nahmanson 22022013b4d5SLior Nahmanson get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq, 22032013b4d5SLior Nahmanson &send_cq, &recv_cq); 22042013b4d5SLior Nahmanson spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 22052013b4d5SLior Nahmanson mlx5_ib_lock_cqs(send_cq, recv_cq); 22062013b4d5SLior Nahmanson /* Maintain device to QPs access, needed for further handling via reset 22072013b4d5SLior Nahmanson * flow 22082013b4d5SLior Nahmanson */ 22092013b4d5SLior Nahmanson list_add_tail(&qp->qps_list, &dev->qp_list); 22102013b4d5SLior Nahmanson /* Maintain CQ to QPs access, needed for further handling via reset flow 22112013b4d5SLior Nahmanson */ 22122013b4d5SLior Nahmanson if (send_cq) 22132013b4d5SLior Nahmanson list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 22142013b4d5SLior Nahmanson if (recv_cq) 22152013b4d5SLior Nahmanson list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 22162013b4d5SLior Nahmanson mlx5_ib_unlock_cqs(send_cq, recv_cq); 22172013b4d5SLior 
Nahmanson spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 22182013b4d5SLior Nahmanson 22192013b4d5SLior Nahmanson return 0; 22202013b4d5SLior Nahmanson 22212013b4d5SLior Nahmanson err_create: 22222013b4d5SLior Nahmanson destroy_qp(dev, qp, base, udata); 22232013b4d5SLior Nahmanson return err; 22242013b4d5SLior Nahmanson } 22252013b4d5SLior Nahmanson 222698fc1126SLeon Romanovsky static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 2227f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 2228f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 2229e126ba97SEli Cohen { 2230f78d358cSLeon Romanovsky struct ib_qp_init_attr *init_attr = params->attr; 2231f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 22323e09a427SLeon Romanovsky u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 2233f78d358cSLeon Romanovsky struct ib_udata *udata = params->udata; 2234f78d358cSLeon Romanovsky u32 uidx = params->uidx; 2235e126ba97SEli Cohen struct mlx5_ib_resources *devr = &dev->devr; 223609a7d9ecSSaeed Mahameed int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 2237938fe83cSSaeed Mahameed struct mlx5_core_dev *mdev = dev->mdev; 223889ea94a7SMaor Gottlieb struct mlx5_ib_cq *send_cq; 223989ea94a7SMaor Gottlieb struct mlx5_ib_cq *recv_cq; 224089ea94a7SMaor Gottlieb unsigned long flags; 224109a7d9ecSSaeed Mahameed struct mlx5_ib_qp_base *base; 22422fe8d4b8SAharon Landau int ts_format; 2243e7b169f3SNoa Osherovich int mlx5_st; 2244cfb5e088SHaggai Abramovsky void *qpc; 224509a7d9ecSSaeed Mahameed u32 *in; 224609a7d9ecSSaeed Mahameed int err; 2247e126ba97SEli Cohen 2248e126ba97SEli Cohen spin_lock_init(&qp->sq.lock); 2249e126ba97SEli Cohen spin_lock_init(&qp->rq.lock); 2250e126ba97SEli Cohen 22517aede1a2SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 2252e7b169f3SNoa Osherovich if (mlx5_st < 0) 2253e7b169f3SNoa Osherovich return -EINVAL; 2254e7b169f3SNoa Osherovich 2255e126ba97SEli Cohen if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 
2256e126ba97SEli Cohen qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 2257e126ba97SEli Cohen 22582978975cSLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 22592978975cSLeon Romanovsky qp->underlay_qpn = init_attr->source_qpn; 22602978975cSLeon Romanovsky 2261c2e53b2cSYishai Hadas base = (init_attr->qp_type == IB_QPT_RAW_PACKET || 22622be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) ? 2263c2e53b2cSYishai Hadas &qp->raw_packet_qp.rq.base : 2264c2e53b2cSYishai Hadas &qp->trans_qp.base; 2265c2e53b2cSYishai Hadas 2266e126ba97SEli Cohen qp->has_rq = qp_has_rq(init_attr); 22672dfac92dSLeon Romanovsky err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); 2268e126ba97SEli Cohen if (err) { 2269e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 2270e126ba97SEli Cohen return err; 2271e126ba97SEli Cohen } 2272e126ba97SEli Cohen 22732dfac92dSLeon Romanovsky if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || 227498fc1126SLeon Romanovsky ucmd->rq_wqe_count != qp->rq.wqe_cnt) 2275e126ba97SEli Cohen return -EINVAL; 2276e126ba97SEli Cohen 227798fc1126SLeon Romanovsky if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) 227898fc1126SLeon Romanovsky return -EINVAL; 227998fc1126SLeon Romanovsky 22802fe8d4b8SAharon Landau if (init_attr->qp_type != IB_QPT_RAW_PACKET) { 22812fe8d4b8SAharon Landau ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq), 22822fe8d4b8SAharon Landau to_mcq(init_attr->recv_cq)); 22832fe8d4b8SAharon Landau if (ts_format < 0) 22842fe8d4b8SAharon Landau return ts_format; 22852fe8d4b8SAharon Landau } 22862fe8d4b8SAharon Landau 228708d53976SLeon Romanovsky err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, ¶ms->resp, 228808d53976SLeon Romanovsky &inlen, base, ucmd); 2289e126ba97SEli Cohen if (err) 2290e126ba97SEli Cohen return err; 2291e126ba97SEli Cohen 2292e126ba97SEli Cohen if (is_sqp(init_attr->qp_type)) 2293e126ba97SEli Cohen qp->port = init_attr->port_num; 2294e126ba97SEli Cohen 2295e383085cSLeon Romanovsky 
if (MLX5_CAP_GEN(mdev, ece_support)) 2296e383085cSLeon Romanovsky MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); 229709a7d9ecSSaeed Mahameed qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 229809a7d9ecSSaeed Mahameed 2299e7b169f3SNoa Osherovich MLX5_SET(qpc, qpc, st, mlx5_st); 230009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 230198fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn); 2302e126ba97SEli Cohen 2303c95e6d53SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) 230409a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, wq_signature, 1); 2305e126ba97SEli Cohen 23062be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 230709a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, block_lb_mc, 1); 2308f360d88aSEli Cohen 23092be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 231009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_master, 1); 23112be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_SEND) 231209a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_slave_send, 1); 23132be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_RECV) 231409a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_slave_receive, 1); 23152be08c30SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) 2316569c6651SDanit Goldberg MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1); 231790ecb37aSLeon Romanovsky if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && 231890ecb37aSLeon Romanovsky (init_attr->qp_type == IB_QPT_RC || 23198bde2c50SLeon Romanovsky init_attr->qp_type == IB_QPT_UC)) { 232052c81f47SColin Ian King int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq); 23218bde2c50SLeon Romanovsky 23228bde2c50SLeon Romanovsky MLX5_SET(qpc, qpc, cs_res, 23238bde2c50SLeon Romanovsky rcqe_sz == 128 ? 
MLX5_RES_SCAT_DATA64_CQE : 23248bde2c50SLeon Romanovsky MLX5_RES_SCAT_DATA32_CQE); 23258bde2c50SLeon Romanovsky } 232690ecb37aSLeon Romanovsky if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && 23277aede1a2SLeon Romanovsky (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC)) 232881530ab0SLeon Romanovsky configure_requester_scat_cqe(dev, qp, init_attr, qpc); 2329e126ba97SEli Cohen 2330e126ba97SEli Cohen if (qp->rq.wqe_cnt) { 233109a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 233209a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 2333e126ba97SEli Cohen } 2334e126ba97SEli Cohen 23352fe8d4b8SAharon Landau if (init_attr->qp_type != IB_QPT_RAW_PACKET) 23362fe8d4b8SAharon Landau MLX5_SET(qpc, qpc, ts_format, ts_format); 23372fe8d4b8SAharon Landau 233809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); 2339e126ba97SEli Cohen 23403fd3307eSArtemy Kovalyov if (qp->sq.wqe_cnt) { 234109a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 23423fd3307eSArtemy Kovalyov } else { 234309a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, no_sq, 1); 23443fd3307eSArtemy Kovalyov if (init_attr->srq && 23453fd3307eSArtemy Kovalyov init_attr->srq->srq_type == IB_SRQT_TM) 23463fd3307eSArtemy Kovalyov MLX5_SET(qpc, qpc, offload_type, 23473fd3307eSArtemy Kovalyov MLX5_QPC_OFFLOAD_TYPE_RNDV); 23483fd3307eSArtemy Kovalyov } 2349e126ba97SEli Cohen 2350e126ba97SEli Cohen /* Set default resources */ 2351e126ba97SEli Cohen switch (init_attr->qp_type) { 2352e126ba97SEli Cohen case IB_QPT_XRC_INI: 235309a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 2354f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 235509a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 2356e126ba97SEli Cohen break; 2357e126ba97SEli Cohen default: 2358e126ba97SEli Cohen if (init_attr->srq) { 2359f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, 
xrcd, devr->xrcdn0); 236009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); 2361e126ba97SEli Cohen } else { 2362f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 236309a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); 2364e126ba97SEli Cohen } 2365e126ba97SEli Cohen } 2366e126ba97SEli Cohen 2367e126ba97SEli Cohen if (init_attr->send_cq) 236809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); 2369e126ba97SEli Cohen 2370e126ba97SEli Cohen if (init_attr->recv_cq) 237109a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); 2372e126ba97SEli Cohen 237309a7d9ecSSaeed Mahameed MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 2374e126ba97SEli Cohen 2375cfb5e088SHaggai Abramovsky /* 0xffffff means we ask to work with cqe version 0 */ 237609a7d9ecSSaeed Mahameed if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 2377cfb5e088SHaggai Abramovsky MLX5_SET(qpc, qpc, user_index, uidx); 237809a7d9ecSSaeed Mahameed 23792978975cSLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING && 23802978975cSLeon Romanovsky init_attr->qp_type != IB_QPT_RAW_PACKET) { 2381b1383aa6SNoa Osherovich MLX5_SET(qpc, qpc, end_padding_mode, 2382b1383aa6SNoa Osherovich MLX5_WQ_END_PAD_MODE_ALIGN); 23832978975cSLeon Romanovsky /* Special case to clean flag */ 23842978975cSLeon Romanovsky qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 2385b1383aa6SNoa Osherovich } 2386b1383aa6SNoa Osherovich 2387c2e53b2cSYishai Hadas if (init_attr->qp_type == IB_QPT_RAW_PACKET || 23882be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 23892dfac92dSLeon Romanovsky qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr; 23900fb2ed66Smajd@mellanox.com raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); 23917f72052cSYishai Hadas err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata, 23922fe8d4b8SAharon Landau ¶ms->resp, 
init_attr); 239304bcc1c2SLeon Romanovsky } else 23943e09a427SLeon Romanovsky err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 2395e126ba97SEli Cohen 2396479163f4SAl Viro kvfree(in); 239704bcc1c2SLeon Romanovsky if (err) 239804bcc1c2SLeon Romanovsky goto err_create; 2399e126ba97SEli Cohen 240019098df2Smajd@mellanox.com base->container_mibqp = qp; 240119098df2Smajd@mellanox.com base->mqp.event = mlx5_ib_qp_event; 240292cd667cSLeon Romanovsky if (MLX5_CAP_GEN(mdev, ece_support)) 24033e09a427SLeon Romanovsky params->resp.ece_options = MLX5_GET(create_qp_out, out, ece); 2404e126ba97SEli Cohen 24057aede1a2SLeon Romanovsky get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq, 240689ea94a7SMaor Gottlieb &send_cq, &recv_cq); 240789ea94a7SMaor Gottlieb spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 240889ea94a7SMaor Gottlieb mlx5_ib_lock_cqs(send_cq, recv_cq); 240989ea94a7SMaor Gottlieb /* Maintain device to QPs access, needed for further handling via reset 241089ea94a7SMaor Gottlieb * flow 241189ea94a7SMaor Gottlieb */ 241289ea94a7SMaor Gottlieb list_add_tail(&qp->qps_list, &dev->qp_list); 241389ea94a7SMaor Gottlieb /* Maintain CQ to QPs access, needed for further handling via reset flow 241489ea94a7SMaor Gottlieb */ 241589ea94a7SMaor Gottlieb if (send_cq) 241689ea94a7SMaor Gottlieb list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 241789ea94a7SMaor Gottlieb if (recv_cq) 241889ea94a7SMaor Gottlieb list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 241989ea94a7SMaor Gottlieb mlx5_ib_unlock_cqs(send_cq, recv_cq); 242089ea94a7SMaor Gottlieb spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 242189ea94a7SMaor Gottlieb 2422e126ba97SEli Cohen return 0; 2423e126ba97SEli Cohen 2424e126ba97SEli Cohen err_create: 2425747c519cSLeon Romanovsky destroy_qp(dev, qp, base, udata); 2426e126ba97SEli Cohen return err; 2427e126ba97SEli Cohen } 2428e126ba97SEli Cohen 242998fc1126SLeon Romanovsky static int create_kernel_qp(struct mlx5_ib_dev 
*dev, struct ib_pd *pd, 2430f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 2431f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 243298fc1126SLeon Romanovsky { 2433f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 2434f78d358cSLeon Romanovsky u32 uidx = params->uidx; 243598fc1126SLeon Romanovsky struct mlx5_ib_resources *devr = &dev->devr; 24363e09a427SLeon Romanovsky u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 243798fc1126SLeon Romanovsky int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 243898fc1126SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 243998fc1126SLeon Romanovsky struct mlx5_ib_cq *send_cq; 244098fc1126SLeon Romanovsky struct mlx5_ib_cq *recv_cq; 244198fc1126SLeon Romanovsky unsigned long flags; 244298fc1126SLeon Romanovsky struct mlx5_ib_qp_base *base; 244398fc1126SLeon Romanovsky int mlx5_st; 244498fc1126SLeon Romanovsky void *qpc; 244598fc1126SLeon Romanovsky u32 *in; 244698fc1126SLeon Romanovsky int err; 244798fc1126SLeon Romanovsky 244898fc1126SLeon Romanovsky spin_lock_init(&qp->sq.lock); 244998fc1126SLeon Romanovsky spin_lock_init(&qp->rq.lock); 245098fc1126SLeon Romanovsky 245198fc1126SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 245298fc1126SLeon Romanovsky if (mlx5_st < 0) 245398fc1126SLeon Romanovsky return -EINVAL; 245498fc1126SLeon Romanovsky 245598fc1126SLeon Romanovsky if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) 245698fc1126SLeon Romanovsky qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 245798fc1126SLeon Romanovsky 245898fc1126SLeon Romanovsky base = &qp->trans_qp.base; 245998fc1126SLeon Romanovsky 246098fc1126SLeon Romanovsky qp->has_rq = qp_has_rq(attr); 246198fc1126SLeon Romanovsky err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL); 246298fc1126SLeon Romanovsky if (err) { 246398fc1126SLeon Romanovsky mlx5_ib_dbg(dev, "err %d\n", err); 246498fc1126SLeon Romanovsky return err; 246598fc1126SLeon Romanovsky } 246698fc1126SLeon Romanovsky 246798fc1126SLeon Romanovsky err = _create_kernel_qp(dev, 
attr, qp, &in, &inlen, base); 246898fc1126SLeon Romanovsky if (err) 246998fc1126SLeon Romanovsky return err; 247098fc1126SLeon Romanovsky 247198fc1126SLeon Romanovsky if (is_sqp(attr->qp_type)) 247298fc1126SLeon Romanovsky qp->port = attr->port_num; 247398fc1126SLeon Romanovsky 247498fc1126SLeon Romanovsky qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 247598fc1126SLeon Romanovsky 247698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, st, mlx5_st); 247798fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 247898fc1126SLeon Romanovsky 247998fc1126SLeon Romanovsky if (attr->qp_type != MLX5_IB_QPT_REG_UMR) 248098fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn); 248198fc1126SLeon Romanovsky else 248298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, latency_sensitive, 1); 248398fc1126SLeon Romanovsky 248498fc1126SLeon Romanovsky 248598fc1126SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 248698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, block_lb_mc, 1); 248798fc1126SLeon Romanovsky 248898fc1126SLeon Romanovsky if (qp->rq.wqe_cnt) { 248998fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 249098fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 249198fc1126SLeon Romanovsky } 249298fc1126SLeon Romanovsky 249398fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr)); 249498fc1126SLeon Romanovsky 249598fc1126SLeon Romanovsky if (qp->sq.wqe_cnt) 249698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 249798fc1126SLeon Romanovsky else 249898fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, no_sq, 1); 249998fc1126SLeon Romanovsky 250098fc1126SLeon Romanovsky if (attr->srq) { 2501f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0); 250298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 250398fc1126SLeon Romanovsky to_msrq(attr->srq)->msrq.srqn); 250498fc1126SLeon Romanovsky } else { 2505f4375443SLeon 
Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 250698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 250798fc1126SLeon Romanovsky to_msrq(devr->s1)->msrq.srqn); 250898fc1126SLeon Romanovsky } 250998fc1126SLeon Romanovsky 251098fc1126SLeon Romanovsky if (attr->send_cq) 251198fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn); 251298fc1126SLeon Romanovsky 251398fc1126SLeon Romanovsky if (attr->recv_cq) 251498fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn); 251598fc1126SLeon Romanovsky 251698fc1126SLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 251798fc1126SLeon Romanovsky 251898fc1126SLeon Romanovsky /* 0xffffff means we ask to work with cqe version 0 */ 251998fc1126SLeon Romanovsky if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 252098fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, user_index, uidx); 252198fc1126SLeon Romanovsky 252298fc1126SLeon Romanovsky /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */ 252398fc1126SLeon Romanovsky if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) 252498fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); 252598fc1126SLeon Romanovsky 2526742948ccSOr Har-Toov if (qp->flags & IB_QP_CREATE_INTEGRITY_EN && 2527742948ccSOr Har-Toov MLX5_CAP_GEN(mdev, go_back_n)) 2528742948ccSOr Har-Toov MLX5_SET(qpc, qpc, retry_mode, MLX5_QP_RM_GO_BACK_N); 2529742948ccSOr Har-Toov 25303e09a427SLeon Romanovsky err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 253198fc1126SLeon Romanovsky kvfree(in); 253298fc1126SLeon Romanovsky if (err) 253398fc1126SLeon Romanovsky goto err_create; 253498fc1126SLeon Romanovsky 253598fc1126SLeon Romanovsky base->container_mibqp = qp; 253698fc1126SLeon Romanovsky base->mqp.event = mlx5_ib_qp_event; 253798fc1126SLeon Romanovsky 253898fc1126SLeon Romanovsky get_cqs(qp->type, attr->send_cq, attr->recv_cq, 253998fc1126SLeon Romanovsky &send_cq, &recv_cq); 254098fc1126SLeon Romanovsky 
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 254198fc1126SLeon Romanovsky mlx5_ib_lock_cqs(send_cq, recv_cq); 254298fc1126SLeon Romanovsky /* Maintain device to QPs access, needed for further handling via reset 254398fc1126SLeon Romanovsky * flow 254498fc1126SLeon Romanovsky */ 254598fc1126SLeon Romanovsky list_add_tail(&qp->qps_list, &dev->qp_list); 254698fc1126SLeon Romanovsky /* Maintain CQ to QPs access, needed for further handling via reset flow 254798fc1126SLeon Romanovsky */ 254898fc1126SLeon Romanovsky if (send_cq) 254998fc1126SLeon Romanovsky list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 255098fc1126SLeon Romanovsky if (recv_cq) 255198fc1126SLeon Romanovsky list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 255298fc1126SLeon Romanovsky mlx5_ib_unlock_cqs(send_cq, recv_cq); 255398fc1126SLeon Romanovsky spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 255498fc1126SLeon Romanovsky 255598fc1126SLeon Romanovsky return 0; 255698fc1126SLeon Romanovsky 255798fc1126SLeon Romanovsky err_create: 2558747c519cSLeon Romanovsky destroy_qp(dev, qp, base, NULL); 255998fc1126SLeon Romanovsky return err; 256098fc1126SLeon Romanovsky } 256198fc1126SLeon Romanovsky 2562e126ba97SEli Cohen static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 2563e126ba97SEli Cohen __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 2564e126ba97SEli Cohen { 2565e126ba97SEli Cohen if (send_cq) { 2566e126ba97SEli Cohen if (recv_cq) { 2567e126ba97SEli Cohen if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 256889ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 2569e126ba97SEli Cohen spin_lock_nested(&recv_cq->lock, 2570e126ba97SEli Cohen SINGLE_DEPTH_NESTING); 2571e126ba97SEli Cohen } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 257289ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 2573e126ba97SEli Cohen __acquire(&recv_cq->lock); 2574e126ba97SEli Cohen } else { 257589ea94a7SMaor Gottlieb spin_lock(&recv_cq->lock); 
2576e126ba97SEli Cohen spin_lock_nested(&send_cq->lock, 2577e126ba97SEli Cohen SINGLE_DEPTH_NESTING); 2578e126ba97SEli Cohen } 2579e126ba97SEli Cohen } else { 258089ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 25816a4f139aSEli Cohen __acquire(&recv_cq->lock); 2582e126ba97SEli Cohen } 2583e126ba97SEli Cohen } else if (recv_cq) { 258489ea94a7SMaor Gottlieb spin_lock(&recv_cq->lock); 25856a4f139aSEli Cohen __acquire(&send_cq->lock); 25866a4f139aSEli Cohen } else { 25876a4f139aSEli Cohen __acquire(&send_cq->lock); 25886a4f139aSEli Cohen __acquire(&recv_cq->lock); 2589e126ba97SEli Cohen } 2590e126ba97SEli Cohen } 2591e126ba97SEli Cohen 2592e126ba97SEli Cohen static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 2593e126ba97SEli Cohen __releases(&send_cq->lock) __releases(&recv_cq->lock) 2594e126ba97SEli Cohen { 2595e126ba97SEli Cohen if (send_cq) { 2596e126ba97SEli Cohen if (recv_cq) { 2597e126ba97SEli Cohen if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 2598e126ba97SEli Cohen spin_unlock(&recv_cq->lock); 259989ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2600e126ba97SEli Cohen } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 2601e126ba97SEli Cohen __release(&recv_cq->lock); 260289ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2603e126ba97SEli Cohen } else { 2604e126ba97SEli Cohen spin_unlock(&send_cq->lock); 260589ea94a7SMaor Gottlieb spin_unlock(&recv_cq->lock); 2606e126ba97SEli Cohen } 2607e126ba97SEli Cohen } else { 26086a4f139aSEli Cohen __release(&recv_cq->lock); 260989ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2610e126ba97SEli Cohen } 2611e126ba97SEli Cohen } else if (recv_cq) { 26126a4f139aSEli Cohen __release(&send_cq->lock); 261389ea94a7SMaor Gottlieb spin_unlock(&recv_cq->lock); 26146a4f139aSEli Cohen } else { 26156a4f139aSEli Cohen __release(&recv_cq->lock); 26166a4f139aSEli Cohen __release(&send_cq->lock); 2617e126ba97SEli Cohen } 2618e126ba97SEli Cohen } 2619e126ba97SEli Cohen 262089ea94a7SMaor Gottlieb 
static void get_cqs(enum ib_qp_type qp_type, 262189ea94a7SMaor Gottlieb struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 2622e126ba97SEli Cohen struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) 2623e126ba97SEli Cohen { 262489ea94a7SMaor Gottlieb switch (qp_type) { 2625e126ba97SEli Cohen case IB_QPT_XRC_TGT: 2626e126ba97SEli Cohen *send_cq = NULL; 2627e126ba97SEli Cohen *recv_cq = NULL; 2628e126ba97SEli Cohen break; 2629e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: 2630e126ba97SEli Cohen case IB_QPT_XRC_INI: 263189ea94a7SMaor Gottlieb *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 2632e126ba97SEli Cohen *recv_cq = NULL; 2633e126ba97SEli Cohen break; 2634e126ba97SEli Cohen 2635e126ba97SEli Cohen case IB_QPT_SMI: 2636d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: 2637e126ba97SEli Cohen case IB_QPT_RC: 2638e126ba97SEli Cohen case IB_QPT_UC: 2639e126ba97SEli Cohen case IB_QPT_UD: 26400fb2ed66Smajd@mellanox.com case IB_QPT_RAW_PACKET: 264189ea94a7SMaor Gottlieb *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 264289ea94a7SMaor Gottlieb *recv_cq = ib_recv_cq ? 
to_mcq(ib_recv_cq) : NULL; 2643e126ba97SEli Cohen break; 2644e126ba97SEli Cohen default: 2645e126ba97SEli Cohen *send_cq = NULL; 2646e126ba97SEli Cohen *recv_cq = NULL; 2647e126ba97SEli Cohen break; 2648e126ba97SEli Cohen } 2649e126ba97SEli Cohen } 2650e126ba97SEli Cohen 2651ad5f8e96Smajd@mellanox.com static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 265213eab21fSAviv Heller const struct mlx5_modify_raw_qp_param *raw_qp_param, 265313eab21fSAviv Heller u8 lag_tx_affinity); 2654ad5f8e96Smajd@mellanox.com 2655bdeacabdSShamir Rabinovitch static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 2656bdeacabdSShamir Rabinovitch struct ib_udata *udata) 2657e126ba97SEli Cohen { 2658e126ba97SEli Cohen struct mlx5_ib_cq *send_cq, *recv_cq; 2659c2e53b2cSYishai Hadas struct mlx5_ib_qp_base *base; 266089ea94a7SMaor Gottlieb unsigned long flags; 2661e126ba97SEli Cohen int err; 2662e126ba97SEli Cohen 26636c41965dSLeon Romanovsky if (qp->is_rss) { 266428d61370SYishai Hadas destroy_rss_raw_qp_tir(dev, qp); 266528d61370SYishai Hadas return; 266628d61370SYishai Hadas } 266728d61370SYishai Hadas 26686c41965dSLeon Romanovsky base = (qp->type == IB_QPT_RAW_PACKET || 26692be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) ? 
26700fb2ed66Smajd@mellanox.com &qp->raw_packet_qp.rq.base : 26710fb2ed66Smajd@mellanox.com &qp->trans_qp.base; 26720fb2ed66Smajd@mellanox.com 26736aec21f6SHaggai Eran if (qp->state != IB_QPS_RESET) { 26746c41965dSLeon Romanovsky if (qp->type != IB_QPT_RAW_PACKET && 26752be08c30SLeon Romanovsky !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) { 2676333fbaa0SLeon Romanovsky err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, 26775f62a521SLeon Romanovsky NULL, &base->mqp, NULL); 2678ad5f8e96Smajd@mellanox.com } else { 26790680efa2SAlex Vesker struct mlx5_modify_raw_qp_param raw_qp_param = { 26800680efa2SAlex Vesker .operation = MLX5_CMD_OP_2RST_QP 26810680efa2SAlex Vesker }; 26820680efa2SAlex Vesker 268313eab21fSAviv Heller err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0); 2684ad5f8e96Smajd@mellanox.com } 2685ad5f8e96Smajd@mellanox.com if (err) 2686427c1e7bSmajd@mellanox.com mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n", 268719098df2Smajd@mellanox.com base->mqp.qpn); 26886aec21f6SHaggai Eran } 2689e126ba97SEli Cohen 26906c41965dSLeon Romanovsky get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, 26916c41965dSLeon Romanovsky &recv_cq); 269289ea94a7SMaor Gottlieb 269389ea94a7SMaor Gottlieb spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 269489ea94a7SMaor Gottlieb mlx5_ib_lock_cqs(send_cq, recv_cq); 269589ea94a7SMaor Gottlieb /* del from lists under both locks above to protect reset flow paths */ 269689ea94a7SMaor Gottlieb list_del(&qp->qps_list); 269789ea94a7SMaor Gottlieb if (send_cq) 269889ea94a7SMaor Gottlieb list_del(&qp->cq_send_list); 269989ea94a7SMaor Gottlieb 270089ea94a7SMaor Gottlieb if (recv_cq) 270189ea94a7SMaor Gottlieb list_del(&qp->cq_recv_list); 2702e126ba97SEli Cohen 270303c4077bSLeon Romanovsky if (!udata) { 270419098df2Smajd@mellanox.com __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 2705e126ba97SEli Cohen qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); 2706e126ba97SEli Cohen if (send_cq != recv_cq) 270719098df2Smajd@mellanox.com __mlx5_ib_cq_clean(send_cq, base->mqp.qpn, 270819098df2Smajd@mellanox.com NULL); 2709e126ba97SEli Cohen } 271089ea94a7SMaor Gottlieb mlx5_ib_unlock_cqs(send_cq, recv_cq); 271189ea94a7SMaor Gottlieb spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 2712e126ba97SEli Cohen 27136c41965dSLeon Romanovsky if (qp->type == IB_QPT_RAW_PACKET || 27142be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 27150fb2ed66Smajd@mellanox.com destroy_raw_packet_qp(dev, qp); 27160fb2ed66Smajd@mellanox.com } else { 2717333fbaa0SLeon Romanovsky err = mlx5_core_destroy_qp(dev, &base->mqp); 2718e126ba97SEli Cohen if (err) 27190fb2ed66Smajd@mellanox.com mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", 27200fb2ed66Smajd@mellanox.com base->mqp.qpn); 27210fb2ed66Smajd@mellanox.com } 2722e126ba97SEli Cohen 2723747c519cSLeon Romanovsky destroy_qp(dev, qp, base, udata); 2724e126ba97SEli Cohen } 2725e126ba97SEli Cohen 2726a645a89dSLeon Romanovsky static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd, 2727a645a89dSLeon Romanovsky struct mlx5_ib_qp *qp, 2728f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 2729b4aaa1f0SMoni Shoua { 2730f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 2731f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 2732f78d358cSLeon Romanovsky u32 uidx = params->uidx; 2733b4aaa1f0SMoni Shoua void *dctc; 2734b4aaa1f0SMoni Shoua 27357c4b1ab9SMark Zhang if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct)) 27367c4b1ab9SMark Zhang return -EOPNOTSUPP; 27377c4b1ab9SMark Zhang 2738b4aaa1f0SMoni Shoua qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL); 27399c2ba4edSLeon Romanovsky if (!qp->dct.in) 274047c80612SLeon Romanovsky return -ENOMEM; 2741b4aaa1f0SMoni Shoua 2742a01a5860SYishai Hadas MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid); 
2743b4aaa1f0SMoni Shoua dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); 2744b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn); 2745b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn); 2746b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn); 2747b4aaa1f0SMoni Shoua MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key); 2748b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, user_index, uidx); 2749a645a89dSLeon Romanovsky if (MLX5_CAP_GEN(dev->mdev, ece_support)) 2750a645a89dSLeon Romanovsky MLX5_SET(dctc, dctc, ece, ucmd->ece_options); 2751b4aaa1f0SMoni Shoua 275237518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) { 2753fd9dab7eSLeon Romanovsky int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq); 2754fd9dab7eSLeon Romanovsky 2755fd9dab7eSLeon Romanovsky if (rcqe_sz == 128) 2756fd9dab7eSLeon Romanovsky MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE); 2757fd9dab7eSLeon Romanovsky } 27585d6ff1baSYonatan Cohen 2759b4aaa1f0SMoni Shoua qp->state = IB_QPS_RESET; 276047c80612SLeon Romanovsky return 0; 2761b4aaa1f0SMoni Shoua } 2762b4aaa1f0SMoni Shoua 27637aede1a2SLeon Romanovsky static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 27647aede1a2SLeon Romanovsky enum ib_qp_type *type) 27656eb7edffSLeon Romanovsky { 27666eb7edffSLeon Romanovsky if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct)) 27676eb7edffSLeon Romanovsky goto out; 27686eb7edffSLeon Romanovsky 27696eb7edffSLeon Romanovsky switch (attr->qp_type) { 27706eb7edffSLeon Romanovsky case IB_QPT_XRC_TGT: 27716eb7edffSLeon Romanovsky case IB_QPT_XRC_INI: 27726eb7edffSLeon Romanovsky if (!MLX5_CAP_GEN(dev->mdev, xrc)) 27736eb7edffSLeon Romanovsky goto out; 27746eb7edffSLeon Romanovsky fallthrough; 27756eb7edffSLeon Romanovsky case IB_QPT_RC: 27766eb7edffSLeon Romanovsky case IB_QPT_UC: 27776eb7edffSLeon Romanovsky case IB_QPT_SMI: 27786eb7edffSLeon Romanovsky case 
MLX5_IB_QPT_HW_GSI: 27796eb7edffSLeon Romanovsky case IB_QPT_DRIVER: 27806eb7edffSLeon Romanovsky case IB_QPT_GSI: 278142caf9cbSMark Bloch case IB_QPT_RAW_PACKET: 278242caf9cbSMark Bloch case IB_QPT_UD: 278342caf9cbSMark Bloch case MLX5_IB_QPT_REG_UMR: 27847aede1a2SLeon Romanovsky break; 27856eb7edffSLeon Romanovsky default: 27866eb7edffSLeon Romanovsky goto out; 2787b4aaa1f0SMoni Shoua } 2788b4aaa1f0SMoni Shoua 27897aede1a2SLeon Romanovsky *type = attr->qp_type; 2790b4aaa1f0SMoni Shoua return 0; 27916eb7edffSLeon Romanovsky 27926eb7edffSLeon Romanovsky out: 27936eb7edffSLeon Romanovsky mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type); 27946eb7edffSLeon Romanovsky return -EOPNOTSUPP; 2795b4aaa1f0SMoni Shoua } 2796b4aaa1f0SMoni Shoua 27972242cc25SLeon Romanovsky static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd, 27982242cc25SLeon Romanovsky struct ib_qp_init_attr *attr, 27992242cc25SLeon Romanovsky struct ib_udata *udata) 28002242cc25SLeon Romanovsky { 28012242cc25SLeon Romanovsky struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 28022242cc25SLeon Romanovsky udata, struct mlx5_ib_ucontext, ibucontext); 28032242cc25SLeon Romanovsky 28042242cc25SLeon Romanovsky if (!udata) { 28052242cc25SLeon Romanovsky /* Kernel create_qp callers */ 28062242cc25SLeon Romanovsky if (attr->rwq_ind_tbl) 28072242cc25SLeon Romanovsky return -EOPNOTSUPP; 28082242cc25SLeon Romanovsky 28092242cc25SLeon Romanovsky switch (attr->qp_type) { 28102242cc25SLeon Romanovsky case IB_QPT_RAW_PACKET: 28112242cc25SLeon Romanovsky case IB_QPT_DRIVER: 28122242cc25SLeon Romanovsky return -EOPNOTSUPP; 28132242cc25SLeon Romanovsky default: 28142242cc25SLeon Romanovsky return 0; 28152242cc25SLeon Romanovsky } 28162242cc25SLeon Romanovsky } 28172242cc25SLeon Romanovsky 28182242cc25SLeon Romanovsky /* Userspace create_qp callers */ 28192242cc25SLeon Romanovsky if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) { 28202242cc25SLeon Romanovsky 
mlx5_ib_dbg(dev, 28212242cc25SLeon Romanovsky "Raw Packet QP is only supported for CQE version > 0\n"); 28222242cc25SLeon Romanovsky return -EINVAL; 28232242cc25SLeon Romanovsky } 28242242cc25SLeon Romanovsky 28252242cc25SLeon Romanovsky if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) { 28262242cc25SLeon Romanovsky mlx5_ib_dbg(dev, 28272242cc25SLeon Romanovsky "Wrong QP type %d for the RWQ indirect table\n", 28282242cc25SLeon Romanovsky attr->qp_type); 28292242cc25SLeon Romanovsky return -EINVAL; 28302242cc25SLeon Romanovsky } 28312242cc25SLeon Romanovsky 28322242cc25SLeon Romanovsky /* 28332242cc25SLeon Romanovsky * We don't need to see this warning, it means that kernel code 28342242cc25SLeon Romanovsky * missing ib_pd. Placed here to catch developer's mistakes. 28352242cc25SLeon Romanovsky */ 28362242cc25SLeon Romanovsky WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT, 28372242cc25SLeon Romanovsky "There is a missing PD pointer assignment\n"); 28382242cc25SLeon Romanovsky return 0; 28392242cc25SLeon Romanovsky } 28402242cc25SLeon Romanovsky 284137518fa4SLeon Romanovsky static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, 284237518fa4SLeon Romanovsky bool cond, struct mlx5_ib_qp *qp) 284337518fa4SLeon Romanovsky { 284437518fa4SLeon Romanovsky if (!(*flags & flag)) 284537518fa4SLeon Romanovsky return; 284637518fa4SLeon Romanovsky 284737518fa4SLeon Romanovsky if (cond) { 284837518fa4SLeon Romanovsky qp->flags_en |= flag; 284937518fa4SLeon Romanovsky *flags &= ~flag; 285037518fa4SLeon Romanovsky return; 285137518fa4SLeon Romanovsky } 285237518fa4SLeon Romanovsky 285381530ab0SLeon Romanovsky switch (flag) { 285481530ab0SLeon Romanovsky case MLX5_QP_FLAG_SCATTER_CQE: 285581530ab0SLeon Romanovsky case MLX5_QP_FLAG_ALLOW_SCATTER_CQE: 285637518fa4SLeon Romanovsky /* 285781530ab0SLeon Romanovsky * We don't return error if these flags were provided, 285837518fa4SLeon Romanovsky * and mlx5 doesn't have right capability. 
285937518fa4SLeon Romanovsky */ 286081530ab0SLeon Romanovsky *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE | 286181530ab0SLeon Romanovsky MLX5_QP_FLAG_ALLOW_SCATTER_CQE); 286237518fa4SLeon Romanovsky return; 286381530ab0SLeon Romanovsky default: 286481530ab0SLeon Romanovsky break; 286537518fa4SLeon Romanovsky } 286637518fa4SLeon Romanovsky mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag); 286737518fa4SLeon Romanovsky } 286837518fa4SLeon Romanovsky 286937518fa4SLeon Romanovsky static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 28705ce0592bSLeon Romanovsky void *ucmd, struct ib_qp_init_attr *attr) 28712fdddbd5SLeon Romanovsky { 287237518fa4SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 287337518fa4SLeon Romanovsky bool cond; 28745ce0592bSLeon Romanovsky int flags; 28755ce0592bSLeon Romanovsky 28765ce0592bSLeon Romanovsky if (attr->rwq_ind_tbl) 28775ce0592bSLeon Romanovsky flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags; 28785ce0592bSLeon Romanovsky else 28795ce0592bSLeon Romanovsky flags = ((struct mlx5_ib_create_qp *)ucmd)->flags; 288037518fa4SLeon Romanovsky 288137518fa4SLeon Romanovsky switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) { 28822fdddbd5SLeon Romanovsky case MLX5_QP_FLAG_TYPE_DCI: 28837aede1a2SLeon Romanovsky qp->type = MLX5_IB_QPT_DCI; 28842fdddbd5SLeon Romanovsky break; 28852fdddbd5SLeon Romanovsky case MLX5_QP_FLAG_TYPE_DCT: 28867aede1a2SLeon Romanovsky qp->type = MLX5_IB_QPT_DCT; 288737518fa4SLeon Romanovsky break; 28887aede1a2SLeon Romanovsky default: 28897aede1a2SLeon Romanovsky if (qp->type != IB_QPT_DRIVER) 28907aede1a2SLeon Romanovsky break; 28917aede1a2SLeon Romanovsky /* 28927aede1a2SLeon Romanovsky * It is IB_QPT_DRIVER and or no subtype or 28937aede1a2SLeon Romanovsky * wrong subtype were provided. 
28947aede1a2SLeon Romanovsky */ 289537518fa4SLeon Romanovsky return -EINVAL; 28967aede1a2SLeon Romanovsky } 289737518fa4SLeon Romanovsky 289837518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp); 289937518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp); 290011656f59SLior Nahmanson process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM, 290165f90c8eSLior Nahmanson MLX5_CAP_GEN(mdev, log_max_dci_stream_channels), 290211656f59SLior Nahmanson qp); 290337518fa4SLeon Romanovsky 290437518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp); 290537518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE, 290637518fa4SLeon Romanovsky MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); 290781530ab0SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE, 290881530ab0SLeon Romanovsky MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); 290937518fa4SLeon Romanovsky 29107aede1a2SLeon Romanovsky if (qp->type == IB_QPT_RAW_PACKET) { 291137518fa4SLeon Romanovsky cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || 291237518fa4SLeon Romanovsky MLX5_CAP_ETH(mdev, tunnel_stateless_gre) || 291337518fa4SLeon Romanovsky MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx); 291437518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS, 291537518fa4SLeon Romanovsky cond, qp); 291637518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 291737518fa4SLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true, 291837518fa4SLeon Romanovsky qp); 291937518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 292037518fa4SLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true, 292137518fa4SLeon Romanovsky qp); 292237518fa4SLeon Romanovsky } 292337518fa4SLeon Romanovsky 29247aede1a2SLeon Romanovsky if (qp->type == IB_QPT_RC) 292537518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 292637518fa4SLeon Romanovsky 
MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE, 292737518fa4SLeon Romanovsky MLX5_CAP_GEN(mdev, qp_packet_based), qp); 292837518fa4SLeon Romanovsky 292976883a6cSLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp); 293076883a6cSLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp); 293176883a6cSLeon Romanovsky 29325d6fffedSLeon Romanovsky cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS | 29335d6fffedSLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 29345d6fffedSLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC); 29355d6fffedSLeon Romanovsky if (attr->rwq_ind_tbl && cond) { 29365d6fffedSLeon Romanovsky mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n", 29375d6fffedSLeon Romanovsky cond); 29385d6fffedSLeon Romanovsky return -EINVAL; 29395d6fffedSLeon Romanovsky } 29405d6fffedSLeon Romanovsky 294137518fa4SLeon Romanovsky if (flags) 294237518fa4SLeon Romanovsky mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags); 294337518fa4SLeon Romanovsky 294437518fa4SLeon Romanovsky return (flags) ? -EINVAL : 0; 29452fdddbd5SLeon Romanovsky } 29462fdddbd5SLeon Romanovsky 29472978975cSLeon Romanovsky static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag, 29482978975cSLeon Romanovsky bool cond, struct mlx5_ib_qp *qp) 29492978975cSLeon Romanovsky { 29502978975cSLeon Romanovsky if (!(*flags & flag)) 29512978975cSLeon Romanovsky return; 29522978975cSLeon Romanovsky 29532978975cSLeon Romanovsky if (cond) { 29542978975cSLeon Romanovsky qp->flags |= flag; 29552978975cSLeon Romanovsky *flags &= ~flag; 29562978975cSLeon Romanovsky return; 29572978975cSLeon Romanovsky } 29582978975cSLeon Romanovsky 29592978975cSLeon Romanovsky if (flag == MLX5_IB_QP_CREATE_WC_TEST) { 29602978975cSLeon Romanovsky /* 29612978975cSLeon Romanovsky * Special case, if condition didn't meet, it won't be error, 29622978975cSLeon Romanovsky * just different in-kernel flow. 
29632978975cSLeon Romanovsky */ 29642978975cSLeon Romanovsky *flags &= ~MLX5_IB_QP_CREATE_WC_TEST; 29652978975cSLeon Romanovsky return; 29662978975cSLeon Romanovsky } 29672978975cSLeon Romanovsky mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag); 29682978975cSLeon Romanovsky } 29692978975cSLeon Romanovsky 29702978975cSLeon Romanovsky static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 29712978975cSLeon Romanovsky struct ib_qp_init_attr *attr) 29722978975cSLeon Romanovsky { 29737aede1a2SLeon Romanovsky enum ib_qp_type qp_type = qp->type; 29742978975cSLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 29752978975cSLeon Romanovsky int create_flags = attr->create_flags; 29762978975cSLeon Romanovsky bool cond; 29772978975cSLeon Romanovsky 29787aede1a2SLeon Romanovsky if (qp_type == MLX5_IB_QPT_DCT) 29792978975cSLeon Romanovsky return (create_flags) ? -EINVAL : 0; 29802978975cSLeon Romanovsky 29812978975cSLeon Romanovsky if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) 29822978975cSLeon Romanovsky return (create_flags) ? 
-EINVAL : 0; 29832978975cSLeon Romanovsky 2984f81b4565SLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP, 2985f81b4565SLeon Romanovsky mlx5_get_flow_namespace(dev->mdev, 2986f81b4565SLeon Romanovsky MLX5_FLOW_NAMESPACE_BYPASS), 2987f81b4565SLeon Romanovsky qp); 29882978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 29899e0dc7b9SMax Gurtovoy IB_QP_CREATE_INTEGRITY_EN, 29909e0dc7b9SMax Gurtovoy MLX5_CAP_GEN(mdev, sho), qp); 29919e0dc7b9SMax Gurtovoy process_create_flag(dev, &create_flags, 29922978975cSLeon Romanovsky IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, 29932978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, block_lb_mc), qp); 29942978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL, 29952978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 29962978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND, 29972978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 29982978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV, 29992978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 30002978975cSLeon Romanovsky 30012978975cSLeon Romanovsky if (qp_type == IB_QPT_UD) { 30022978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 30032978975cSLeon Romanovsky IB_QP_CREATE_IPOIB_UD_LSO, 30042978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, ipoib_basic_offloads), 30052978975cSLeon Romanovsky qp); 30062978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB; 30072978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN, 30082978975cSLeon Romanovsky cond, qp); 30092978975cSLeon Romanovsky } 30102978975cSLeon Romanovsky 30112978975cSLeon Romanovsky if (qp_type == IB_QPT_RAW_PACKET) { 30122978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && 30132978975cSLeon Romanovsky MLX5_CAP_ETH(mdev, scatter_fcs); 30142978975cSLeon Romanovsky process_create_flag(dev, 
&create_flags, 30152978975cSLeon Romanovsky IB_QP_CREATE_SCATTER_FCS, cond, qp); 30162978975cSLeon Romanovsky 30172978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && 30182978975cSLeon Romanovsky MLX5_CAP_ETH(mdev, vlan_cap); 30192978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 30202978975cSLeon Romanovsky IB_QP_CREATE_CVLAN_STRIPPING, cond, qp); 30212978975cSLeon Romanovsky } 30222978975cSLeon Romanovsky 30232978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 30242978975cSLeon Romanovsky IB_QP_CREATE_PCI_WRITE_END_PADDING, 30252978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, end_pad), qp); 30262978975cSLeon Romanovsky 30272978975cSLeon Romanovsky process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST, 30282978975cSLeon Romanovsky qp_type != MLX5_IB_QPT_REG_UMR, qp); 30292978975cSLeon Romanovsky process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1, 30302978975cSLeon Romanovsky true, qp); 30312978975cSLeon Romanovsky 30321f11a761SJason Gunthorpe if (create_flags) { 30332978975cSLeon Romanovsky mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n", 30342978975cSLeon Romanovsky create_flags); 30351f11a761SJason Gunthorpe return -EOPNOTSUPP; 30361f11a761SJason Gunthorpe } 30371f11a761SJason Gunthorpe return 0; 30382978975cSLeon Romanovsky } 30392978975cSLeon Romanovsky 30406f2cf76eSLeon Romanovsky static int process_udata_size(struct mlx5_ib_dev *dev, 30416f2cf76eSLeon Romanovsky struct mlx5_create_qp_params *params) 30422fdddbd5SLeon Romanovsky { 30432fdddbd5SLeon Romanovsky size_t ucmd = sizeof(struct mlx5_ib_create_qp); 30446f2cf76eSLeon Romanovsky struct ib_udata *udata = params->udata; 30456f2cf76eSLeon Romanovsky size_t outlen = udata->outlen; 30465ce0592bSLeon Romanovsky size_t inlen = udata->inlen; 30472fdddbd5SLeon Romanovsky 30486f2cf76eSLeon Romanovsky params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp)); 3049e383085cSLeon Romanovsky params->ucmd_size = ucmd; 
30506f2cf76eSLeon Romanovsky if (!params->is_rss_raw) { 3051e383085cSLeon Romanovsky /* User has old rdma-core, which doesn't support ECE */ 3052e383085cSLeon Romanovsky size_t min_inlen = 3053e383085cSLeon Romanovsky offsetof(struct mlx5_ib_create_qp, ece_options); 3054e383085cSLeon Romanovsky 3055e383085cSLeon Romanovsky /* 3056e383085cSLeon Romanovsky * We will check in check_ucmd_data() that user 3057e383085cSLeon Romanovsky * cleared everything after inlen. 3058e383085cSLeon Romanovsky */ 3059e383085cSLeon Romanovsky params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd); 30606f2cf76eSLeon Romanovsky goto out; 30616f2cf76eSLeon Romanovsky } 30625ce0592bSLeon Romanovsky 30636f2cf76eSLeon Romanovsky /* RSS RAW QP */ 30645ce0592bSLeon Romanovsky if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags)) 30656f2cf76eSLeon Romanovsky return -EINVAL; 30666f2cf76eSLeon Romanovsky 30676f2cf76eSLeon Romanovsky if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index)) 30686f2cf76eSLeon Romanovsky return -EINVAL; 30695ce0592bSLeon Romanovsky 30705ce0592bSLeon Romanovsky ucmd = sizeof(struct mlx5_ib_create_qp_rss); 3071e383085cSLeon Romanovsky params->ucmd_size = ucmd; 30725ce0592bSLeon Romanovsky if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd)) 30736f2cf76eSLeon Romanovsky return -EINVAL; 30745ce0592bSLeon Romanovsky 30756f2cf76eSLeon Romanovsky params->inlen = min(ucmd, inlen); 30766f2cf76eSLeon Romanovsky out: 30776f2cf76eSLeon Romanovsky if (!params->inlen) 3078e383085cSLeon Romanovsky mlx5_ib_dbg(dev, "udata is too small\n"); 30796f2cf76eSLeon Romanovsky 30806f2cf76eSLeon Romanovsky return (params->inlen) ? 
0 : -EINVAL; 30812fdddbd5SLeon Romanovsky } 30822fdddbd5SLeon Romanovsky 3083968f0b6fSLeon Romanovsky static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 3084f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 3085f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 30865d0dc3d9SLeon Romanovsky { 3087968f0b6fSLeon Romanovsky int err; 30885d0dc3d9SLeon Romanovsky 3089968f0b6fSLeon Romanovsky if (params->is_rss_raw) { 3090968f0b6fSLeon Romanovsky err = create_rss_raw_qp_tir(dev, pd, qp, params); 3091968f0b6fSLeon Romanovsky goto out; 3092968f0b6fSLeon Romanovsky } 3093968f0b6fSLeon Romanovsky 30942dc4d672SLeon Romanovsky switch (qp->type) { 30952dc4d672SLeon Romanovsky case MLX5_IB_QPT_DCT: 3096a645a89dSLeon Romanovsky err = create_dct(dev, pd, qp, params); 30970dc0da15SLeon Romanovsky rdma_restrack_no_track(&qp->ibqp.res); 30982dc4d672SLeon Romanovsky break; 30992013b4d5SLior Nahmanson case MLX5_IB_QPT_DCI: 31002013b4d5SLior Nahmanson err = create_dci(dev, pd, qp, params); 31012013b4d5SLior Nahmanson break; 31022dc4d672SLeon Romanovsky case IB_QPT_XRC_TGT: 3103968f0b6fSLeon Romanovsky err = create_xrc_tgt_qp(dev, qp, params); 31042dc4d672SLeon Romanovsky break; 31052dc4d672SLeon Romanovsky case IB_QPT_GSI: 31062dc4d672SLeon Romanovsky err = mlx5_ib_create_gsi(pd, qp, params->attr); 31072dc4d672SLeon Romanovsky break; 31080dc0da15SLeon Romanovsky case MLX5_IB_QPT_HW_GSI: 31090dc0da15SLeon Romanovsky case MLX5_IB_QPT_REG_UMR: 31100dc0da15SLeon Romanovsky rdma_restrack_no_track(&qp->ibqp.res); 31110dc0da15SLeon Romanovsky fallthrough; 31122dc4d672SLeon Romanovsky default: 3113968f0b6fSLeon Romanovsky if (params->udata) 3114968f0b6fSLeon Romanovsky err = create_user_qp(dev, pd, qp, params); 3115968f0b6fSLeon Romanovsky else 3116968f0b6fSLeon Romanovsky err = create_kernel_qp(dev, pd, qp, params); 31172dc4d672SLeon Romanovsky } 3118968f0b6fSLeon Romanovsky 3119968f0b6fSLeon Romanovsky out: 3120968f0b6fSLeon Romanovsky if (err) { 3121968f0b6fSLeon 
Romanovsky mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type); 3122968f0b6fSLeon Romanovsky return err; 3123968f0b6fSLeon Romanovsky } 3124968f0b6fSLeon Romanovsky 3125968f0b6fSLeon Romanovsky if (is_qp0(qp->type)) 3126968f0b6fSLeon Romanovsky qp->ibqp.qp_num = 0; 3127968f0b6fSLeon Romanovsky else if (is_qp1(qp->type)) 3128968f0b6fSLeon Romanovsky qp->ibqp.qp_num = 1; 3129968f0b6fSLeon Romanovsky else 3130968f0b6fSLeon Romanovsky qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn; 3131968f0b6fSLeon Romanovsky 3132968f0b6fSLeon Romanovsky mlx5_ib_dbg(dev, 31333e09a427SLeon Romanovsky "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n", 3134968f0b6fSLeon Romanovsky qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, 3135968f0b6fSLeon Romanovsky params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn : 3136968f0b6fSLeon Romanovsky -1, 3137968f0b6fSLeon Romanovsky params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn : 31383e09a427SLeon Romanovsky -1, 31393e09a427SLeon Romanovsky params->resp.ece_options); 3140968f0b6fSLeon Romanovsky 3141968f0b6fSLeon Romanovsky return 0; 31425d0dc3d9SLeon Romanovsky } 31435d0dc3d9SLeon Romanovsky 31447aede1a2SLeon Romanovsky static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 31457aede1a2SLeon Romanovsky struct ib_qp_init_attr *attr) 31467aede1a2SLeon Romanovsky { 31477aede1a2SLeon Romanovsky int ret = 0; 31487aede1a2SLeon Romanovsky 31497aede1a2SLeon Romanovsky switch (qp->type) { 31507aede1a2SLeon Romanovsky case MLX5_IB_QPT_DCT: 31517aede1a2SLeon Romanovsky ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0; 31527aede1a2SLeon Romanovsky break; 31537aede1a2SLeon Romanovsky case MLX5_IB_QPT_DCI: 31547aede1a2SLeon Romanovsky ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ? 
31557aede1a2SLeon Romanovsky -EINVAL : 31567aede1a2SLeon Romanovsky 0; 31577aede1a2SLeon Romanovsky break; 3158266424ebSLeon Romanovsky case IB_QPT_RAW_PACKET: 3159266424ebSLeon Romanovsky ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0; 3160266424ebSLeon Romanovsky break; 31617aede1a2SLeon Romanovsky default: 31627aede1a2SLeon Romanovsky break; 31637aede1a2SLeon Romanovsky } 31647aede1a2SLeon Romanovsky 31657aede1a2SLeon Romanovsky if (ret) 31667aede1a2SLeon Romanovsky mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type); 31677aede1a2SLeon Romanovsky 31687aede1a2SLeon Romanovsky return ret; 31697aede1a2SLeon Romanovsky } 31707aede1a2SLeon Romanovsky 3171f78d358cSLeon Romanovsky static int get_qp_uidx(struct mlx5_ib_qp *qp, 3172f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 317321aad80bSLeon Romanovsky { 3174f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 3175f78d358cSLeon Romanovsky struct ib_udata *udata = params->udata; 317621aad80bSLeon Romanovsky struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 317721aad80bSLeon Romanovsky udata, struct mlx5_ib_ucontext, ibucontext); 317821aad80bSLeon Romanovsky 3179f78d358cSLeon Romanovsky if (params->is_rss_raw) 318021aad80bSLeon Romanovsky return 0; 318121aad80bSLeon Romanovsky 3182f78d358cSLeon Romanovsky return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), ¶ms->uidx); 318321aad80bSLeon Romanovsky } 318421aad80bSLeon Romanovsky 318508d53976SLeon Romanovsky static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp) 318608d53976SLeon Romanovsky { 318708d53976SLeon Romanovsky struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device); 318808d53976SLeon Romanovsky 318908d53976SLeon Romanovsky if (mqp->state == IB_QPS_RTR) { 319008d53976SLeon Romanovsky int err; 319108d53976SLeon Romanovsky 319208d53976SLeon Romanovsky err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct); 319308d53976SLeon Romanovsky if (err) { 319408d53976SLeon Romanovsky mlx5_ib_warn(dev, "failed 
to destroy DCT %d\n", err); 319508d53976SLeon Romanovsky return err; 319608d53976SLeon Romanovsky } 319708d53976SLeon Romanovsky } 319808d53976SLeon Romanovsky 319908d53976SLeon Romanovsky kfree(mqp->dct.in); 320008d53976SLeon Romanovsky return 0; 320108d53976SLeon Romanovsky } 320208d53976SLeon Romanovsky 3203e383085cSLeon Romanovsky static int check_ucmd_data(struct mlx5_ib_dev *dev, 3204e383085cSLeon Romanovsky struct mlx5_create_qp_params *params) 3205e383085cSLeon Romanovsky { 3206e383085cSLeon Romanovsky struct ib_udata *udata = params->udata; 3207e383085cSLeon Romanovsky size_t size, last; 3208e383085cSLeon Romanovsky int ret; 3209e383085cSLeon Romanovsky 3210e383085cSLeon Romanovsky if (params->is_rss_raw) 3211e383085cSLeon Romanovsky /* 3212e383085cSLeon Romanovsky * These QPs don't have "reserved" field in their 3213e383085cSLeon Romanovsky * create_qp input struct, so their data is always valid. 3214e383085cSLeon Romanovsky */ 3215e383085cSLeon Romanovsky last = sizeof(struct mlx5_ib_create_qp_rss); 3216e383085cSLeon Romanovsky else 3217e383085cSLeon Romanovsky last = offsetof(struct mlx5_ib_create_qp, reserved); 3218e383085cSLeon Romanovsky 3219e383085cSLeon Romanovsky if (udata->inlen <= last) 3220e383085cSLeon Romanovsky return 0; 3221e383085cSLeon Romanovsky 3222e383085cSLeon Romanovsky /* 3223e383085cSLeon Romanovsky * User provides different create_qp structures based on the 3224e383085cSLeon Romanovsky * flow and we need to know if he cleared memory after our 3225e383085cSLeon Romanovsky * struct create_qp ends. 
3226e383085cSLeon Romanovsky */ 3227e383085cSLeon Romanovsky size = udata->inlen - last; 3228e383085cSLeon Romanovsky ret = ib_is_udata_cleared(params->udata, last, size); 3229e383085cSLeon Romanovsky if (!ret) 3230e383085cSLeon Romanovsky mlx5_ib_dbg( 3231e383085cSLeon Romanovsky dev, 32324f5747cfSTom Seewald "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n", 3233e383085cSLeon Romanovsky udata->inlen, params->ucmd_size, last, size); 3234e383085cSLeon Romanovsky return ret ? 0 : -EINVAL; 3235e383085cSLeon Romanovsky } 3236e383085cSLeon Romanovsky 3237514aee66SLeon Romanovsky int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, 3238e126ba97SEli Cohen struct ib_udata *udata) 3239e126ba97SEli Cohen { 3240f78d358cSLeon Romanovsky struct mlx5_create_qp_params params = {}; 3241514aee66SLeon Romanovsky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3242514aee66SLeon Romanovsky struct mlx5_ib_qp *qp = to_mqp(ibqp); 3243514aee66SLeon Romanovsky struct ib_pd *pd = ibqp->pd; 32447aede1a2SLeon Romanovsky enum ib_qp_type type; 3245e126ba97SEli Cohen int err; 3246e126ba97SEli Cohen 3247f78d358cSLeon Romanovsky err = check_qp_type(dev, attr, &type); 32482242cc25SLeon Romanovsky if (err) 3249514aee66SLeon Romanovsky return err; 3250e126ba97SEli Cohen 3251f78d358cSLeon Romanovsky err = check_valid_flow(dev, pd, attr, udata); 3252f78d358cSLeon Romanovsky if (err) 3253514aee66SLeon Romanovsky return err; 3254f78d358cSLeon Romanovsky 3255f78d358cSLeon Romanovsky params.udata = udata; 3256f78d358cSLeon Romanovsky params.uidx = MLX5_IB_DEFAULT_UIDX; 3257f78d358cSLeon Romanovsky params.attr = attr; 3258f78d358cSLeon Romanovsky params.is_rss_raw = !!attr->rwq_ind_tbl; 32599c2ba4edSLeon Romanovsky 32605ce0592bSLeon Romanovsky if (udata) { 32616f2cf76eSLeon Romanovsky err = process_udata_size(dev, ¶ms); 32626f2cf76eSLeon Romanovsky if (err) 3263514aee66SLeon Romanovsky return err; 32642fdddbd5SLeon Romanovsky 3265e383085cSLeon Romanovsky err = 
check_ucmd_data(dev, ¶ms); 3266e383085cSLeon Romanovsky if (err) 3267514aee66SLeon Romanovsky return err; 3268e383085cSLeon Romanovsky 3269e383085cSLeon Romanovsky params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL); 3270f78d358cSLeon Romanovsky if (!params.ucmd) 3271514aee66SLeon Romanovsky return -ENOMEM; 32725ce0592bSLeon Romanovsky 3273f78d358cSLeon Romanovsky err = ib_copy_from_udata(params.ucmd, udata, params.inlen); 32742fdddbd5SLeon Romanovsky if (err) 32755ce0592bSLeon Romanovsky goto free_ucmd; 32762fdddbd5SLeon Romanovsky } 32772fdddbd5SLeon Romanovsky 32787fa84b57SLeon Romanovsky mutex_init(&qp->mutex); 32797aede1a2SLeon Romanovsky qp->type = type; 328037518fa4SLeon Romanovsky if (udata) { 3281f78d358cSLeon Romanovsky err = process_vendor_flags(dev, qp, params.ucmd, attr); 3282b4aaa1f0SMoni Shoua if (err) 3283514aee66SLeon Romanovsky goto free_ucmd; 328421aad80bSLeon Romanovsky 3285f78d358cSLeon Romanovsky err = get_qp_uidx(qp, ¶ms); 328621aad80bSLeon Romanovsky if (err) 3287514aee66SLeon Romanovsky goto free_ucmd; 3288b4aaa1f0SMoni Shoua } 3289f78d358cSLeon Romanovsky err = process_create_flags(dev, qp, attr); 32902978975cSLeon Romanovsky if (err) 3291514aee66SLeon Romanovsky goto free_ucmd; 3292b4aaa1f0SMoni Shoua 3293f78d358cSLeon Romanovsky err = check_qp_attr(dev, qp, attr); 32947aede1a2SLeon Romanovsky if (err) 3295514aee66SLeon Romanovsky goto free_ucmd; 32967aede1a2SLeon Romanovsky 3297968f0b6fSLeon Romanovsky err = create_qp(dev, pd, qp, ¶ms); 3298968f0b6fSLeon Romanovsky if (err) 3299514aee66SLeon Romanovsky goto free_ucmd; 3300e126ba97SEli Cohen 3301f78d358cSLeon Romanovsky kfree(params.ucmd); 330208d53976SLeon Romanovsky params.ucmd = NULL; 33035ce0592bSLeon Romanovsky 330408d53976SLeon Romanovsky if (udata) 330508d53976SLeon Romanovsky /* 330608d53976SLeon Romanovsky * It is safe to copy response for all user create QP flows, 330708d53976SLeon Romanovsky * including MLX5_IB_QPT_DCT, which doesn't need it. 
330808d53976SLeon Romanovsky * In that case, resp will be filled with zeros. 330908d53976SLeon Romanovsky */ 331008d53976SLeon Romanovsky err = ib_copy_to_udata(udata, ¶ms.resp, params.outlen); 331108d53976SLeon Romanovsky if (err) 331208d53976SLeon Romanovsky goto destroy_qp; 331308d53976SLeon Romanovsky 3314514aee66SLeon Romanovsky return 0; 33159c2ba4edSLeon Romanovsky 331608d53976SLeon Romanovsky destroy_qp: 33172dc4d672SLeon Romanovsky switch (qp->type) { 33182dc4d672SLeon Romanovsky case MLX5_IB_QPT_DCT: 331908d53976SLeon Romanovsky mlx5_ib_destroy_dct(qp); 33202dc4d672SLeon Romanovsky break; 33212dc4d672SLeon Romanovsky case IB_QPT_GSI: 33222dc4d672SLeon Romanovsky mlx5_ib_destroy_gsi(qp); 33232dc4d672SLeon Romanovsky break; 33242dc4d672SLeon Romanovsky default: 332508d53976SLeon Romanovsky destroy_qp_common(dev, qp, udata); 33266c41965dSLeon Romanovsky } 33276c41965dSLeon Romanovsky 33285ce0592bSLeon Romanovsky free_ucmd: 3329f78d358cSLeon Romanovsky kfree(params.ucmd); 3330514aee66SLeon Romanovsky return err; 3331e126ba97SEli Cohen } 3332e126ba97SEli Cohen 3333c4367a26SShamir Rabinovitch int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) 3334e126ba97SEli Cohen { 3335e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(qp->device); 3336e126ba97SEli Cohen struct mlx5_ib_qp *mqp = to_mqp(qp); 3337e126ba97SEli Cohen 33389ecf6ac1SMaor Gottlieb if (mqp->type == IB_QPT_GSI) 33390d9aef86SLeon Romanovsky return mlx5_ib_destroy_gsi(mqp); 3340d16e91daSHaggai Eran 33417aede1a2SLeon Romanovsky if (mqp->type == MLX5_IB_QPT_DCT) 3342776a3906SMoni Shoua return mlx5_ib_destroy_dct(mqp); 3343776a3906SMoni Shoua 3344bdeacabdSShamir Rabinovitch destroy_qp_common(dev, mqp, udata); 3345e126ba97SEli Cohen return 0; 3346e126ba97SEli Cohen } 3347e126ba97SEli Cohen 3348f18e26afSLeon Romanovsky static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp, 3349f18e26afSLeon Romanovsky const struct ib_qp_attr *attr, int attr_mask, 3350f18e26afSLeon Romanovsky void *qpc) 
3351e126ba97SEli Cohen { 3352a60109dcSYonatan Cohen struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); 3353f18e26afSLeon Romanovsky u8 dest_rd_atomic; 3354f18e26afSLeon Romanovsky u32 access_flags; 3355a60109dcSYonatan Cohen 3356e126ba97SEli Cohen if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 3357e126ba97SEli Cohen dest_rd_atomic = attr->max_dest_rd_atomic; 3358e126ba97SEli Cohen else 335919098df2Smajd@mellanox.com dest_rd_atomic = qp->trans_qp.resp_depth; 3360e126ba97SEli Cohen 3361e126ba97SEli Cohen if (attr_mask & IB_QP_ACCESS_FLAGS) 3362e126ba97SEli Cohen access_flags = attr->qp_access_flags; 3363e126ba97SEli Cohen else 336419098df2Smajd@mellanox.com access_flags = qp->trans_qp.atomic_rd_en; 3365e126ba97SEli Cohen 3366e126ba97SEli Cohen if (!dest_rd_atomic) 3367e126ba97SEli Cohen access_flags &= IB_ACCESS_REMOTE_WRITE; 3368e126ba97SEli Cohen 3369f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ)); 3370f18e26afSLeon Romanovsky 337113f8d9c1SYonatan Cohen if (access_flags & IB_ACCESS_REMOTE_ATOMIC) { 3372a60109dcSYonatan Cohen int atomic_mode; 3373e126ba97SEli Cohen 33749ecf6ac1SMaor Gottlieb atomic_mode = get_atomic_mode(dev, qp->type); 3375a60109dcSYonatan Cohen if (atomic_mode < 0) 3376a60109dcSYonatan Cohen return -EOPNOTSUPP; 3377a60109dcSYonatan Cohen 3378f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, rae, 1); 3379f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, atomic_mode, atomic_mode); 3380a60109dcSYonatan Cohen } 3381a60109dcSYonatan Cohen 3382f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); 3383a60109dcSYonatan Cohen return 0; 3384e126ba97SEli Cohen } 3385e126ba97SEli Cohen 3386e126ba97SEli Cohen enum { 3387e126ba97SEli Cohen MLX5_PATH_FLAG_FL = 1 << 0, 3388e126ba97SEli Cohen MLX5_PATH_FLAG_FREE_AR = 1 << 1, 3389e126ba97SEli Cohen MLX5_PATH_FLAG_COUNTER = 1 << 2, 3390e126ba97SEli Cohen }; 3391e126ba97SEli Cohen 33926fe6e568SMark Zhang static int mlx5_to_ib_rate_map(u8 rate) 
33936fe6e568SMark Zhang { 33946fe6e568SMark Zhang static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS, 33956fe6e568SMark Zhang IB_RATE_25_GBPS, IB_RATE_100_GBPS, 33966fe6e568SMark Zhang IB_RATE_200_GBPS, IB_RATE_50_GBPS, 33976fe6e568SMark Zhang IB_RATE_400_GBPS }; 33986fe6e568SMark Zhang 33996fe6e568SMark Zhang if (rate < ARRAY_SIZE(rates)) 34006fe6e568SMark Zhang return rates[rate]; 34016fe6e568SMark Zhang 34026fe6e568SMark Zhang return rate - MLX5_STAT_RATE_OFFSET; 34036fe6e568SMark Zhang } 34046fe6e568SMark Zhang 3405c531024bSMark Zhang static int ib_to_mlx5_rate_map(u8 rate) 3406c531024bSMark Zhang { 3407c531024bSMark Zhang switch (rate) { 3408c531024bSMark Zhang case IB_RATE_PORT_CURRENT: 3409c531024bSMark Zhang return 0; 3410c531024bSMark Zhang case IB_RATE_56_GBPS: 3411c531024bSMark Zhang return 1; 3412c531024bSMark Zhang case IB_RATE_25_GBPS: 3413c531024bSMark Zhang return 2; 3414c531024bSMark Zhang case IB_RATE_100_GBPS: 3415c531024bSMark Zhang return 3; 3416c531024bSMark Zhang case IB_RATE_200_GBPS: 3417c531024bSMark Zhang return 4; 3418c531024bSMark Zhang case IB_RATE_50_GBPS: 3419c531024bSMark Zhang return 5; 3420c70f51deSPatrisious Haddad case IB_RATE_400_GBPS: 3421c70f51deSPatrisious Haddad return 6; 3422c531024bSMark Zhang default: 3423c531024bSMark Zhang return rate + MLX5_STAT_RATE_OFFSET; 34247f1d2dfaSTom Rix } 3425c531024bSMark Zhang 3426c531024bSMark Zhang return 0; 3427c531024bSMark Zhang } 3428c531024bSMark Zhang 3429e126ba97SEli Cohen static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) 3430e126ba97SEli Cohen { 3431c531024bSMark Zhang u32 stat_rate_support; 3432c531024bSMark Zhang 34334f32ac2eSDanit Goldberg if (rate == IB_RATE_PORT_CURRENT) 3434e126ba97SEli Cohen return 0; 34354f32ac2eSDanit Goldberg 3436a5a5d199SMichael Guralnik if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS) 3437e126ba97SEli Cohen return -EINVAL; 34384f32ac2eSDanit Goldberg 3439c531024bSMark Zhang stat_rate_support = MLX5_CAP_GEN(dev->mdev, 
stat_rate_support); 34404f32ac2eSDanit Goldberg while (rate != IB_RATE_PORT_CURRENT && 3441c531024bSMark Zhang !(1 << ib_to_mlx5_rate_map(rate) & stat_rate_support)) 3442e126ba97SEli Cohen --rate; 3443e126ba97SEli Cohen 3444c531024bSMark Zhang return ib_to_mlx5_rate_map(rate); 3445e126ba97SEli Cohen } 3446e126ba97SEli Cohen 344775850d0bSmajd@mellanox.com static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, 34481cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u8 sl, 34491cd6dbd3SYishai Hadas struct ib_pd *pd) 345075850d0bSmajd@mellanox.com { 345175850d0bSmajd@mellanox.com void *in; 345275850d0bSmajd@mellanox.com void *tisc; 345375850d0bSmajd@mellanox.com int inlen; 345475850d0bSmajd@mellanox.com int err; 345575850d0bSmajd@mellanox.com 345675850d0bSmajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(modify_tis_in); 34571b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 345875850d0bSmajd@mellanox.com if (!in) 345975850d0bSmajd@mellanox.com return -ENOMEM; 346075850d0bSmajd@mellanox.com 346175850d0bSmajd@mellanox.com MLX5_SET(modify_tis_in, in, bitmask.prio, 1); 34621cd6dbd3SYishai Hadas MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid); 346375850d0bSmajd@mellanox.com 346475850d0bSmajd@mellanox.com tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); 346575850d0bSmajd@mellanox.com MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1)); 346675850d0bSmajd@mellanox.com 3467e0b4b472SLeon Romanovsky err = mlx5_core_modify_tis(dev, sq->tisn, in); 346875850d0bSmajd@mellanox.com 346975850d0bSmajd@mellanox.com kvfree(in); 347075850d0bSmajd@mellanox.com 347175850d0bSmajd@mellanox.com return err; 347275850d0bSmajd@mellanox.com } 347375850d0bSmajd@mellanox.com 347413eab21fSAviv Heller static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, 34751cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u8 tx_affinity, 34761cd6dbd3SYishai Hadas struct ib_pd *pd) 347713eab21fSAviv Heller { 347813eab21fSAviv Heller void *in; 347913eab21fSAviv Heller void *tisc; 348013eab21fSAviv Heller int inlen; 
348113eab21fSAviv Heller int err; 348213eab21fSAviv Heller 348313eab21fSAviv Heller inlen = MLX5_ST_SZ_BYTES(modify_tis_in); 34841b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 348513eab21fSAviv Heller if (!in) 348613eab21fSAviv Heller return -ENOMEM; 348713eab21fSAviv Heller 348813eab21fSAviv Heller MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1); 34891cd6dbd3SYishai Hadas MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid); 349013eab21fSAviv Heller 349113eab21fSAviv Heller tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); 349213eab21fSAviv Heller MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity); 349313eab21fSAviv Heller 3494e0b4b472SLeon Romanovsky err = mlx5_core_modify_tis(dev, sq->tisn, in); 349513eab21fSAviv Heller 349613eab21fSAviv Heller kvfree(in); 349713eab21fSAviv Heller 349813eab21fSAviv Heller return err; 349913eab21fSAviv Heller } 350013eab21fSAviv Heller 3501f18e26afSLeon Romanovsky static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah, 35022b880b2eSMark Zhang u32 lqpn, u32 rqpn) 35032b880b2eSMark Zhang 35042b880b2eSMark Zhang { 35052b880b2eSMark Zhang u32 fl = ah->grh.flow_label; 35062b880b2eSMark Zhang 35072b880b2eSMark Zhang if (!fl) 35082b880b2eSMark Zhang fl = rdma_calc_flow_label(lqpn, rqpn); 35092b880b2eSMark Zhang 3510f18e26afSLeon Romanovsky MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl)); 35112b880b2eSMark Zhang } 35122b880b2eSMark Zhang 351375850d0bSmajd@mellanox.com static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 3514f18e26afSLeon Romanovsky const struct rdma_ah_attr *ah, void *path, u8 port, 3515f18e26afSLeon Romanovsky int attr_mask, u32 path_flags, 3516f18e26afSLeon Romanovsky const struct ib_qp_attr *attr, bool alt) 3517e126ba97SEli Cohen { 3518d8966fcdSDasaratharaman Chandramouli const struct ib_global_route *grh = rdma_ah_read_grh(ah); 3519e126ba97SEli Cohen int err; 3520ed88451eSMajd Dibbiny enum ib_gid_type gid_type; 
3521d8966fcdSDasaratharaman Chandramouli u8 ah_flags = rdma_ah_get_ah_flags(ah); 3522d8966fcdSDasaratharaman Chandramouli u8 sl = rdma_ah_get_sl(ah); 3523e126ba97SEli Cohen 3524e126ba97SEli Cohen if (attr_mask & IB_QP_PKEY_INDEX) 3525f18e26afSLeon Romanovsky MLX5_SET(ads, path, pkey_index, 3526f18e26afSLeon Romanovsky alt ? attr->alt_pkey_index : attr->pkey_index); 3527e126ba97SEli Cohen 3528d8966fcdSDasaratharaman Chandramouli if (ah_flags & IB_AH_GRH) { 35297416790eSParav Pandit const struct ib_port_immutable *immutable; 35307416790eSParav Pandit 35317416790eSParav Pandit immutable = ib_port_immutable_read(&dev->ib_dev, port); 35327416790eSParav Pandit if (grh->sgid_index >= immutable->gid_tbl_len) { 3533f4f01b54SJoe Perches pr_err("sgid_index (%u) too large. max is %d\n", 3534d8966fcdSDasaratharaman Chandramouli grh->sgid_index, 35357416790eSParav Pandit immutable->gid_tbl_len); 3536f83b4263SEli Cohen return -EINVAL; 3537f83b4263SEli Cohen } 35382811ba51SAchiad Shochat } 353944c58487SDasaratharaman Chandramouli 354044c58487SDasaratharaman Chandramouli if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { 3541d8966fcdSDasaratharaman Chandramouli if (!(ah_flags & IB_AH_GRH)) 35422811ba51SAchiad Shochat return -EINVAL; 354347ec3866SParav Pandit 3544f18e26afSLeon Romanovsky ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32), 3545f18e26afSLeon Romanovsky ah->roce.dmac); 35469ecf6ac1SMaor Gottlieb if ((qp->type == IB_QPT_RC || 35479ecf6ac1SMaor Gottlieb qp->type == IB_QPT_UC || 35489ecf6ac1SMaor Gottlieb qp->type == IB_QPT_XRC_INI || 35499ecf6ac1SMaor Gottlieb qp->type == IB_QPT_XRC_TGT) && 35502b880b2eSMark Zhang (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) && 35512b880b2eSMark Zhang (attr_mask & IB_QP_DEST_QPN)) 35522b880b2eSMark Zhang mlx5_set_path_udp_sport(path, ah, 35532b880b2eSMark Zhang qp->ibqp.qp_num, 35542b880b2eSMark Zhang attr->dest_qp_num); 3555f18e26afSLeon Romanovsky MLX5_SET(ads, path, eth_prio, sl & 0x7); 355647ec3866SParav Pandit gid_type = 
ah->grh.sgid_attr->gid_type; 3557ed88451eSMajd Dibbiny if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3558f18e26afSLeon Romanovsky MLX5_SET(ads, path, dscp, grh->traffic_class >> 2); 35592811ba51SAchiad Shochat } else { 3560f18e26afSLeon Romanovsky MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL)); 3561f18e26afSLeon Romanovsky MLX5_SET(ads, path, free_ar, 3562f18e26afSLeon Romanovsky !!(path_flags & MLX5_PATH_FLAG_FREE_AR)); 3563f18e26afSLeon Romanovsky MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah)); 3564f18e26afSLeon Romanovsky MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah)); 3565f18e26afSLeon Romanovsky MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH)); 3566f18e26afSLeon Romanovsky MLX5_SET(ads, path, sl, sl); 35672811ba51SAchiad Shochat } 35682811ba51SAchiad Shochat 3569d8966fcdSDasaratharaman Chandramouli if (ah_flags & IB_AH_GRH) { 3570f18e26afSLeon Romanovsky MLX5_SET(ads, path, src_addr_index, grh->sgid_index); 3571f18e26afSLeon Romanovsky MLX5_SET(ads, path, hop_limit, grh->hop_limit); 3572f18e26afSLeon Romanovsky MLX5_SET(ads, path, tclass, grh->traffic_class); 3573f18e26afSLeon Romanovsky MLX5_SET(ads, path, flow_label, grh->flow_label); 3574f18e26afSLeon Romanovsky memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw, 3575f18e26afSLeon Romanovsky sizeof(grh->dgid.raw)); 3576e126ba97SEli Cohen } 3577e126ba97SEli Cohen 3578d8966fcdSDasaratharaman Chandramouli err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah)); 3579e126ba97SEli Cohen if (err < 0) 3580e126ba97SEli Cohen return err; 3581f18e26afSLeon Romanovsky MLX5_SET(ads, path, stat_rate, err); 3582f18e26afSLeon Romanovsky MLX5_SET(ads, path, vhca_port_num, port); 3583e126ba97SEli Cohen 3584e126ba97SEli Cohen if (attr_mask & IB_QP_TIMEOUT) 3585f18e26afSLeon Romanovsky MLX5_SET(ads, path, ack_timeout, 3586f18e26afSLeon Romanovsky alt ? 
attr->alt_timeout : attr->timeout); 3587e126ba97SEli Cohen 35889ecf6ac1SMaor Gottlieb if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) 358975850d0bSmajd@mellanox.com return modify_raw_packet_eth_prio(dev->mdev, 359075850d0bSmajd@mellanox.com &qp->raw_packet_qp.sq, 35911cd6dbd3SYishai Hadas sl & 0xf, qp->ibqp.pd); 359275850d0bSmajd@mellanox.com 3593e126ba97SEli Cohen return 0; 3594e126ba97SEli Cohen } 3595e126ba97SEli Cohen 3596e126ba97SEli Cohen static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = { 3597e126ba97SEli Cohen [MLX5_QP_STATE_INIT] = { 3598e126ba97SEli Cohen [MLX5_QP_STATE_INIT] = { 3599e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | 3600e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3601e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3602e126ba97SEli Cohen MLX5_QP_OPTPAR_PKEY_INDEX | 3603cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_PRI_PORT | 3604cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_LAG_TX_AFF, 3605e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | 3606e126ba97SEli Cohen MLX5_QP_OPTPAR_PKEY_INDEX | 3607cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_PRI_PORT | 3608cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_LAG_TX_AFF, 3609e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | 3610e126ba97SEli Cohen MLX5_QP_OPTPAR_Q_KEY | 3611e126ba97SEli Cohen MLX5_QP_OPTPAR_PRI_PORT, 36128f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE | 36138f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 36148f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 36158f4426aaSJack Morgenstein MLX5_QP_OPTPAR_PKEY_INDEX | 3616cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_PRI_PORT | 3617cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_LAG_TX_AFF, 3618e126ba97SEli Cohen }, 3619e126ba97SEli Cohen [MLX5_QP_STATE_RTR] = { 3620e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3621e126ba97SEli Cohen MLX5_QP_OPTPAR_RRE | 3622e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3623e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3624cfc1a89eSMaor Gottlieb 
MLX5_QP_OPTPAR_PKEY_INDEX | 3625cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_LAG_TX_AFF, 3626e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3627e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3628cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_PKEY_INDEX | 3629cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_LAG_TX_AFF, 3630e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | 3631e126ba97SEli Cohen MLX5_QP_OPTPAR_Q_KEY, 3632e126ba97SEli Cohen [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | 3633e126ba97SEli Cohen MLX5_QP_OPTPAR_Q_KEY, 3634a4774e90SEli Cohen [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3635a4774e90SEli Cohen MLX5_QP_OPTPAR_RRE | 3636a4774e90SEli Cohen MLX5_QP_OPTPAR_RAE | 3637a4774e90SEli Cohen MLX5_QP_OPTPAR_RWE | 3638cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_PKEY_INDEX | 3639cfc1a89eSMaor Gottlieb MLX5_QP_OPTPAR_LAG_TX_AFF, 3640e126ba97SEli Cohen }, 3641e126ba97SEli Cohen }, 3642e126ba97SEli Cohen [MLX5_QP_STATE_RTR] = { 3643e126ba97SEli Cohen [MLX5_QP_STATE_RTS] = { 3644e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3645e126ba97SEli Cohen MLX5_QP_OPTPAR_RRE | 3646e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3647e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3648e126ba97SEli Cohen MLX5_QP_OPTPAR_PM_STATE | 3649e126ba97SEli Cohen MLX5_QP_OPTPAR_RNR_TIMEOUT, 3650e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3651e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3652e126ba97SEli Cohen MLX5_QP_OPTPAR_PM_STATE, 3653e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 36548f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 36558f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RRE | 36568f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 36578f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 36588f4426aaSJack Morgenstein MLX5_QP_OPTPAR_PM_STATE | 36598f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RNR_TIMEOUT, 3660e126ba97SEli Cohen }, 3661e126ba97SEli Cohen }, 3662e126ba97SEli Cohen [MLX5_QP_STATE_RTS] = { 3663e126ba97SEli Cohen 
[MLX5_QP_STATE_RTS] = { 3664e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | 3665e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3666e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3667e126ba97SEli Cohen MLX5_QP_OPTPAR_RNR_TIMEOUT | 3668c2a3431eSEli Cohen MLX5_QP_OPTPAR_PM_STATE | 3669c2a3431eSEli Cohen MLX5_QP_OPTPAR_ALT_ADDR_PATH, 3670e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | 3671c2a3431eSEli Cohen MLX5_QP_OPTPAR_PM_STATE | 3672c2a3431eSEli Cohen MLX5_QP_OPTPAR_ALT_ADDR_PATH, 3673e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | 3674e126ba97SEli Cohen MLX5_QP_OPTPAR_SRQN | 3675e126ba97SEli Cohen MLX5_QP_OPTPAR_CQN_RCV, 36768f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE | 36778f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 36788f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 36798f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RNR_TIMEOUT | 36808f4426aaSJack Morgenstein MLX5_QP_OPTPAR_PM_STATE | 36818f4426aaSJack Morgenstein MLX5_QP_OPTPAR_ALT_ADDR_PATH, 3682e126ba97SEli Cohen }, 3683e126ba97SEli Cohen }, 3684e126ba97SEli Cohen [MLX5_QP_STATE_SQER] = { 3685e126ba97SEli Cohen [MLX5_QP_STATE_RTS] = { 3686e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 3687e126ba97SEli Cohen [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, 368875959f56SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE, 3689a4774e90SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | 3690a4774e90SEli Cohen MLX5_QP_OPTPAR_RWE | 3691a4774e90SEli Cohen MLX5_QP_OPTPAR_RAE | 3692a4774e90SEli Cohen MLX5_QP_OPTPAR_RRE, 36938f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | 36948f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 36958f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 36968f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RRE, 3697e126ba97SEli Cohen }, 3698e126ba97SEli Cohen }, 3699021c1f24SSergey Gorenko [MLX5_QP_STATE_SQD] = { 3700021c1f24SSergey Gorenko [MLX5_QP_STATE_RTS] = { 3701021c1f24SSergey Gorenko [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 
3702021c1f24SSergey Gorenko [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, 3703021c1f24SSergey Gorenko [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE, 3704021c1f24SSergey Gorenko [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | 3705021c1f24SSergey Gorenko MLX5_QP_OPTPAR_RWE | 3706021c1f24SSergey Gorenko MLX5_QP_OPTPAR_RAE | 3707021c1f24SSergey Gorenko MLX5_QP_OPTPAR_RRE, 3708021c1f24SSergey Gorenko }, 3709021c1f24SSergey Gorenko }, 3710e126ba97SEli Cohen }; 3711e126ba97SEli Cohen 3712e126ba97SEli Cohen static int ib_nr_to_mlx5_nr(int ib_mask) 3713e126ba97SEli Cohen { 3714e126ba97SEli Cohen switch (ib_mask) { 3715e126ba97SEli Cohen case IB_QP_STATE: 3716e126ba97SEli Cohen return 0; 3717e126ba97SEli Cohen case IB_QP_CUR_STATE: 3718e126ba97SEli Cohen return 0; 3719e126ba97SEli Cohen case IB_QP_EN_SQD_ASYNC_NOTIFY: 3720e126ba97SEli Cohen return 0; 3721e126ba97SEli Cohen case IB_QP_ACCESS_FLAGS: 3722e126ba97SEli Cohen return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE | 3723e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE; 3724e126ba97SEli Cohen case IB_QP_PKEY_INDEX: 3725e126ba97SEli Cohen return MLX5_QP_OPTPAR_PKEY_INDEX; 3726e126ba97SEli Cohen case IB_QP_PORT: 3727e126ba97SEli Cohen return MLX5_QP_OPTPAR_PRI_PORT; 3728e126ba97SEli Cohen case IB_QP_QKEY: 3729e126ba97SEli Cohen return MLX5_QP_OPTPAR_Q_KEY; 3730e126ba97SEli Cohen case IB_QP_AV: 3731e126ba97SEli Cohen return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH | 3732e126ba97SEli Cohen MLX5_QP_OPTPAR_PRI_PORT; 3733e126ba97SEli Cohen case IB_QP_PATH_MTU: 3734e126ba97SEli Cohen return 0; 3735e126ba97SEli Cohen case IB_QP_TIMEOUT: 3736e126ba97SEli Cohen return MLX5_QP_OPTPAR_ACK_TIMEOUT; 3737e126ba97SEli Cohen case IB_QP_RETRY_CNT: 3738e126ba97SEli Cohen return MLX5_QP_OPTPAR_RETRY_COUNT; 3739e126ba97SEli Cohen case IB_QP_RNR_RETRY: 3740e126ba97SEli Cohen return MLX5_QP_OPTPAR_RNR_RETRY; 3741e126ba97SEli Cohen case IB_QP_RQ_PSN: 3742e126ba97SEli Cohen return 0; 3743e126ba97SEli Cohen case IB_QP_MAX_QP_RD_ATOMIC: 3744e126ba97SEli Cohen return 
MLX5_QP_OPTPAR_SRA_MAX; 3745e126ba97SEli Cohen case IB_QP_ALT_PATH: 3746e126ba97SEli Cohen return MLX5_QP_OPTPAR_ALT_ADDR_PATH; 3747e126ba97SEli Cohen case IB_QP_MIN_RNR_TIMER: 3748e126ba97SEli Cohen return MLX5_QP_OPTPAR_RNR_TIMEOUT; 3749e126ba97SEli Cohen case IB_QP_SQ_PSN: 3750e126ba97SEli Cohen return 0; 3751e126ba97SEli Cohen case IB_QP_MAX_DEST_RD_ATOMIC: 3752e126ba97SEli Cohen return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE | 3753e126ba97SEli Cohen MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE; 3754e126ba97SEli Cohen case IB_QP_PATH_MIG_STATE: 3755e126ba97SEli Cohen return MLX5_QP_OPTPAR_PM_STATE; 3756e126ba97SEli Cohen case IB_QP_CAP: 3757e126ba97SEli Cohen return 0; 3758e126ba97SEli Cohen case IB_QP_DEST_QPN: 3759e126ba97SEli Cohen return 0; 3760e126ba97SEli Cohen } 3761e126ba97SEli Cohen return 0; 3762e126ba97SEli Cohen } 3763e126ba97SEli Cohen 3764e126ba97SEli Cohen static int ib_mask_to_mlx5_opt(int ib_mask) 3765e126ba97SEli Cohen { 3766e126ba97SEli Cohen int result = 0; 3767e126ba97SEli Cohen int i; 3768e126ba97SEli Cohen 3769e126ba97SEli Cohen for (i = 0; i < 8 * sizeof(int); i++) { 3770e126ba97SEli Cohen if ((1 << i) & ib_mask) 3771e126ba97SEli Cohen result |= ib_nr_to_mlx5_nr(1 << i); 3772e126ba97SEli Cohen } 3773e126ba97SEli Cohen 3774e126ba97SEli Cohen return result; 3775e126ba97SEli Cohen } 3776e126ba97SEli Cohen 377734d57585SYishai Hadas static int modify_raw_packet_qp_rq( 377834d57585SYishai Hadas struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state, 377934d57585SYishai Hadas const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd) 3780ad5f8e96Smajd@mellanox.com { 3781ad5f8e96Smajd@mellanox.com void *in; 3782ad5f8e96Smajd@mellanox.com void *rqc; 3783ad5f8e96Smajd@mellanox.com int inlen; 3784ad5f8e96Smajd@mellanox.com int err; 3785ad5f8e96Smajd@mellanox.com 3786ad5f8e96Smajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 37871b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 3788ad5f8e96Smajd@mellanox.com if 
/*
 * NOTE(review): git-blame-annotated text; comments below are review
 * annotations only, no code token changed.
 *
 * modify_raw_packet_qp_rq() continues here: builds a MODIFY_RQ command
 * (state transition, PD uid, and - when FW advertises
 * modify_rq_counter_set_id - an optional queue-counter rebind), executes
 * it, and updates rq->state only on success.  kvfree(in) runs on all
 * paths via the `out` label.
 */
(!in) 3789ad5f8e96Smajd@mellanox.com return -ENOMEM; 3790ad5f8e96Smajd@mellanox.com 3791ad5f8e96Smajd@mellanox.com MLX5_SET(modify_rq_in, in, rq_state, rq->state); 379234d57585SYishai Hadas MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid); 3793ad5f8e96Smajd@mellanox.com 3794ad5f8e96Smajd@mellanox.com rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 3795ad5f8e96Smajd@mellanox.com MLX5_SET(rqc, rqc, state, new_state); 3796ad5f8e96Smajd@mellanox.com 3797eb49ab0cSAlex Vesker if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) { 3798eb49ab0cSAlex Vesker if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { 3799eb49ab0cSAlex Vesker MLX5_SET64(modify_rq_in, in, modify_bitmask, 380023a6964eSMajd Dibbiny MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); 3801eb49ab0cSAlex Vesker MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id); 3802eb49ab0cSAlex Vesker } else 38035a738b5dSJason Gunthorpe dev_info_once( 38045a738b5dSJason Gunthorpe &dev->ib_dev.dev, 38055a738b5dSJason Gunthorpe "RAW PACKET QP counters are not supported on current FW\n"); 3806eb49ab0cSAlex Vesker } 3807eb49ab0cSAlex Vesker 3808e0b4b472SLeon Romanovsky err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in); 3809ad5f8e96Smajd@mellanox.com if (err) 3810ad5f8e96Smajd@mellanox.com goto out; 3811ad5f8e96Smajd@mellanox.com 3812ad5f8e96Smajd@mellanox.com rq->state = new_state; 3813ad5f8e96Smajd@mellanox.com 3814ad5f8e96Smajd@mellanox.com out: 3815ad5f8e96Smajd@mellanox.com kvfree(in); 3816ad5f8e96Smajd@mellanox.com return err; 3817ad5f8e96Smajd@mellanox.com } 3818ad5f8e96Smajd@mellanox.com 3819c14003f0SYishai Hadas static int modify_raw_packet_qp_sq( 3820c14003f0SYishai Hadas struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state, 3821c14003f0SYishai Hadas const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd) 3822ad5f8e96Smajd@mellanox.com { 38237d29f349SBodong Wang struct mlx5_ib_qp *ibqp = sq->base.container_mibqp; 382461147f39SBodong Wang struct mlx5_rate_limit
/*
 * modify_raw_packet_qp_sq(): moves the SQ to new_state via MODIFY_SQ.
 * When MLX5_RAW_QP_RATE_LIMIT is requested it swaps the packet-pacing
 * rate-limit entry: the new rate is added to the HW table first, the SQ
 * is repointed at it, and the old rate is released only after firmware
 * accepted the change (or when leaving RDY).  Rate-limit changes are
 * only honored when the SQ is moving to RDY - otherwise a warning is
 * printed and the old rate is kept.
 */
old_rl = ibqp->rl; 382561147f39SBodong Wang struct mlx5_rate_limit new_rl = old_rl; 382661147f39SBodong Wang bool new_rate_added = false; 38277d29f349SBodong Wang u16 rl_index = 0; 3828ad5f8e96Smajd@mellanox.com void *in; 3829ad5f8e96Smajd@mellanox.com void *sqc; 3830ad5f8e96Smajd@mellanox.com int inlen; 3831ad5f8e96Smajd@mellanox.com int err; 3832ad5f8e96Smajd@mellanox.com 3833ad5f8e96Smajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(modify_sq_in); 38341b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 3835ad5f8e96Smajd@mellanox.com if (!in) 3836ad5f8e96Smajd@mellanox.com return -ENOMEM; 3837ad5f8e96Smajd@mellanox.com 3838c14003f0SYishai Hadas MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid); 3839ad5f8e96Smajd@mellanox.com MLX5_SET(modify_sq_in, in, sq_state, sq->state); 3840ad5f8e96Smajd@mellanox.com 3841ad5f8e96Smajd@mellanox.com sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 3842ad5f8e96Smajd@mellanox.com MLX5_SET(sqc, sqc, state, new_state); 3843ad5f8e96Smajd@mellanox.com 38447d29f349SBodong Wang if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) { 38457d29f349SBodong Wang if (new_state != MLX5_SQC_STATE_RDY) 38467d29f349SBodong Wang pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n", 38477d29f349SBodong Wang __func__); 38487d29f349SBodong Wang else 384961147f39SBodong Wang new_rl = raw_qp_param->rl; 38507d29f349SBodong Wang } 3851ad5f8e96Smajd@mellanox.com 385261147f39SBodong Wang if (!mlx5_rl_are_equal(&old_rl, &new_rl)) { 385361147f39SBodong Wang if (new_rl.rate) { 385461147f39SBodong Wang err = mlx5_rl_add_rate(dev, &rl_index, &new_rl); 38557d29f349SBodong Wang if (err) { 385661147f39SBodong Wang pr_err("Failed configuring rate limit(err %d): \ 385761147f39SBodong Wang rate %u, max_burst_sz %u, typical_pkt_sz %u\n", 385861147f39SBodong Wang err, new_rl.rate, new_rl.max_burst_sz, 385961147f39SBodong Wang new_rl.typical_pkt_sz); 386061147f39SBodong Wang 38617d29f349SBodong Wang goto out; 38627d29f349SBodong Wang } 386361147f39SBodong
/*
 * NOTE(review): the pr_err() above uses a backslash line-continuation
 * *inside* the string literal (present in the upstream file as well) -
 * a checkpatch-style nit, kept byte-identical here.
 */
Wang new_rate_added = true; 38647d29f349SBodong Wang } 38657d29f349SBodong Wang 38667d29f349SBodong Wang MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); 386761147f39SBodong Wang /* index 0 means no limit */ 38687d29f349SBodong Wang MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); 38697d29f349SBodong Wang } 38707d29f349SBodong Wang 3871e0b4b472SLeon Romanovsky err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in); 38727d29f349SBodong Wang if (err) { 38737d29f349SBodong Wang /* Remove new rate from table if failed */ 387461147f39SBodong Wang if (new_rate_added) 387561147f39SBodong Wang mlx5_rl_remove_rate(dev, &new_rl); 38767d29f349SBodong Wang goto out; 38777d29f349SBodong Wang } 38787d29f349SBodong Wang 38797d29f349SBodong Wang /* Only remove the old rate after new rate was set */ 3880c8973df2SRafi Wiener if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) || 3881c8973df2SRafi Wiener (new_state != MLX5_SQC_STATE_RDY)) { 388261147f39SBodong Wang mlx5_rl_remove_rate(dev, &old_rl); 3883c8973df2SRafi Wiener if (new_state != MLX5_SQC_STATE_RDY) 3884c8973df2SRafi Wiener memset(&new_rl, 0, sizeof(new_rl)); 3885c8973df2SRafi Wiener } 38867d29f349SBodong Wang 388761147f39SBodong Wang ibqp->rl = new_rl; 3888ad5f8e96Smajd@mellanox.com sq->state = new_state; 3889ad5f8e96Smajd@mellanox.com 3890ad5f8e96Smajd@mellanox.com out: 3891ad5f8e96Smajd@mellanox.com kvfree(in); 3892ad5f8e96Smajd@mellanox.com return err; 3893ad5f8e96Smajd@mellanox.com } 3894ad5f8e96Smajd@mellanox.com 3895ad5f8e96Smajd@mellanox.com static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 389613eab21fSAviv Heller const struct mlx5_modify_raw_qp_param *raw_qp_param, 389713eab21fSAviv Heller u8 tx_affinity) 3898ad5f8e96Smajd@mellanox.com { 3899ad5f8e96Smajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 3900ad5f8e96Smajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 3901ad5f8e96Smajd@mellanox.com struct mlx5_ib_sq *sq =
/*
 * NOTE(review): git-blame-annotated text; comments below are review
 * annotations only, no code token changed.
 *
 * modify_raw_packet_qp() continues here: a RAW PACKET QP is backed by a
 * separate RQ and SQ object, so the QP-level modify opcode is mapped onto
 * per-queue state transitions.  RST2INIT readies the RQ but leaves the SQ
 * in RST; RTR2RTS/RTS2RTS only touch the SQ (rate limit); INIT2INIT and
 * INIT2RTR are no-ops unless a set_mask was requested, which is -EINVAL.
 * On the SQ path, tx affinity is programmed and the vport flow rule is
 * re-created before the SQ itself is modified; create_flow_rule_vport_sq()
 * may return NULL (treated as "no rule needed") - only a non-NULL rule
 * replaces the previous one.
 */
&raw_packet_qp->sq; 39027d29f349SBodong Wang int modify_rq = !!qp->rq.wqe_cnt; 39037d29f349SBodong Wang int modify_sq = !!qp->sq.wqe_cnt; 3904ad5f8e96Smajd@mellanox.com int rq_state; 3905ad5f8e96Smajd@mellanox.com int sq_state; 3906ad5f8e96Smajd@mellanox.com int err; 3907ad5f8e96Smajd@mellanox.com 39080680efa2SAlex Vesker switch (raw_qp_param->operation) { 3909ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_RST2INIT_QP: 3910ad5f8e96Smajd@mellanox.com rq_state = MLX5_RQC_STATE_RDY; 3911c94e272bSMaor Gottlieb sq_state = MLX5_SQC_STATE_RST; 3912ad5f8e96Smajd@mellanox.com break; 3913ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_2ERR_QP: 3914ad5f8e96Smajd@mellanox.com rq_state = MLX5_RQC_STATE_ERR; 3915ad5f8e96Smajd@mellanox.com sq_state = MLX5_SQC_STATE_ERR; 3916ad5f8e96Smajd@mellanox.com break; 3917ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_2RST_QP: 3918ad5f8e96Smajd@mellanox.com rq_state = MLX5_RQC_STATE_RST; 3919ad5f8e96Smajd@mellanox.com sq_state = MLX5_SQC_STATE_RST; 3920ad5f8e96Smajd@mellanox.com break; 3921ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_RTR2RTS_QP: 3922ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_RTS2RTS_QP: 3923c94e272bSMaor Gottlieb if (raw_qp_param->set_mask & ~MLX5_RAW_QP_RATE_LIMIT) 3924c94e272bSMaor Gottlieb return -EINVAL; 3925c94e272bSMaor Gottlieb 39267d29f349SBodong Wang modify_rq = 0; 3927c94e272bSMaor Gottlieb sq_state = MLX5_SQC_STATE_RDY; 39287d29f349SBodong Wang break; 39297d29f349SBodong Wang case MLX5_CMD_OP_INIT2INIT_QP: 39307d29f349SBodong Wang case MLX5_CMD_OP_INIT2RTR_QP: 3931eb49ab0cSAlex Vesker if (raw_qp_param->set_mask) 3932eb49ab0cSAlex Vesker return -EINVAL; 3933eb49ab0cSAlex Vesker else 3934ad5f8e96Smajd@mellanox.com return 0; 3935ad5f8e96Smajd@mellanox.com default: 3936ad5f8e96Smajd@mellanox.com WARN_ON(1); 3937ad5f8e96Smajd@mellanox.com return -EINVAL; 3938ad5f8e96Smajd@mellanox.com } 3939ad5f8e96Smajd@mellanox.com 39407d29f349SBodong Wang if (modify_rq) { 394134d57585SYishai Hadas err = modify_raw_packet_qp_rq(dev, rq,
rq_state, raw_qp_param, 394234d57585SYishai Hadas qp->ibqp.pd); 3943ad5f8e96Smajd@mellanox.com if (err) 3944ad5f8e96Smajd@mellanox.com return err; 3945ad5f8e96Smajd@mellanox.com } 3946ad5f8e96Smajd@mellanox.com 39477d29f349SBodong Wang if (modify_sq) { 3948d5ed8ac3SMark Bloch struct mlx5_flow_handle *flow_rule; 3949d5ed8ac3SMark Bloch 395013eab21fSAviv Heller if (tx_affinity) { 395113eab21fSAviv Heller err = modify_raw_packet_tx_affinity(dev->mdev, sq, 39521cd6dbd3SYishai Hadas tx_affinity, 39531cd6dbd3SYishai Hadas qp->ibqp.pd); 395413eab21fSAviv Heller if (err) 395513eab21fSAviv Heller return err; 395613eab21fSAviv Heller } 395713eab21fSAviv Heller 3958d5ed8ac3SMark Bloch flow_rule = create_flow_rule_vport_sq(dev, sq, 3959d5ed8ac3SMark Bloch raw_qp_param->port); 3960d5ed8ac3SMark Bloch if (IS_ERR(flow_rule)) 39611db86318SColin Ian King return PTR_ERR(flow_rule); 3962d5ed8ac3SMark Bloch 3963d5ed8ac3SMark Bloch err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, 3964c14003f0SYishai Hadas raw_qp_param, qp->ibqp.pd); 3965d5ed8ac3SMark Bloch if (err) { 3966d5ed8ac3SMark Bloch if (flow_rule) 3967d5ed8ac3SMark Bloch mlx5_del_flow_rules(flow_rule); 3968d5ed8ac3SMark Bloch return err; 3969d5ed8ac3SMark Bloch } 3970d5ed8ac3SMark Bloch 3971d5ed8ac3SMark Bloch if (flow_rule) { 3972d5ed8ac3SMark Bloch destroy_flow_rule_vport_sq(sq); 3973d5ed8ac3SMark Bloch sq->flow_rule = flow_rule; 3974d5ed8ac3SMark Bloch } 3975d5ed8ac3SMark Bloch 3976d5ed8ac3SMark Bloch return err; 397713eab21fSAviv Heller } 3978ad5f8e96Smajd@mellanox.com 3979ad5f8e96Smajd@mellanox.com return 0; 3980ad5f8e96Smajd@mellanox.com } 3981ad5f8e96Smajd@mellanox.com 39825163b274SMaor Gottlieb static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev, 39835163b274SMaor Gottlieb struct ib_udata *udata) 3984c6a21c38SMajd Dibbiny { 398589944450SShamir Rabinovitch struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 398689944450SShamir Rabinovitch udata, struct mlx5_ib_ucontext, ibucontext);
/*
 * NOTE(review): git-blame-annotated text; comments below are review
 * annotations only, no code token changed.
 *
 * get_tx_affinity_rr() continues here: round-robin selection of a LAG tx
 * port in 1..N - the counter is per-ucontext when a user context exists,
 * otherwise per-port on the device; N is dev->lag_ports when LAG is
 * active, else the num_lag_ports capability.
 *
 * qp_supports_affinity(): the QP types eligible for LAG tx affinity
 * (RC/UD/UC/RAW_PACKET/XRC_INI/XRC_TGT/DCI).
 *
 * get_tx_affinity(): returns 0 ("don't set") unless LAG affinity applies;
 * GSI (SQPN_QP1) QPs keep their fixed gsi_lag_port, a RESET->INIT
 * transition picks round-robin, and otherwise the affinity is derived
 * from the bond xmit slave supplied in IB_QP_AV.
 */
39875163b274SMaor Gottlieb u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 39885163b274SMaor Gottlieb atomic_t *tx_port_affinity; 3989c6a21c38SMajd Dibbiny 39905163b274SMaor Gottlieb if (ucontext) 39915163b274SMaor Gottlieb tx_port_affinity = &ucontext->tx_port_affinity; 39925163b274SMaor Gottlieb else 39935163b274SMaor Gottlieb tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity; 39945163b274SMaor Gottlieb 39955163b274SMaor Gottlieb return (unsigned int)atomic_add_return(1, tx_port_affinity) % 399634a30d76SMark Bloch (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1; 3997c6a21c38SMajd Dibbiny } 3998c6a21c38SMajd Dibbiny 39998f3243a0SMark Zhang static bool qp_supports_affinity(struct mlx5_ib_qp *qp) 40005163b274SMaor Gottlieb { 40018f3243a0SMark Zhang if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) || 40028f3243a0SMark Zhang (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) || 40038f3243a0SMark Zhang (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) || 40048f3243a0SMark Zhang (qp->type == MLX5_IB_QPT_DCI)) 40055163b274SMaor Gottlieb return true; 40065163b274SMaor Gottlieb return false; 40075163b274SMaor Gottlieb } 40085163b274SMaor Gottlieb 4009cfc1a89eSMaor Gottlieb static unsigned int get_tx_affinity(struct ib_qp *qp, 4010cfc1a89eSMaor Gottlieb const struct ib_qp_attr *attr, 4011cfc1a89eSMaor Gottlieb int attr_mask, u8 init, 40125163b274SMaor Gottlieb struct ib_udata *udata) 40135163b274SMaor Gottlieb { 40145163b274SMaor Gottlieb struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 40155163b274SMaor Gottlieb udata, struct mlx5_ib_ucontext, ibucontext); 40165163b274SMaor Gottlieb struct mlx5_ib_dev *dev = to_mdev(qp->device); 40175163b274SMaor Gottlieb struct mlx5_ib_qp *mqp = to_mqp(qp); 40185163b274SMaor Gottlieb struct mlx5_ib_qp_base *qp_base; 40195163b274SMaor Gottlieb unsigned int tx_affinity; 40205163b274SMaor Gottlieb 4021802dcc7fSMark Zhang if
(!(mlx5_ib_lag_should_assign_affinity(dev) && 40228f3243a0SMark Zhang qp_supports_affinity(mqp))) 40235163b274SMaor Gottlieb return 0; 40245163b274SMaor Gottlieb 4025cfc1a89eSMaor Gottlieb if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) 4026cfc1a89eSMaor Gottlieb tx_affinity = mqp->gsi_lag_port; 4027cfc1a89eSMaor Gottlieb else if (init) 40285163b274SMaor Gottlieb tx_affinity = get_tx_affinity_rr(dev, udata); 4029cfc1a89eSMaor Gottlieb else if ((attr_mask & IB_QP_AV) && attr->xmit_slave) 4030cfc1a89eSMaor Gottlieb tx_affinity = 4031cfc1a89eSMaor Gottlieb mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave); 4032cfc1a89eSMaor Gottlieb else 4033cfc1a89eSMaor Gottlieb return 0; 40345163b274SMaor Gottlieb 40355163b274SMaor Gottlieb qp_base = &mqp->trans_qp.base; 40365163b274SMaor Gottlieb if (ucontext) 40375163b274SMaor Gottlieb mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n", 40385163b274SMaor Gottlieb tx_affinity, qp_base->mqp.qpn, ucontext); 40395163b274SMaor Gottlieb else 40405163b274SMaor Gottlieb mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n", 40415163b274SMaor Gottlieb tx_affinity, qp_base->mqp.qpn); 40425163b274SMaor Gottlieb return tx_affinity; 4043c6a21c38SMajd Dibbiny } 4044c6a21c38SMajd Dibbiny 4045d14133ddSMark Zhang static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, 4046d14133ddSMark Zhang struct rdma_counter *counter) 4047d14133ddSMark Zhang { 4048d14133ddSMark Zhang struct mlx5_ib_dev *dev = to_mdev(qp->device); 404964bae2d4SLeon Romanovsky u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {}; 4050d14133ddSMark Zhang struct mlx5_ib_qp *mqp = to_mqp(qp); 4051d14133ddSMark Zhang struct mlx5_ib_qp_base *base; 4052d14133ddSMark Zhang u32 set_id; 405364bae2d4SLeon Romanovsky u32 *qpc; 4054d14133ddSMark Zhang 40553e1f000fSParav Pandit if (counter) 4056d14133ddSMark Zhang set_id = counter->id; 40573e1f000fSParav Pandit else 40583e1f000fSParav Pandit set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1); 4059d14133ddSMark Zhang 4060d14133ddSMark
/*
 * NOTE(review): git-blame-annotated text; comments below are review
 * annotations only, no code token changed.
 *
 * __mlx5_ib_qp_set_counter() continues here: issues an RTS2RTS command
 * with only MLX5_QP_OPTPAR_COUNTER_SET_ID in opt_param_mask to rebind
 * the QP to a (possibly per-port default) counter set without otherwise
 * touching QP state.
 *
 * __mlx5_ib_modify_qp(): the core transition path.  A static
 * [cur][new] -> opcode table selects the firmware command; the function
 * then assembles a fresh QPC (path-migration state, LAG tx affinity,
 * MTU/log_msg_max per QP type, primary/alternate address paths, CQ/PD
 * binding, atomic and RD-atomic limits, PSNs, q-key, counter set id),
 * masks the optpar bits by what the chosen transition allows, and
 * dispatches either to modify_raw_packet_qp() (RAW PACKET / SOURCE_QPN
 * QPs, with optional rate-limit parameters from the user command) or to
 * mlx5_core_qp_modify() (returning ECE options to user space when the
 * device supports ECE).
 */
Zhang base = &mqp->trans_qp.base; 406164bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP); 406264bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn); 406364bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid); 406464bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, opt_param_mask, 406564bae2d4SLeon Romanovsky MLX5_QP_OPTPAR_COUNTER_SET_ID); 406664bae2d4SLeon Romanovsky 406764bae2d4SLeon Romanovsky qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc); 406864bae2d4SLeon Romanovsky MLX5_SET(qpc, qpc, counter_set_id, set_id); 406964bae2d4SLeon Romanovsky return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in); 4070d14133ddSMark Zhang } 4071d14133ddSMark Zhang 4072e126ba97SEli Cohen static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, 4073e126ba97SEli Cohen const struct ib_qp_attr *attr, int attr_mask, 407489944450SShamir Rabinovitch enum ib_qp_state cur_state, 407589944450SShamir Rabinovitch enum ib_qp_state new_state, 407689944450SShamir Rabinovitch const struct mlx5_ib_modify_qp *ucmd, 407750aec2c3SLeon Romanovsky struct mlx5_ib_modify_qp_resp *resp, 407889944450SShamir Rabinovitch struct ib_udata *udata) 4079e126ba97SEli Cohen { 4080427c1e7bSmajd@mellanox.com static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { 4081427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = { 4082427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4083427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4084427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, 4085427c1e7bSmajd@mellanox.com }, 4086427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_INIT] = { 4087427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4088427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4089427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, 4090427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
4091427c1e7bSmajd@mellanox.com }, 4092427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTR] = { 4093427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4094427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4095427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, 4096427c1e7bSmajd@mellanox.com }, 4097427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = { 4098427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4099427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4100427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, 4101427c1e7bSmajd@mellanox.com }, 4102427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_SQD] = { 4103427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4104427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4105021c1f24SSergey Gorenko [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD_RTS_QP, 4106427c1e7bSmajd@mellanox.com }, 4107427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_SQER] = { 4108427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4109427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4110427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, 4111427c1e7bSmajd@mellanox.com }, 4112427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = { 4113427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4114427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4115427c1e7bSmajd@mellanox.com } 4116427c1e7bSmajd@mellanox.com }; 4117427c1e7bSmajd@mellanox.com 4118e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4119e126ba97SEli Cohen struct mlx5_ib_qp *qp = to_mqp(ibqp); 412019098df2Smajd@mellanox.com struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 4121e126ba97SEli Cohen struct mlx5_ib_cq *send_cq, *recv_cq; 4122e126ba97SEli Cohen struct mlx5_ib_pd *pd; 4123e126ba97SEli Cohen enum mlx5_qp_state mlx5_cur, mlx5_new; 4124f18e26afSLeon
Romanovsky void *qpc, *pri_path, *alt_path; 4125cfc1a89eSMaor Gottlieb enum mlx5_qp_optpar optpar = 0; 4126d14133ddSMark Zhang u32 set_id = 0; 4127e126ba97SEli Cohen int mlx5_st; 4128e126ba97SEli Cohen int err; 4129427c1e7bSmajd@mellanox.com u16 op; 413013eab21fSAviv Heller u8 tx_affinity = 0; 4131e126ba97SEli Cohen 41327aede1a2SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 413355de9a77SLeon Romanovsky if (mlx5_st < 0) 413455de9a77SLeon Romanovsky return -EINVAL; 413555de9a77SLeon Romanovsky 4136f18e26afSLeon Romanovsky qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); 4137f18e26afSLeon Romanovsky if (!qpc) 4138e126ba97SEli Cohen return -ENOMEM; 4139e126ba97SEli Cohen 4140029e88fdSLeon Romanovsky pd = to_mpd(qp->ibqp.pd); 4141f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, st, mlx5_st); 4142e126ba97SEli Cohen 4143e126ba97SEli Cohen if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { 4144f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 4145e126ba97SEli Cohen } else { 4146e126ba97SEli Cohen switch (attr->path_mig_state) { 4147e126ba97SEli Cohen case IB_MIG_MIGRATED: 4148f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 4149e126ba97SEli Cohen break; 4150e126ba97SEli Cohen case IB_MIG_REARM: 4151f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM); 4152e126ba97SEli Cohen break; 4153e126ba97SEli Cohen case IB_MIG_ARMED: 4154f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED); 4155e126ba97SEli Cohen break; 4156e126ba97SEli Cohen } 4157e126ba97SEli Cohen } 4158e126ba97SEli Cohen 4159cfc1a89eSMaor Gottlieb tx_affinity = get_tx_affinity(ibqp, attr, attr_mask, 41605163b274SMaor Gottlieb cur_state == IB_QPS_RESET && 41615163b274SMaor Gottlieb new_state == IB_QPS_INIT, udata); 4162f18e26afSLeon Romanovsky 4163f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity); 4164f18e26afSLeon Romanovsky if (tx_affinity && new_state == IB_QPS_RTR && 4165cfc1a89eSMaor Gottlieb
MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity)) 4166cfc1a89eSMaor Gottlieb optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF; 416713eab21fSAviv Heller 41689ecf6ac1SMaor Gottlieb if (is_sqp(qp->type)) { 4169f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, mtu, IB_MTU_256); 4170f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_msg_max, 8); 41719ecf6ac1SMaor Gottlieb } else if ((qp->type == IB_QPT_UD && 41722be08c30SLeon Romanovsky !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) || 41739ecf6ac1SMaor Gottlieb qp->type == MLX5_IB_QPT_REG_UMR) { 4174f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, mtu, IB_MTU_4096); 4175f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_msg_max, 12); 4176e126ba97SEli Cohen } else if (attr_mask & IB_QP_PATH_MTU) { 4177e126ba97SEli Cohen if (attr->path_mtu < IB_MTU_256 || 4178e126ba97SEli Cohen attr->path_mtu > IB_MTU_4096) { 4179e126ba97SEli Cohen mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); 4180e126ba97SEli Cohen err = -EINVAL; 4181e126ba97SEli Cohen goto out; 4182e126ba97SEli Cohen } 4183f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, mtu, attr->path_mtu); 4184f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_msg_max, 4185f18e26afSLeon Romanovsky MLX5_CAP_GEN(dev->mdev, log_max_msg)); 4186e126ba97SEli Cohen } 4187e126ba97SEli Cohen 4188e126ba97SEli Cohen if (attr_mask & IB_QP_DEST_QPN) 4189f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num); 4190f18e26afSLeon Romanovsky 4191f18e26afSLeon Romanovsky pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path); 4192f18e26afSLeon Romanovsky alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path); 4193e126ba97SEli Cohen 4194e126ba97SEli Cohen if (attr_mask & IB_QP_PKEY_INDEX) 4195f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index); 4196e126ba97SEli Cohen 4197e126ba97SEli Cohen /* todo implement counter_index functionality */ 4198e126ba97SEli Cohen 41999ecf6ac1SMaor Gottlieb if (is_sqp(qp->type)) 4200f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, vhca_port_num,
qp->port); 4201e126ba97SEli Cohen 4202e126ba97SEli Cohen if (attr_mask & IB_QP_PORT) 4203f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num); 4204e126ba97SEli Cohen 4205e126ba97SEli Cohen if (attr_mask & IB_QP_AV) { 4206f18e26afSLeon Romanovsky err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path, 4207f18e26afSLeon Romanovsky attr_mask & IB_QP_PORT ? attr->port_num : 4208f18e26afSLeon Romanovsky qp->port, 4209f879ee8dSAchiad Shochat attr_mask, 0, attr, false); 4210e126ba97SEli Cohen if (err) 4211e126ba97SEli Cohen goto out; 4212e126ba97SEli Cohen } 4213e126ba97SEli Cohen 4214e126ba97SEli Cohen if (attr_mask & IB_QP_TIMEOUT) 4215f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, ack_timeout, attr->timeout); 4216e126ba97SEli Cohen 4217e126ba97SEli Cohen if (attr_mask & IB_QP_ALT_PATH) { 4218f18e26afSLeon Romanovsky err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path, 4219f879ee8dSAchiad Shochat attr->alt_port_num, 4220f18e26afSLeon Romanovsky attr_mask | IB_QP_PKEY_INDEX | 4221f18e26afSLeon Romanovsky IB_QP_TIMEOUT, 4222f879ee8dSAchiad Shochat 0, attr, true); 4223e126ba97SEli Cohen if (err) 4224e126ba97SEli Cohen goto out; 4225e126ba97SEli Cohen } 4226e126ba97SEli Cohen 42279ecf6ac1SMaor Gottlieb get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 422889ea94a7SMaor Gottlieb &send_cq, &recv_cq); 4229e126ba97SEli Cohen 4230f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pd, pd ?
pd->pdn : to_mpd(dev->devr.p0)->pdn); 4231f18e26afSLeon Romanovsky if (send_cq) 4232f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn); 4233f18e26afSLeon Romanovsky if (recv_cq) 4234f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn); 4235f18e26afSLeon Romanovsky 4236f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ); 4237e126ba97SEli Cohen 4238e126ba97SEli Cohen if (attr_mask & IB_QP_RNR_RETRY) 4239f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry); 4240e126ba97SEli Cohen 4241e126ba97SEli Cohen if (attr_mask & IB_QP_RETRY_CNT) 4242f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); 4243e126ba97SEli Cohen 4244f18e26afSLeon Romanovsky if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic) 4245f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic)); 4246e126ba97SEli Cohen 4247e126ba97SEli Cohen if (attr_mask & IB_QP_SQ_PSN) 4248f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn); 4249e126ba97SEli Cohen 4250f18e26afSLeon Romanovsky if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic) 4251f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_rra_max, 4252f18e26afSLeon Romanovsky ilog2(attr->max_dest_rd_atomic)); 4253e126ba97SEli Cohen 4254a60109dcSYonatan Cohen if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { 4255f18e26afSLeon Romanovsky err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc); 4256a60109dcSYonatan Cohen if (err) 4257a60109dcSYonatan Cohen goto out; 4258a60109dcSYonatan Cohen } 4259e126ba97SEli Cohen 4260e126ba97SEli Cohen if (attr_mask & IB_QP_MIN_RNR_TIMER) 4261f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer); 4262e126ba97SEli Cohen 4263e126ba97SEli Cohen if (attr_mask & IB_QP_RQ_PSN) 4264f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn); 4265e126ba97SEli Cohen 4266e126ba97SEli Cohen if (attr_mask &
IB_QP_QKEY) 4267f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, q_key, attr->qkey); 4268e126ba97SEli Cohen 4269e126ba97SEli Cohen if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 4270f18e26afSLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 4271e126ba97SEli Cohen 42720837e86aSMark Bloch if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 42730837e86aSMark Bloch u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num : 42740837e86aSMark Bloch qp->port) - 1; 4275c2e53b2cSYishai Hadas 4276c2e53b2cSYishai Hadas /* Underlay port should be used - index 0 function per port */ 42772be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 4278c2e53b2cSYishai Hadas port_num = 0; 4279c2e53b2cSYishai Hadas 4280d14133ddSMark Zhang if (ibqp->counter) 4281d14133ddSMark Zhang set_id = ibqp->counter->id; 4282d14133ddSMark Zhang else 42833e1f000fSParav Pandit set_id = mlx5_ib_get_counters_id(dev, port_num); 4284f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, counter_set_id, set_id); 42850837e86aSMark Bloch } 42860837e86aSMark Bloch 4287e126ba97SEli Cohen if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 4288f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, rlky, 1); 4289e126ba97SEli Cohen 42902be08c30SLeon Romanovsky if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) 4291f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, deth_sqpn, 1); 4292e126ba97SEli Cohen 4293e126ba97SEli Cohen mlx5_cur = to_mlx5_state(cur_state); 4294e126ba97SEli Cohen mlx5_new = to_mlx5_state(new_state); 4295e126ba97SEli Cohen 4296427c1e7bSmajd@mellanox.com if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || 42975d414b17SDan Carpenter !optab[mlx5_cur][mlx5_new]) { 42985d414b17SDan Carpenter err = -EINVAL; 4299427c1e7bSmajd@mellanox.com goto out; 43005d414b17SDan Carpenter } 4301427c1e7bSmajd@mellanox.com 4302427c1e7bSmajd@mellanox.com op = optab[mlx5_cur][mlx5_new]; 4303cfc1a89eSMaor Gottlieb optpar |= ib_mask_to_mlx5_opt(attr_mask);
4304e126ba97SEli Cohen optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; 4305ad5f8e96Smajd@mellanox.com 43069ecf6ac1SMaor Gottlieb if (qp->type == IB_QPT_RAW_PACKET || 43072be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 43080680efa2SAlex Vesker struct mlx5_modify_raw_qp_param raw_qp_param = {}; 43090680efa2SAlex Vesker 43100680efa2SAlex Vesker raw_qp_param.operation = op; 4311eb49ab0cSAlex Vesker if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 4312d14133ddSMark Zhang raw_qp_param.rq_q_ctr_id = set_id; 4313eb49ab0cSAlex Vesker raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID; 4314eb49ab0cSAlex Vesker } 43157d29f349SBodong Wang 4316d5ed8ac3SMark Bloch if (attr_mask & IB_QP_PORT) 4317d5ed8ac3SMark Bloch raw_qp_param.port = attr->port_num; 4318d5ed8ac3SMark Bloch 43197d29f349SBodong Wang if (attr_mask & IB_QP_RATE_LIMIT) { 432061147f39SBodong Wang raw_qp_param.rl.rate = attr->rate_limit; 432161147f39SBodong Wang 432261147f39SBodong Wang if (ucmd->burst_info.max_burst_sz) { 432361147f39SBodong Wang if (attr->rate_limit && 432461147f39SBodong Wang MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) { 432561147f39SBodong Wang raw_qp_param.rl.max_burst_sz = 432661147f39SBodong Wang ucmd->burst_info.max_burst_sz; 432761147f39SBodong Wang } else { 432861147f39SBodong Wang err = -EINVAL; 432961147f39SBodong Wang goto out; 433061147f39SBodong Wang } 433161147f39SBodong Wang } 433261147f39SBodong Wang 433361147f39SBodong Wang if (ucmd->burst_info.typical_pkt_sz) { 433461147f39SBodong Wang if (attr->rate_limit && 433561147f39SBodong Wang MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) { 433661147f39SBodong Wang raw_qp_param.rl.typical_pkt_sz = 433761147f39SBodong Wang ucmd->burst_info.typical_pkt_sz; 433861147f39SBodong Wang } else { 433961147f39SBodong Wang err = -EINVAL; 434061147f39SBodong Wang goto out; 434161147f39SBodong Wang } 434261147f39SBodong Wang } 434361147f39SBodong Wang 43447d29f349SBodong Wang raw_qp_param.set_mask |=
MLX5_RAW_QP_RATE_LIMIT; 43457d29f349SBodong Wang } 43467d29f349SBodong Wang 434713eab21fSAviv Heller err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity); 43480680efa2SAlex Vesker } else { 434950aec2c3SLeon Romanovsky if (udata) { 435050aec2c3SLeon Romanovsky /* For the kernel flows, the resp will stay zero */ 435150aec2c3SLeon Romanovsky resp->ece_options = 435250aec2c3SLeon Romanovsky MLX5_CAP_GEN(dev->mdev, ece_support) ? 43535f62a521SLeon Romanovsky ucmd->ece_options : 0; 435450aec2c3SLeon Romanovsky resp->response_length = sizeof(*resp); 435550aec2c3SLeon Romanovsky } 43565f62a521SLeon Romanovsky err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp, 435750aec2c3SLeon Romanovsky &resp->ece_options); 43580680efa2SAlex Vesker } 43590680efa2SAlex Vesker 4360e126ba97SEli Cohen if (err) 4361e126ba97SEli Cohen goto out; 4362e126ba97SEli Cohen 4363e126ba97SEli Cohen qp->state = new_state; 4364e126ba97SEli Cohen 4365e126ba97SEli Cohen if (attr_mask & IB_QP_ACCESS_FLAGS) 436619098df2Smajd@mellanox.com qp->trans_qp.atomic_rd_en = attr->qp_access_flags; 4367e126ba97SEli Cohen if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 436819098df2Smajd@mellanox.com qp->trans_qp.resp_depth = attr->max_dest_rd_atomic; 4369e126ba97SEli Cohen if (attr_mask & IB_QP_PORT) 4370e126ba97SEli Cohen qp->port = attr->port_num; 4371e126ba97SEli Cohen if (attr_mask & IB_QP_ALT_PATH) 437219098df2Smajd@mellanox.com qp->trans_qp.alt_port = attr->alt_port_num; 4373e126ba97SEli Cohen 4374e126ba97SEli Cohen /* 4375e126ba97SEli Cohen * If we moved a kernel QP to RESET, clean up all old CQ 4376e126ba97SEli Cohen * entries and reinitialize the QP. 4377e126ba97SEli Cohen */ 437875a45982SLeon Romanovsky if (new_state == IB_QPS_RESET && 43799ecf6ac1SMaor Gottlieb !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) { 438019098df2Smajd@mellanox.com mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 4381e126ba97SEli Cohen ibqp->srq ?
/*
 * NOTE(review): git-blame-annotated text; comments below are review
 * annotations only, no code token changed.
 *
 * Tail of __mlx5_ib_modify_qp(): after a successful transition the cached
 * attributes are updated; moving a kernel (non-XRC_TGT) QP to RESET also
 * cleans stale CQEs and re-initializes all ring heads/tails, doorbells
 * and the SQ edge; a counter bind deferred until RTS is applied here.
 *
 * is_valid_mask(): a mask is valid iff it contains every required bit and
 * nothing outside required|optional.
 *
 * modify_dci_qp_is_ok(): per-transition required/optional attribute-mask
 * table for driver DCI QPs (any state -> RESET/ERR always allowed with a
 * bare IB_QP_STATE mask).
 */
to_msrq(ibqp->srq) : NULL); 4382e126ba97SEli Cohen if (send_cq != recv_cq) 438319098df2Smajd@mellanox.com mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL); 4384e126ba97SEli Cohen 4385e126ba97SEli Cohen qp->rq.head = 0; 4386e126ba97SEli Cohen qp->rq.tail = 0; 4387e126ba97SEli Cohen qp->sq.head = 0; 4388e126ba97SEli Cohen qp->sq.tail = 0; 4389e126ba97SEli Cohen qp->sq.cur_post = 0; 439034f4c955SGuy Levi if (qp->sq.wqe_cnt) 439134f4c955SGuy Levi qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); 4392950bf4f1SLeon Romanovsky qp->sq.last_poll = 0; 4393e126ba97SEli Cohen qp->db.db[MLX5_RCV_DBR] = 0; 4394e126ba97SEli Cohen qp->db.db[MLX5_SND_DBR] = 0; 4395e126ba97SEli Cohen } 4396e126ba97SEli Cohen 4397d14133ddSMark Zhang if ((new_state == IB_QPS_RTS) && qp->counter_pending) { 4398d14133ddSMark Zhang err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter); 4399d14133ddSMark Zhang if (!err) 4400d14133ddSMark Zhang qp->counter_pending = 0; 4401d14133ddSMark Zhang } 4402d14133ddSMark Zhang 4403e126ba97SEli Cohen out: 4404f18e26afSLeon Romanovsky kfree(qpc); 4405e126ba97SEli Cohen return err; 4406e126ba97SEli Cohen } 4407e126ba97SEli Cohen 4408c32a4f29SMoni Shoua static inline bool is_valid_mask(int mask, int req, int opt) 4409c32a4f29SMoni Shoua { 4410c32a4f29SMoni Shoua if ((mask & req) != req) 4411c32a4f29SMoni Shoua return false; 4412c32a4f29SMoni Shoua 4413c32a4f29SMoni Shoua if (mask & ~(req | opt)) 4414c32a4f29SMoni Shoua return false; 4415c32a4f29SMoni Shoua 4416c32a4f29SMoni Shoua return true; 4417c32a4f29SMoni Shoua } 4418c32a4f29SMoni Shoua 4419c32a4f29SMoni Shoua /* check valid transition for driver QP types 4420c32a4f29SMoni Shoua * for now the only QP type that this function supports is DCI 4421c32a4f29SMoni Shoua */ 4422c32a4f29SMoni Shoua static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state, 4423c32a4f29SMoni Shoua enum ib_qp_attr_mask attr_mask) 4424c32a4f29SMoni Shoua { 4425c32a4f29SMoni Shoua int req = IB_QP_STATE; 4426c32a4f29SMoni
Shoua int opt = 0; 4427c32a4f29SMoni Shoua 442899ed748eSMoni Shoua if (new_state == IB_QPS_RESET) { 442999ed748eSMoni Shoua return is_valid_mask(attr_mask, req, opt); 443099ed748eSMoni Shoua } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 4431c32a4f29SMoni Shoua req |= IB_QP_PKEY_INDEX | IB_QP_PORT; 4432c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4433c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { 4434c32a4f29SMoni Shoua opt = IB_QP_PKEY_INDEX | IB_QP_PORT; 4435c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4436c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 4437c32a4f29SMoni Shoua req |= IB_QP_PATH_MTU; 44385ec0304cSArtemy Kovalyov opt = IB_QP_PKEY_INDEX | IB_QP_AV; 4439c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4440c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { 4441c32a4f29SMoni Shoua req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | 4442c32a4f29SMoni Shoua IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN; 4443c32a4f29SMoni Shoua opt = IB_QP_MIN_RNR_TIMER; 4444c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4445c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) { 4446c32a4f29SMoni Shoua opt = IB_QP_MIN_RNR_TIMER; 4447c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4448c32a4f29SMoni Shoua } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) { 4449c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4450c32a4f29SMoni Shoua } 4451c32a4f29SMoni Shoua return false; 4452c32a4f29SMoni Shoua } 4453c32a4f29SMoni Shoua 4454776a3906SMoni Shoua /* mlx5_ib_modify_dct: modify a DCT QP 4455776a3906SMoni Shoua * valid transitions are: 4456776a3906SMoni Shoua * RESET to INIT: must set access_flags, pkey_index and port 4457776a3906SMoni Shoua * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
 *		mtu, gid_index and hop_limit
 * Other transitions and attributes are illegal
 */
static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct mlx5_ib_modify_qp *ucmd,
			      struct ib_udata *udata)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	enum ib_qp_state cur_state, new_state;
	int required = IB_QP_STATE;
	void *dctc;
	int err;

	if (!(attr_mask & IB_QP_STATE))
		return -EINVAL;

	cur_state = qp->state;
	new_state = attr->qp_state;

	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
		/*
		 * DCT doesn't initialize QP till modify command is executed,
		 * so we need to overwrite previously set ECE field if user
		 * provided any value except zero, which means not set/not
		 * valid.
		 */
		MLX5_SET(dctc, dctc, ece, ucmd->ece_options);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		/*
		 * RESET -> INIT: only accumulate attributes into the staged
		 * create_dct_in mailbox (qp->dct.in); no firmware command is
		 * issued until the INIT -> RTR transition below.
		 */
		u16 set_id;

		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
		if (!is_valid_mask(attr_mask, required, 0))
			return -EINVAL;

		if (attr->port_num == 0 ||
		    attr->port_num > dev->num_ports) {
			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
				    attr->port_num, dev->num_ports);
			return -EINVAL;
		}
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			MLX5_SET(dctc, dctc, rre, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			MLX5_SET(dctc, dctc, rwe, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
			int atomic_mode;

			/* Negative result means atomics are unsupported for DCT */
			atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
			if (atomic_mode < 0)
				return -EOPNOTSUPP;

			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
			MLX5_SET(dctc, dctc, rae, 1);
		}
		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
		/*
		 * When LAG is active the physical port is chosen by the
		 * driver's round-robin tx-affinity helper instead of the
		 * user-supplied port number.
		 */
		if (mlx5_lag_is_active(dev->mdev))
			MLX5_SET(dctc, dctc, port,
				 get_tx_affinity_rr(dev, udata));
		else
			MLX5_SET(dctc, dctc, port, attr->port_num);

		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
		MLX5_SET(dctc, dctc, counter_set_id, set_id);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		/*
		 * INIT -> RTR: the hardware DCT object is actually created
		 * here, and its number is returned to userspace in resp.dctn.
		 */
		struct mlx5_ib_modify_qp_resp resp = {};
		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
		u32 min_resp_len = offsetofend(typeof(resp), dctn);

		if (udata->outlen < min_resp_len)
			return -EINVAL;
		/*
		 * If we don't have enough space for the ECE options,
		 * simply indicate it with resp.response_length.
		 */
		resp.response_length = (udata->outlen < sizeof(resp)) ?
					       min_resp_len :
					       sizeof(resp);

		required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
		if (!is_valid_mask(attr_mask, required, 0))
			return -EINVAL;
		MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
		MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
		MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
			MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);

		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
					   MLX5_ST_SZ_BYTES(create_dct_in), out,
					   sizeof(out));
		/* Translate firmware syndrome in @out into an errno */
		err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
		if (err)
			return err;
		resp.dctn = qp->dct.mdct.mqp.qpn;
		if (MLX5_CAP_GEN(dev->mdev, ece_support))
			resp.ece_options = MLX5_GET(create_dct_out, out, ece);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			/* Userspace never saw the DCT number; tear it down */
			mlx5_core_destroy_dct(dev, &qp->dct.mdct);
			return err;
		}
	} else {
		mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
		return -EINVAL;
	}

	qp->state = new_state;
	return 0;
}

/*
 * For the raw-Ethernet-only device profile, modify_qp is restricted to raw
 * packet QPs, the internal UMR QP, and the internal write-combining test QP.
 * All other profiles allow modify on every QP type.
 */
static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_qp *qp)
{
	if (dev->profile != &raw_eth_profile)
		return true;

	if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
		return true;

	/* Internal QP used for wc testing, with NOPs in wq */
	if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
		return true;

	return false;
}

/*
 * Validate requested max_rd_atomic/max_dest_rd_atomic against the device
 * capability limits (DC limits for DCI QPs, regular QP limits otherwise).
 * Returns true when the request is within limits.
 */
static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
			      int attr_mask, enum ib_qp_type qp_type)
{
	int log_max_ra_res;
	int log_max_ra_req;

	/* DC and regular QPs have separate responder/requestor limits */
	if (qp_type == MLX5_IB_QPT_DCI) {
		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
						   log_max_ra_res_dc);
		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
						   log_max_ra_req_dc);
	} else {
		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
						   log_max_ra_res_qp);
		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
						   log_max_ra_req_qp);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > log_max_ra_res) {
		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
			    attr->max_rd_atomic);
		return false;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > log_max_ra_req) {
		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
			    attr->max_dest_rd_atomic);
		return false;
	}
	return true;
}

/*
 * Verbs modify_qp entry point: validate the caller-supplied attribute mask
 * and user command, dispatch GSI/DCT QPs to their dedicated handlers, and
 * hand everything else to __mlx5_ib_modify_qp() under qp->mutex.
 */
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_modify_qp_resp resp = {};
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_modify_qp ucmd = {};
	enum ib_qp_type qp_type;
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	if (!mlx5_ib_modify_qp_allowed(dev, qp))
		return -EOPNOTSUPP;

	if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
		return -EOPNOTSUPP;

	/* QPs spread over an RQ indirection table cannot be modified */
	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (udata && udata->inlen) {
		if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
			return -EINVAL;

		/* Trailing bytes beyond the known struct must be zero */
		if (udata->inlen > sizeof(ucmd) &&
		    !ib_is_udata_cleared(udata, sizeof(ucmd),
					 udata->inlen - sizeof(ucmd)))
			return -EOPNOTSUPP;

		if (ib_copy_from_udata(&ucmd, udata,
				       min(udata->inlen, sizeof(ucmd))))
			return -EFAULT;

		if (ucmd.comp_mask ||
		    memchr_inv(&ucmd.burst_info.reserved, 0,
			       sizeof(ucmd.burst_info.reserved)))
			return -EOPNOTSUPP;

	}

	if (qp->type == IB_QPT_GSI)
		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);

	/* The hardware GSI QP follows the regular GSI transition rules */
	qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type;

	if (qp_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
		/* Underlay QPs only support plain state transitions */
		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
				    attr_mask);
			goto out;
		}
	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
		   qp_type != MLX5_IB_QPT_DCI &&
		   !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
				       attr_mask)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, qp->type, attr_mask);
		goto out;
	} else if (qp_type == MLX5_IB_QPT_DCI &&
		   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > dev->num_ports)) {
		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
			    attr->port_num, dev->num_ports);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->pkey_table_len) {
		mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index);
		goto out;
	}

	if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
		goto out;

	/* RESET -> RESET is a no-op */
	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
				  new_state, &ucmd, &resp, udata);

	/* resp.response_length is set in ECE supported flows only */
	if (!err && resp.response_length &&
	    udata->outlen >= resp.response_length)
		/* Return -EFAULT to the user and expect him to destroy QP. */
		err = ib_copy_to_udata(udata, &resp, resp.response_length);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

/* Translate a firmware QP state into the IB verbs QP state. */
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}

/* Translate a firmware path-migration state into the IB verbs value. */
static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

/*
 * Fill a verbs rdma_ah_attr from a firmware address path structure
 * (the "ads" layout) as returned by QUERY_QP.
 */
static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
			    struct rdma_ah_attr *ah_attr, void *path)
{
	int port = MLX5_GET(ads, path, vhca_port_num);
	int static_rate;

	memset(ah_attr, 0, sizeof(*ah_attr));
	/* An out-of-range port means the path is not populated; leave zeroed */
	if (!port || port > ibdev->num_ports)
		return;

	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);

	rdma_ah_set_port_num(ah_attr, port);
	rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));

	rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
	rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));

	static_rate = MLX5_GET(ads, path, stat_rate);
	rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
	/* RoCE addressing is always GRH-based */
	if (MLX5_GET(ads, path, grh) ||
	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
				MLX5_GET(ads, path, src_addr_index),
				MLX5_GET(ads, path, hop_limit),
				MLX5_GET(ads, path, tclass));
		rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
	}
}

/*
 * Query the firmware state of the SQ behind a raw packet QP; caches the
 * result in sq->state and returns it through @sq_state.
 */
static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
					struct mlx5_ib_sq *sq,
					u8 *sq_state)
{
	int err;

	err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
	if (err)
		goto out;
	sq->state = *sq_state;

out:
	return err;
}

/*
 * Query the firmware state of the RQ behind a raw packet QP; caches the
 * result in rq->state and returns it through @rq_state.
 */
static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
					struct mlx5_ib_rq *rq,
					u8 *rq_state)
{
	void *out;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
	out = kvzalloc(inlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
	if (err)
		goto out;

	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
	*rq_state = MLX5_GET(rqc, rqc, state);
	rq->state = *rq_state;

out:
	kvfree(out);
	return err;
}

/*
 * Combine separate SQ and RQ states into a single QP state.  Table entries:
 *  - MLX5_QP_STATE      : not decidable from HW alone, fall back to the
 *                         software qp->state
 *  - MLX5_QP_STATE_BAD  : inconsistent combination, WARN and fail
 */
static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
				  struct mlx5_ib_qp *qp, u8 *qp_state)
{
	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
		[MLX5_RQC_STATE_RST] = {
			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
		},
		[MLX5_RQC_STATE_RDY] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
		},
		[MLX5_RQC_STATE_ERR] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
		},
		[MLX5_RQ_STATE_NA] = {
			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
		},
	};

	*qp_state = sqrq_trans[rq_state][sq_state];

	if (*qp_state == MLX5_QP_STATE_BAD) {
		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
		return -EINVAL;
	}

	if (*qp_state == MLX5_QP_STATE)
		*qp_state = qp->state;

	return 0;
}

/*
 * Compute the aggregate state of a raw packet QP by querying its SQ and RQ
 * (when present; absent halves stay at their *_STATE_NA sentinel).
 */
static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_qp *qp,
				     u8 *raw_packet_qp_state)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	int err;
	u8 sq_state = MLX5_SQ_STATE_NA;
	u8 rq_state = MLX5_RQ_STATE_NA;

	if (qp->sq.wqe_cnt) {
		err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
		if (err)
			return err;
	}

	if (qp->rq.wqe_cnt) {
		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
		if (err)
			return err;
	}

	return sqrq_state_to_qp_state(sq_state, rq_state, qp,
				      raw_packet_qp_state);
}

/*
 * Execute the firmware QUERY_QP command for a regular (transport) QP and
 * translate the returned QP context into ib_qp_attr fields.
 */
static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 struct ib_qp_attr *qp_attr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	void *qpc, *pri_path, *alt_path;
	u32 *outb;
	int err;

	outb = kzalloc(outlen, GFP_KERNEL);
	if (!outb)
		return -ENOMEM;

	err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
				 false);
	if (err)
		goto out;

	qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);

	qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
	/* SQ_DRAINING maps to IB_QPS_SQD; flag draining separately */
	if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
		qp_attr->sq_draining = 1;

	qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
	qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
	qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
	qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
	qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
	qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);

	/* Translate the rre/rwe/rae context bits into IB access flags */
	if (MLX5_GET(qpc, qpc, rre))
		qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
	if (MLX5_GET(qpc, qpc, rwe))
		qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
	if (MLX5_GET(qpc, qpc, rae))
		qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;

	/* Context stores log2 of the rd_atomic limits */
	qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
	qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
	qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
	qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
	qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);

	pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
	alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);

	/* Address vectors are only meaningful for connected transports */
	if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC ||
	    qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
		qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
		qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
	}

	qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
	qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
	qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
	qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);

out:
	kfree(outb);
	return err;
}

/*
 * Query a DCT QP: only the attributes in supported_mask can be reported,
 * and only while the DCT is in RTR (the single state a created DCT holds).
 */
static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
				struct ib_qp_attr *qp_attr, int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_core_dct *dct = &mqp->dct.mdct;
	u32 *out;
	u32 access_flags = 0;
	int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
	void *dctc;
	int err;
4977776a3906SMoni Shoua int supported_mask = IB_QP_STATE | 4978776a3906SMoni Shoua IB_QP_ACCESS_FLAGS | 4979776a3906SMoni Shoua IB_QP_PORT | 4980776a3906SMoni Shoua IB_QP_MIN_RNR_TIMER | 4981776a3906SMoni Shoua IB_QP_AV | 4982776a3906SMoni Shoua IB_QP_PATH_MTU | 4983776a3906SMoni Shoua IB_QP_PKEY_INDEX; 4984776a3906SMoni Shoua 4985776a3906SMoni Shoua if (qp_attr_mask & ~supported_mask) 4986776a3906SMoni Shoua return -EINVAL; 4987776a3906SMoni Shoua if (mqp->state != IB_QPS_RTR) 4988776a3906SMoni Shoua return -EINVAL; 4989776a3906SMoni Shoua 4990776a3906SMoni Shoua out = kzalloc(outlen, GFP_KERNEL); 4991776a3906SMoni Shoua if (!out) 4992776a3906SMoni Shoua return -ENOMEM; 4993776a3906SMoni Shoua 4994333fbaa0SLeon Romanovsky err = mlx5_core_dct_query(dev, dct, out, outlen); 4995776a3906SMoni Shoua if (err) 4996776a3906SMoni Shoua goto out; 4997776a3906SMoni Shoua 4998776a3906SMoni Shoua dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry); 4999776a3906SMoni Shoua 5000776a3906SMoni Shoua if (qp_attr_mask & IB_QP_STATE) 5001776a3906SMoni Shoua qp_attr->qp_state = IB_QPS_RTR; 5002776a3906SMoni Shoua 5003776a3906SMoni Shoua if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { 5004776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rre)) 5005776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_READ; 5006776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rwe)) 5007776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_WRITE; 5008776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rae)) 5009776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_ATOMIC; 5010776a3906SMoni Shoua qp_attr->qp_access_flags = access_flags; 5011776a3906SMoni Shoua } 5012776a3906SMoni Shoua 5013776a3906SMoni Shoua if (qp_attr_mask & IB_QP_PORT) 5014776a3906SMoni Shoua qp_attr->port_num = MLX5_GET(dctc, dctc, port); 5015776a3906SMoni Shoua if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) 5016776a3906SMoni Shoua qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak); 5017776a3906SMoni Shoua if (qp_attr_mask & IB_QP_AV) { 
		/* GRH fields are read straight out of the queried DCT context */
		qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
		qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
		qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
		qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
	}
	if (qp_attr_mask & IB_QP_PATH_MTU)
		qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
out:
	kfree(out);
	return err;
}

/*
 * mlx5_ib_query_qp() - query the current attributes of a QP.
 * @ibqp: the QP to query.
 * @qp_attr: output; filled with the QP's current attributes.
 * @qp_attr_mask: mask of attributes the caller is interested in.
 * @qp_init_attr: output; filled with the creation-time attributes.
 *
 * GSI QPs and DCTs are handled by their dedicated query paths; RAW_PACKET
 * QPs (and QPs created with a source QPN) read their state from the
 * underlying raw-packet sub-queues.  Both output structures are zeroed
 * first because not all fields apply to every QP type.
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int err = 0;
	u8 raw_packet_qp_state;

	/* QPs backed by an RX indirection table cannot be queried */
	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (qp->type == IB_QPT_GSI)
		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
					    qp_init_attr);

	/* Not all of output fields are applicable, make sure to zero them */
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
	memset(qp_attr, 0, sizeof(*qp_attr));

	if (unlikely(qp->type == MLX5_IB_QPT_DCT))
		return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
					    qp_attr_mask, qp_init_attr);

	mutex_lock(&qp->mutex);

	if (qp->type == IB_QPT_RAW_PACKET ||
	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
		/* State lives in the RQ/SQ objects, not the QP context */
		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
		if (err)
			goto out;
		qp->state = raw_packet_qp_state;
		qp_attr->port_num = 1;
	} else {
		err = query_qp_attr(dev, qp, qp_attr);
		if (err)
			goto out;
	}

	qp_attr->qp_state = qp->state;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		/* Kernel QP: send queue sizing is known to the driver */
		qp_attr->cap.max_send_wr  = qp->sq.max_post;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
		qp_init_attr->qp_context = ibqp->qp_context;
	} else {
		/* Userspace owns the send queue accounting */
		qp_attr->cap.max_send_wr  = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->qp_type = qp->type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = qp->flags;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}

/*
 * mlx5_ib_alloc_xrcd() - allocate an XRC domain.
 *
 * Fails with -EOPNOTSUPP when the device does not report the "xrc"
 * capability; otherwise the xrcdn is obtained via a firmware command.
 */
int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibxrcd->device);
	struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return -EOPNOTSUPP;

	return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
}

/* mlx5_ib_dealloc_xrcd() - release an XRC domain back to firmware. */
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;

	return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
}

/*
 * mlx5_ib_wq_event() - forward a hardware WQ event to the consumer.
 *
 * Invoked by the mlx5 core on events for a tracked RQ; translates the
 * firmware event code into an ib_event and calls the WQ's event handler,
 * if one was registered.
 */
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
	struct ib_event event;

	if (rwq->ibwq.event_handler) {
		event.device     = rwq->ibwq.device;
		event.element.wq = &rwq->ibwq;
		switch (type) {
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_WQ_FATAL;
			break;
		default:
			/* Unknown event types are logged and dropped */
			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
			return;
		}

		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
	}
}

/*
 * set_delay_drop() - enable the device-wide delay-drop timeout.
 *
 * Idempotent under dev->delay_drop.lock: the firmware command is issued
 * only on the first activation.  On success (including the already-active
 * case) the RQ reference count is bumped.
 *
 * Return: 0 on success or a negative errno from the firmware command.
 */
static int set_delay_drop(struct mlx5_ib_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->delay_drop.lock);
	if (dev->delay_drop.activate)
		goto out;

	err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
	if (err)
		goto out;

	dev->delay_drop.activate = true;
out:
	mutex_unlock(&dev->delay_drop.lock);

	if (!err)
		atomic_inc(&dev->delay_drop.rqs_cnt);
	return err;
}

/*
 * create_rq() - build and execute the CREATE_RQ firmware command for a
 * user receive WQ.
 * @rwq: the driver WQ object (sizing/striding parameters already set by
 *	 prepare_user_rq()).
 * @pd: protection domain the RQ belongs to.
 * @init_attr: verbs WQ creation attributes (CQ, create_flags).
 *
 * Validates optional features (end padding, CVLAN stripping, scatter FCS,
 * delay drop) against device caps before setting the corresponding RQ
 * context bits.  If delay drop was requested and RQ creation succeeded,
 * the device-wide delay-drop timer is armed; failure to arm it tears the
 * RQ back down.
 *
 * Return: 0 on success or a negative errno.
 */
static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
		      struct ib_wq_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev;
	int has_net_offloads;
	__be64 *rq_pas0;
	int ts_format;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	dev = to_mdev(pd->device);

	ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
	if (ts_format < 0)
		return ts_format;

	/* Command length = base struct + one u64 PAS entry per RQ page */
	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, mem_rq_type,
		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, ts_format, ts_format);
	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
	MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type,
		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		} else {
			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
		}
	}
	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
		/*
		 * In Firmware number of strides in each WQE is:
		 * "512 * 2^single_wqe_log_num_of_strides"
		 * Values 3 to 8 are accepted as 10 to 15, 9 to 18 are
		 * accepted as 0 to 9
		 */
		static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
					     2,  3,  4,  5,  6,  7,  8, 9 };
		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 rwq->single_stride_log_num_of_bytes -
			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 fw_map[rwq->log_num_strides -
				MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
	}
	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	} else {
		/* Stripping not requested: disable it via vlan-strip-disable */
		MLX5_SET(rqc, rqc, vsd, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, scatter_fcs, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		if (!(dev->ib_dev.attrs.raw_packet_caps &
		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, delay_drop_en, 1);
	}
	/* Append the physical address list of the user buffer */
	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		err = set_delay_drop(dev);
		if (err) {
			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
				     err);
			/* Undo the RQ creation; delay drop was mandatory */
			mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
		} else {
			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
		}
	}
out:
	kvfree(in);
	return err;
}

/*
 * set_user_rq_size() - validate and record user-requested RQ sizing.
 *
 * Copies wqe_count/wqe_shift from the user command into @rwq, rejecting
 * zero counts, sizes above the device's log_max_wq_sz cap, and
 * wqe_count << wqe_shift overflow.
 *
 * Return: 0 on success, -EINVAL on any invalid size.
 */
static int set_user_rq_size(struct mlx5_ib_dev *dev,
			    struct ib_wq_init_attr *wq_init_attr,
			    struct mlx5_ib_create_wq *ucmd,
			    struct mlx5_ib_rwq *rwq)
{
	/* Sanity check RQ size before proceeding */
	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
		return -EINVAL;

	if (!ucmd->rq_wqe_count)
		return -EINVAL;

	rwq->wqe_count = ucmd->rq_wqe_count;
	rwq->wqe_shift = ucmd->rq_wqe_shift;
	if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
		return -EINVAL;

	rwq->log_rq_stride = rwq->wqe_shift;
	rwq->log_rq_size = ilog2(rwq->wqe_count);
	return 0;
}

/*
 * log_of_strides_valid() - check a striding-RQ log_num_strides value.
 *
 * The extended lower bound is only usable when the device reports the
 * ext_stride_num_range capability; otherwise the classic minimum applies.
 */
static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
{
	/* Hard limits that apply regardless of device capabilities */
	if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
	    (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
		return false;

	/* Without ext_stride_num_range the classic minimum is enforced */
	if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
	    (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
		return false;

	return true;
}

/*
 * prepare_user_rq() - parse and validate the user's create-WQ command.
 * @pd: protection domain for the WQ.
 * @init_attr: verbs WQ creation attributes.
 * @udata: user command buffer (struct mlx5_ib_create_wq).
 * @rwq: output; driver WQ object populated from the validated command.
 *
 * Copies the user command (rejecting short or non-zero-trailing buffers),
 * validates the optional striding-RQ parameters against device caps,
 * records the RQ sizing via set_user_rq_size() and registers the user
 * memory via create_user_rq().
 *
 * Return: 0 on success or a negative errno.
 */
static int prepare_user_rq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr,
			   struct ib_udata *udata,
			   struct mlx5_ib_rwq *rwq)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_wq ucmd = {};
	int err;
	size_t required_cmd_sz;

	required_cmd_sz = offsetofend(struct mlx5_ib_create_wq,
				      single_stride_log_num_of_bytes);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	/* A longer command is OK only if the extra bytes are all zero */
	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return -EFAULT;
	}

	if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
		/* Striding RQ is the only understood comp_mask bit */
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	} else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
			mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
			return -EOPNOTSUPP;
		}
		if ((ucmd.single_stride_log_num_of_bytes <
		    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
		    (ucmd.single_stride_log_num_of_bytes >
		     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
			mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
				    ucmd.single_stride_log_num_of_bytes,
				    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
				    MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
			return -EINVAL;
		}
		if (!log_of_strides_valid(dev,
					  ucmd.single_wqe_log_num_of_strides)) {
			mlx5_ib_dbg(
				dev,
				"Invalid log num strides (%u. Range is %u - %u)\n",
				ucmd.single_wqe_log_num_of_strides,
				MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
			return -EINVAL;
		}
		rwq->single_stride_log_num_of_bytes =
			ucmd.single_stride_log_num_of_bytes;
		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
	}

	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	err = create_user_rq(dev, pd, udata, rwq, &ucmd);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	rwq->user_index = ucmd.user_index;
	return 0;
}

/*
 * mlx5_ib_create_wq() - verbs entry point for WQ creation.
 *
 * Only userspace WQs of type IB_WQT_RQ are supported; the delay-drop
 * flag additionally requires CAP_SYS_RAWIO.
 *
 * Return: the new ib_wq on success or an ERR_PTR().
 */
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_rwq *rwq;
	struct mlx5_ib_create_wq_resp resp = {};
	size_t min_resp_len;
	int err;

	/* Kernel consumers are not supported */
	if (!udata)
		return ERR_PTR(-ENOSYS);
	min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	if (!capable(CAP_SYS_RAWIO) &&
	    init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
		return ERR_PTR(-EPERM);

	dev = to_mdev(pd->device);
	switch (init_attr->wq_type) {
	case IB_WQT_RQ:
		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
		if (!rwq)
			return ERR_PTR(-ENOMEM);
		err = prepare_user_rq(pd, init_attr, udata, rwq);
		if (err)
			goto err;
		err = create_rq(rwq, pd, init_attr);
		if (err)
			goto err_user_rq;
		break;
	default:
		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
			    init_attr->wq_type);
		return ERR_PTR(-EINVAL);
	}

	rwq->ibwq.wq_num = rwq->core_qp.qpn;
	rwq->ibwq.state = IB_WQS_RESET;
	if (udata->outlen) {
		resp.response_length = offsetofend(
			struct mlx5_ib_create_wq_resp, response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	/* Route core RQ async events through mlx5_ib_wq_event() */
	rwq->core_qp.event = mlx5_ib_wq_event;
	rwq->ibwq.event_handler = init_attr->event_handler;
	return &rwq->ibwq;

err_copy:
	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
err_user_rq:
	destroy_user_rq(dev, pd, rwq, udata);
err:
	kfree(rwq);
	return ERR_PTR(err);
}

/*
 * mlx5_ib_destroy_wq() - tear down a WQ: firmware RQ first, then the
 * user memory, then the driver object.
 */
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	int ret;

	ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
	if (ret)
		return ret;
	destroy_user_rq(dev, wq->pd, rwq, udata);
	kfree(rwq);
	return 0;
}

/*
 * mlx5_ib_create_rwq_ind_table() - create an RX-WQ indirection table
 * (RQT) over 2^log_ind_tbl_size WQs.
 *
 * No input command is accepted (inlen bytes must all be zero); the table
 * size is bounded by the device's log_max_rqt_size cap.  The RQT inherits
 * the uid of the PD of the first WQ in the table.
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
		to_mrwq_ind_table(ib_rwq_ind_table);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
	int sz = 1 << init_attr->log_ind_tbl_size;
	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
	size_t min_resp_len;
	int inlen;
	int err;
	int i;
	u32 *in;
	void *rqtc;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return -EOPNOTSUPP;

	if (init_attr->log_ind_tbl_size >
	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
			    init_attr->log_ind_tbl_size,
			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
		return -EINVAL;
	}

	min_resp_len =
		offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return -EINVAL;

	/* Command length = base struct + one u32 RQ number per entry */
	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);

	rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
	MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);

	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
	kvfree(in);
	if (err)
		return err;

	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
	if (udata->outlen) {
		resp.response_length =
			offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
				    response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	return 0;

err_copy:
	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
	return err;
}

/* mlx5_ib_destroy_rwq_ind_table() - destroy the firmware RQT. */
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);

	return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
}

/*
 * mlx5_ib_modify_wq() - verbs entry point for modifying WQ state/flags.
 *
 * Parses the user command (struct mlx5_ib_modify_wq) and translates the
 * requested state/flag changes into a MODIFY_RQ firmware command.
 */
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	struct mlx5_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	int curr_wq_state;
int wq_state; 556279b20a6cSYishai Hadas int inlen; 556379b20a6cSYishai Hadas int err; 556479b20a6cSYishai Hadas void *rqc; 556579b20a6cSYishai Hadas void *in; 556679b20a6cSYishai Hadas 556770c1430fSLeon Romanovsky required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved); 556879b20a6cSYishai Hadas if (udata->inlen < required_cmd_sz) 556979b20a6cSYishai Hadas return -EINVAL; 557079b20a6cSYishai Hadas 557179b20a6cSYishai Hadas if (udata->inlen > sizeof(ucmd) && 557279b20a6cSYishai Hadas !ib_is_udata_cleared(udata, sizeof(ucmd), 557379b20a6cSYishai Hadas udata->inlen - sizeof(ucmd))) 557479b20a6cSYishai Hadas return -EOPNOTSUPP; 557579b20a6cSYishai Hadas 557679b20a6cSYishai Hadas if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) 557779b20a6cSYishai Hadas return -EFAULT; 557879b20a6cSYishai Hadas 557979b20a6cSYishai Hadas if (ucmd.comp_mask || ucmd.reserved) 558079b20a6cSYishai Hadas return -EOPNOTSUPP; 558179b20a6cSYishai Hadas 558279b20a6cSYishai Hadas inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 55831b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 558479b20a6cSYishai Hadas if (!in) 558579b20a6cSYishai Hadas return -ENOMEM; 558679b20a6cSYishai Hadas 558779b20a6cSYishai Hadas rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 558879b20a6cSYishai Hadas 5589f9744288SLeon Romanovsky curr_wq_state = wq_attr->curr_wq_state; 5590f9744288SLeon Romanovsky wq_state = wq_attr->wq_state; 559179b20a6cSYishai Hadas if (curr_wq_state == IB_WQS_ERR) 559279b20a6cSYishai Hadas curr_wq_state = MLX5_RQC_STATE_ERR; 559379b20a6cSYishai Hadas if (wq_state == IB_WQS_ERR) 559479b20a6cSYishai Hadas wq_state = MLX5_RQC_STATE_ERR; 559579b20a6cSYishai Hadas MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state); 559634d57585SYishai Hadas MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid); 559779b20a6cSYishai Hadas MLX5_SET(rqc, rqc, state, wq_state); 559879b20a6cSYishai Hadas 5599b1f74a84SNoa Osherovich if (wq_attr_mask & IB_WQ_FLAGS) { 5600b1f74a84SNoa Osherovich if 
(wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) { 5601b1f74a84SNoa Osherovich if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 5602b1f74a84SNoa Osherovich MLX5_CAP_ETH(dev->mdev, vlan_cap))) { 5603c4526fe2SRohit Chavan mlx5_ib_dbg(dev, "VLAN offloads are not supported\n"); 5604b1f74a84SNoa Osherovich err = -EOPNOTSUPP; 5605b1f74a84SNoa Osherovich goto out; 5606b1f74a84SNoa Osherovich } 5607b1f74a84SNoa Osherovich MLX5_SET64(modify_rq_in, in, modify_bitmask, 5608b1f74a84SNoa Osherovich MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD); 5609b1f74a84SNoa Osherovich MLX5_SET(rqc, rqc, vsd, 5610b1f74a84SNoa Osherovich (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1); 5611b1f74a84SNoa Osherovich } 5612b1383aa6SNoa Osherovich 5613b1383aa6SNoa Osherovich if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) { 5614b1383aa6SNoa Osherovich mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n"); 5615b1383aa6SNoa Osherovich err = -EOPNOTSUPP; 5616b1383aa6SNoa Osherovich goto out; 5617b1383aa6SNoa Osherovich } 5618b1f74a84SNoa Osherovich } 5619b1f74a84SNoa Osherovich 562023a6964eSMajd Dibbiny if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) { 56213e1f000fSParav Pandit u16 set_id; 56223e1f000fSParav Pandit 56233e1f000fSParav Pandit set_id = mlx5_ib_get_counters_id(dev, 0); 562423a6964eSMajd Dibbiny if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { 562523a6964eSMajd Dibbiny MLX5_SET64(modify_rq_in, in, modify_bitmask, 562623a6964eSMajd Dibbiny MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); 56273e1f000fSParav Pandit MLX5_SET(rqc, rqc, counter_set_id, set_id); 562823a6964eSMajd Dibbiny } else 56295a738b5dSJason Gunthorpe dev_info_once( 56305a738b5dSJason Gunthorpe &dev->ib_dev.dev, 56315a738b5dSJason Gunthorpe "Receive WQ counters are not supported on current FW\n"); 563223a6964eSMajd Dibbiny } 563323a6964eSMajd Dibbiny 5634e0b4b472SLeon Romanovsky err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in); 563579b20a6cSYishai Hadas if 
(!err) 563679b20a6cSYishai Hadas rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state; 563779b20a6cSYishai Hadas 5638b1f74a84SNoa Osherovich out: 5639b1f74a84SNoa Osherovich kvfree(in); 564079b20a6cSYishai Hadas return err; 564179b20a6cSYishai Hadas } 5642d0e84c0aSYishai Hadas 5643d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe { 5644d0e84c0aSYishai Hadas struct ib_cqe cqe; 5645d0e84c0aSYishai Hadas struct completion done; 5646d0e84c0aSYishai Hadas }; 5647d0e84c0aSYishai Hadas 5648d0e84c0aSYishai Hadas static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) 5649d0e84c0aSYishai Hadas { 5650d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe, 5651d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe, 5652d0e84c0aSYishai Hadas cqe); 5653d0e84c0aSYishai Hadas 5654d0e84c0aSYishai Hadas complete(&cqe->done); 5655d0e84c0aSYishai Hadas } 5656d0e84c0aSYishai Hadas 5657d0e84c0aSYishai Hadas /* This function returns only once the drained WR was completed */ 5658d0e84c0aSYishai Hadas static void handle_drain_completion(struct ib_cq *cq, 5659d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe *sdrain, 5660d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev) 5661d0e84c0aSYishai Hadas { 5662d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev; 5663d0e84c0aSYishai Hadas 5664d0e84c0aSYishai Hadas if (cq->poll_ctx == IB_POLL_DIRECT) { 5665d0e84c0aSYishai Hadas while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0) 5666d0e84c0aSYishai Hadas ib_process_cq_direct(cq, -1); 5667d0e84c0aSYishai Hadas return; 5668d0e84c0aSYishai Hadas } 5669d0e84c0aSYishai Hadas 5670d0e84c0aSYishai Hadas if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5671d0e84c0aSYishai Hadas struct mlx5_ib_cq *mcq = to_mcq(cq); 5672d0e84c0aSYishai Hadas bool triggered = false; 5673d0e84c0aSYishai Hadas unsigned long flags; 5674d0e84c0aSYishai Hadas 5675d0e84c0aSYishai Hadas spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 5676d0e84c0aSYishai 
Hadas /* Make sure that the CQ handler won't run if wasn't run yet */ 5677d0e84c0aSYishai Hadas if (!mcq->mcq.reset_notify_added) 5678d0e84c0aSYishai Hadas mcq->mcq.reset_notify_added = 1; 5679d0e84c0aSYishai Hadas else 5680d0e84c0aSYishai Hadas triggered = true; 5681d0e84c0aSYishai Hadas spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 5682d0e84c0aSYishai Hadas 5683d0e84c0aSYishai Hadas if (triggered) { 5684d0e84c0aSYishai Hadas /* Wait for any scheduled/running task to be ended */ 5685d0e84c0aSYishai Hadas switch (cq->poll_ctx) { 5686d0e84c0aSYishai Hadas case IB_POLL_SOFTIRQ: 5687d0e84c0aSYishai Hadas irq_poll_disable(&cq->iop); 5688d0e84c0aSYishai Hadas irq_poll_enable(&cq->iop); 5689d0e84c0aSYishai Hadas break; 5690d0e84c0aSYishai Hadas case IB_POLL_WORKQUEUE: 5691d0e84c0aSYishai Hadas cancel_work_sync(&cq->work); 5692d0e84c0aSYishai Hadas break; 5693d0e84c0aSYishai Hadas default: 5694d0e84c0aSYishai Hadas WARN_ON_ONCE(1); 5695d0e84c0aSYishai Hadas } 5696d0e84c0aSYishai Hadas } 5697d0e84c0aSYishai Hadas 5698d0e84c0aSYishai Hadas /* Run the CQ handler - this makes sure that the drain WR will 5699d0e84c0aSYishai Hadas * be processed if wasn't processed yet. 
5700d0e84c0aSYishai Hadas */ 57014e0e2ea1SYishai Hadas mcq->mcq.comp(&mcq->mcq, NULL); 5702d0e84c0aSYishai Hadas } 5703d0e84c0aSYishai Hadas 5704d0e84c0aSYishai Hadas wait_for_completion(&sdrain->done); 5705d0e84c0aSYishai Hadas } 5706d0e84c0aSYishai Hadas 5707d0e84c0aSYishai Hadas void mlx5_ib_drain_sq(struct ib_qp *qp) 5708d0e84c0aSYishai Hadas { 5709d0e84c0aSYishai Hadas struct ib_cq *cq = qp->send_cq; 5710d0e84c0aSYishai Hadas struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 5711d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe sdrain; 5712d34ac5cdSBart Van Assche const struct ib_send_wr *bad_swr; 5713d0e84c0aSYishai Hadas struct ib_rdma_wr swr = { 5714d0e84c0aSYishai Hadas .wr = { 5715d0e84c0aSYishai Hadas .next = NULL, 5716d0e84c0aSYishai Hadas { .wr_cqe = &sdrain.cqe, }, 5717d0e84c0aSYishai Hadas .opcode = IB_WR_RDMA_WRITE, 5718d0e84c0aSYishai Hadas }, 5719d0e84c0aSYishai Hadas }; 5720d0e84c0aSYishai Hadas int ret; 5721d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(qp->device); 5722d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev; 5723d0e84c0aSYishai Hadas 5724d0e84c0aSYishai Hadas ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 5725d0e84c0aSYishai Hadas if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5726d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 5727d0e84c0aSYishai Hadas return; 5728d0e84c0aSYishai Hadas } 5729d0e84c0aSYishai Hadas 5730d0e84c0aSYishai Hadas sdrain.cqe.done = mlx5_ib_drain_qp_done; 5731d0e84c0aSYishai Hadas init_completion(&sdrain.done); 5732d0e84c0aSYishai Hadas 5733029e88fdSLeon Romanovsky ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr); 5734d0e84c0aSYishai Hadas if (ret) { 5735d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 5736d0e84c0aSYishai Hadas return; 5737d0e84c0aSYishai Hadas } 5738d0e84c0aSYishai Hadas 5739d0e84c0aSYishai Hadas handle_drain_completion(cq, &sdrain, dev); 5740d0e84c0aSYishai Hadas } 5741d0e84c0aSYishai Hadas 
/*
 * Drain the receive queue: move the QP to ERR (flushing outstanding WRs),
 * post a marker recv WR and block until its (flush) completion arrives.
 */
void mlx5_ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx5_ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	const struct ib_recv_wr *bad_rwr;
	int ret;
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_core_dev *mdev = dev->mdev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	/* In internal-error state the modify may fail yet the drain can
	 * still proceed via the manual CQ flush in handle_drain_completion().
	 */
	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = mlx5_ib_drain_qp_done;
	init_completion(&rdrain.done);

	/* Drain variant of post_recv: posts even though the QP is in ERR. */
	ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &rdrain, dev);
}

/*
 * Bind a qp to a counter. If @counter is NULL then bind the qp to
 * the default counter
 */
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	int err = 0;

	mutex_lock(&mqp->mutex);
	/* In RESET there is no HW object yet; just record the binding. */
	if (mqp->state == IB_QPS_RESET) {
		qp->counter = counter;
		goto out;
	}

	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* In RTS the counter can be programmed into the HW QP right away. */
	if (mqp->state == IB_QPS_RTS) {
		err = __mlx5_ib_qp_set_counter(qp, counter);
		if (!err)
			qp->counter = counter;

		goto out;
	}

	/* Any other state: defer the HW update until the QP reaches RTS. */
	mqp->counter_pending = 1;
	qp->counter = counter;

out:
	mutex_unlock(&mqp->mutex);
	return err;
}

/*
 * Allocate the ordered workqueue used to dispatch QP async events
 * (mlx5_ib_qp_event_wq is a file-scope pointer declared earlier in this
 * file). Returns 0 on success, -ENOMEM on allocation failure.
 */
int mlx5_ib_qp_event_init(void)
{
	mlx5_ib_qp_event_wq = alloc_ordered_workqueue("mlx5_ib_qp_event_wq", 0);
	if (!mlx5_ib_qp_event_wq)
		return -ENOMEM;

	return 0;
}

/* Tear down the QP async-event workqueue created by mlx5_ib_qp_event_init(). */
void mlx5_ib_qp_event_cleanup(void)
{
	destroy_workqueue(mlx5_ib_qp_event_wq);
}