/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "qp.h"

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE		= 6,
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

enum raw_qp_set_mask_map {
	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID	= 1UL << 0,
	MLX5_RAW_QP_RATE_LIMIT		= 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
	u16 operation;

	u32 set_mask; /* raw_qp_set_mask_map */

	struct mlx5_rate_limit rl;

	u8 rq_q_ctr_id;
	u16 port;
};

static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}
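
/*
 * The mlx5_ib_read_wqe_{sq,rq,srq} helpers below copy a WQE (or part of
 * one) out of a work queue into a caller-supplied kernel buffer, for
 * both user (umem backed) and kernel owned queues; callers such as the
 * ODP page-fault path use them to parse a faulting WQE.
 */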

/**
 * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of one) from a user
 * work queue to a kernel buffer
 *
 * @umem: user space memory where the WQ lives
 * @buffer: buffer to copy to
 * @buflen: buffer length
 * @wqe_index: index of the WQE to copy from
 * @wq_offset: offset to the start of the WQ
 * @wq_wqe_cnt: number of WQEs in the WQ
 * @wq_wqe_shift: log2 of the WQE size
 * @bcnt: number of bytes to copy
 * @bytes_copied: number of bytes actually copied (output)
 *
 * Copies at most bcnt bytes from the start of the WQE; it does not
 * guarantee that the entire WQE is copied.
 *
 * Return: zero on success, or an error code.
 */
static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
					size_t buflen, int wqe_index,
					int wq_offset, int wq_wqe_cnt,
					int wq_wqe_shift, int bcnt,
					size_t *bytes_copied)
{
	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
	size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
	size_t copy_length;
	int ret;

	/* don't copy more than requested, more than the buffer length or
	 * beyond the WQ end
	 */
	copy_length = min_t(u32, buflen, wq_end - offset);
	copy_length = min_t(u32, copy_length, bcnt);

	ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
	if (ret)
		return ret;

	if (bytes_copied)
		*bytes_copied = copy_length;

	return 0;
}

static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
				      void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_wqe_ctrl_seg *ctrl;
	size_t bytes_copied = 0;
	size_t wqe_length;
	void *p;
	int ds;

	wqe_index = wqe_index & qp->sq.fbc.sz_m1;

	/* read the control segment first */
	p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
	ctrl = p;
	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	wqe_length = ds * MLX5_WQE_DS_UNITS;

	/* read the rest of the WQE if it spreads over more than one stride */
	while (bytes_copied < wqe_length) {
		size_t copy_length =
			min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);

		if (!copy_length)
			break;

		memcpy(buffer + bytes_copied, p, copy_length);
		bytes_copied += copy_length;

		wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
		p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
	}
	*bc = bytes_copied;
	return 0;
}
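
/*
 * Sizing example (assumes the usual 16-byte DS unit and 64-byte
 * MLX5_SEND_WQE_BB stride): a ctrl segment with ds == 4 describes a
 * 64-byte WQE that fits in one stride, while ds == 8 spans two strides
 * and takes the copy loop above through a second iteration.
 */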

static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
				    void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->sq;
	struct mlx5_wqe_ctrl_seg *ctrl;
	size_t bytes_copied;
	size_t bytes_copied2;
	size_t wqe_length;
	int ret;
	int ds;

	/* at first read as much as possible */
	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
					   wq->offset, wq->wqe_cnt,
					   wq->wqe_shift, buflen,
					   &bytes_copied);
	if (ret)
		return ret;

	/* we need at least the control segment size to proceed */
	if (bytes_copied < sizeof(*ctrl))
		return -EINVAL;

	ctrl = buffer;
	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	wqe_length = ds * MLX5_WQE_DS_UNITS;

	/* if we copied enough then we are done */
	if (bytes_copied >= wqe_length) {
		*bc = bytes_copied;
		return 0;
	}

	/* otherwise this is a wrapped-around WQE, so read the remaining
	 * bytes starting from wqe_index 0
	 */
	ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
					   buflen - bytes_copied, 0, wq->offset,
					   wq->wqe_cnt, wq->wqe_shift,
					   wqe_length - bytes_copied,
					   &bytes_copied2);
	if (ret)
		return ret;
	*bc = bytes_copied + bytes_copied2;
	return 0;
}

int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;

	if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (!umem)
		return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
						  buflen, bc);

	return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
}
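
/*
 * Unlike SQ WQEs, RQ and SRQ WQEs have a fixed power-of-two stride
 * (1 << wqe_shift), so the read helpers below need neither ctrl segment
 * parsing nor wrap-around handling; one bounded copy is enough.
 */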

static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
				    void *buffer, size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->rq;
	size_t bytes_copied;
	int ret;

	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
					   wq->offset, wq->wqe_cnt,
					   wq->wqe_shift, buflen,
					   &bytes_copied);
	if (ret)
		return ret;
	*bc = bytes_copied;
	return 0;
}

int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc)
{
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct ib_umem *umem = base->ubuffer.umem;
	struct mlx5_ib_wq *wq = &qp->rq;
	size_t wqe_size = 1 << wq->wqe_shift;

	if (buflen < wqe_size)
		return -EINVAL;

	if (!umem)
		return -EOPNOTSUPP;

	return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
}

static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
				     void *buffer, size_t buflen, size_t *bc)
{
	struct ib_umem *umem = srq->umem;
	size_t bytes_copied;
	int ret;

	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
					   srq->msrq.max, srq->msrq.wqe_shift,
					   buflen, &bytes_copied);
	if (ret)
		return ret;
	*bc = bytes_copied;
	return 0;
}

int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc)
{
	struct ib_umem *umem = srq->umem;
	size_t wqe_size = 1 << srq->msrq.wqe_shift;

	if (buflen < wqe_size)
		return -EINVAL;

	if (!umem)
		return -EOPNOTSUPP;

	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
}

static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
		/* This event is only valid for trans_qps */
		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
	}

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);

		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
				return -EINVAL;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			if ((1 << qp->rq.wqe_shift) /
				    sizeof(struct mlx5_wqe_data_seg) <
			    wq_sig)
				return -EINVAL;
			qp->rq.max_gs =
				(1 << qp->rq.wqe_shift) /
					sizeof(struct mlx5_wqe_data_seg) -
				wq_sig;
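			/*
			 * Example: rq_wqe_shift == 6 gives 64-byte RQ WQEs;
			 * with 16-byte data segments that yields max_gs == 4
			 * scatter entries, one less when a WQ signature
			 * segment is carved out of the WQE.
			 */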
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size =
				wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
					 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs =
				(1 << qp->rq.wqe_shift) /
					sizeof(struct mlx5_wqe_data_seg) -
				wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}

static int sq_overhead(struct ib_qp_init_attr *attr)
{
	int size = 0;

	switch (attr->qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_atomic_seg) +
			    sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg) +
			    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
			    MLX5_IB_UMR_OCTOWORD);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_UD:
		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			size += sizeof(struct mlx5_wqe_eth_pad) +
				sizeof(struct mlx5_wqe_eth_seg);
		/* fall through */
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
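
/*
 * sq_overhead() returns the fixed per-WQE segment cost for a QP type,
 * i.e. everything except the data/inline segments; calc_send_wqe()
 * below adds the larger of the SGE array or the inline buffer on top
 * and aligns the result to the 64-byte basic block.
 */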

static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
	int max_sge;

	if (attr->qp_type == IB_QPT_RC)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else if (attr->qp_type == IB_QPT_XRC_INI)
		max_sge = (min_t(int, wqe_size, 512) -
			   sizeof(struct mlx5_wqe_ctrl_seg) -
			   sizeof(struct mlx5_wqe_xrc_seg) -
			   sizeof(struct mlx5_wqe_raddr_seg)) /
			sizeof(struct mlx5_wqe_data_seg);
	else
		max_sge = (wqe_size - sq_overhead(attr)) /
			sizeof(struct mlx5_wqe_data_seg);

	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
		     sizeof(struct mlx5_wqe_data_seg));
}

static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;
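
	/*
	 * The SQ is accounted in 64-byte basic blocks: wqe_cnt below counts
	 * MLX5_SEND_WQE_BB units, while max_post counts full-sized WQEs
	 * (wq_size / wqe_size).
	 */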
	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = get_send_sge(attr, wqe_size);
	if (qp->sq.max_gs < attr->cap.max_send_sge)
		return -ENOMEM;

	attr->cap.max_send_sge = qp->sq.max_gs;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}

static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd,
			     struct mlx5_ib_qp_base *base,
			     struct ib_qp_init_attr *attr)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	if (attr->qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
	} else {
		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
					 (qp->sq.wqe_cnt << 6);
	}

	return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
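
/*
 * Blue flame register (BFREG) allocation below distinguishes three
 * classes: index 0 is reserved for plain 64-bit doorbells and may be
 * shared freely, "medium" BFREGs are shared and reference counted (the
 * least used one is picked), and "high" class BFREGs are handed out
 * exclusively (their count only ever goes 0 -> 1).
 */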

enum {
	/* this is the first blue flame register in the array of bfregs assigned
	 * to a process. Since we do not use it for blue flame but rather
	 * regular 64 bit doorbells, we do not need a lock for maintaining
	 * "odd/even" order
	 */
	NUM_NON_BLUE_FLAME_BFREGS = 1,
};

static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
	return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}

static int num_med_bfreg(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi)
{
	int n;

	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
	    NUM_NON_BLUE_FLAME_BFREGS;

	return n >= 0 ? n : 0;
}

static int first_med_bfreg(struct mlx5_ib_dev *dev,
			   struct mlx5_bfreg_info *bfregi)
{
	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
}

static int first_hi_bfreg(struct mlx5_ib_dev *dev,
			  struct mlx5_bfreg_info *bfregi)
{
	int med;

	med = num_med_bfreg(dev, bfregi);
	return ++med;
}

static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
				  struct mlx5_bfreg_info *bfregi)
{
	int i;

	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
		if (!bfregi->count[i]) {
			bfregi->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi)
{
	int minidx = first_med_bfreg(dev, bfregi);
	int i;

	if (minidx < 0)
		return minidx;

	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
		if (bfregi->count[i] < bfregi->count[minidx])
			minidx = i;
		if (!bfregi->count[minidx])
			break;
	}

	bfregi->count[minidx]++;
	return minidx;
}

static int alloc_bfreg(struct mlx5_ib_dev *dev,
		       struct mlx5_bfreg_info *bfregi)
{
	int bfregn = -ENOMEM;

	if (bfregi->lib_uar_dyn)
		return -EINVAL;

	mutex_lock(&bfregi->lock);
	if (bfregi->ver >= 2) {
		bfregn = alloc_high_class_bfreg(dev, bfregi);
		if (bfregn < 0)
			bfregn = alloc_med_class_bfreg(dev, bfregi);
	}

	if (bfregn < 0) {
		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
		bfregn = 0;
		bfregi->count[bfregn]++;
	}
	mutex_unlock(&bfregi->lock);

	return bfregn;
}

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
	mutex_lock(&bfregi->lock);
	bfregi->count[bfregn]--;
	mutex_unlock(&bfregi->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
	case MLX5_IB_QPT_DCI:		return MLX5_QP_ST_DCI;
	case IB_QPT_RAW_PACKET:		return MLX5_QP_ST_RAW_ETHERTYPE;
	default:			return -EINVAL;
	}
}

static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
			     struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
			       struct mlx5_ib_cq *recv_cq);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg)
{
	unsigned int bfregs_per_sys_page;
	u32 index_of_sys_page;
	u32 offset;
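
	/*
	 * Each system page holds bfregs_per_sys_page BFREGs, so the backing
	 * UAR is found by dividing bfregn down to a sys-page index (shifted
	 * past the static pages for dynamically allocated BFREGs) and adding
	 * the offset of the UAR within that page.
	 */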
	if (bfregi->lib_uar_dyn)
		return -EINVAL;

	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
				MLX5_NON_FP_BFREGS_PER_UAR;
	index_of_sys_page = bfregn / bfregs_per_sys_page;

	if (dyn_bfreg) {
		index_of_sys_page += bfregi->num_static_sys_pages;

		if (index_of_sys_page >= bfregi->num_sys_pages)
			return -EINVAL;

		if (bfregn > bfregi->num_dyn_bfregs ||
		    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
			mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
			return -EINVAL;
		}
	}

	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
	return bfregi->sys_pages[index_of_sys_page] + offset;
}

static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			    unsigned long addr, size_t size,
			    struct ib_umem **umem, int *npages, int *page_shift,
			    int *ncont, u32 *offset)
{
	int err;

	*umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
	if (IS_ERR(*umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		return PTR_ERR(*umem);
	}

	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
		    addr, size, *npages, *page_shift, *ncont, *offset);

	return 0;

err_umem:
	ib_umem_release(*umem);
	*umem = NULL;

	return err;
}
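
/*
 * Note: mlx5_ib_cont_pages() picks the largest page size the umem layout
 * allows; npages counts system pages, while ncont counts compound pages
 * of (1 << page_shift) bytes, which is what gets written into the PAS
 * array of the create command.
 */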

static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context =
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext);

	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
		atomic_dec(&dev->delay_drop.rqs_cnt);

	mlx5_ib_db_unmap_user(context, &rwq->db);
	ib_umem_release(rwq->umem);
}

static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
			  struct mlx5_ib_create_wq *ucmd)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	int page_shift = 0;
	int npages;
	u32 offset = 0;
	int ncont = 0;
	int err;

	if (!ucmd->buf_addr)
		return -EINVAL;

	rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
	if (IS_ERR(rwq->umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		err = PTR_ERR(rwq->umem);
		return err;
	}

	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
				     &rwq->rq_page_offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	rwq->rq_num_pas = ncont;
	rwq->page_shift = page_shift;
	rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

	mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
		    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
		    npages, page_shift, ncont, offset);

	err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_umem;
	}

	return 0;

err_umem:
	ib_umem_release(rwq->umem);
	return err;
}

static int adjust_bfregn(struct mlx5_ib_dev *dev,
			 struct mlx5_bfreg_info *bfregi, int bfregn)
{
	return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
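
/*
 * _create_user_qp() flow: resolve the doorbell/blue-flame register (a
 * user-supplied UAR page, a user-supplied BFREG, or one allocated here),
 * validate the user buffer sizes, pin the WQ buffer, fill the PAS array
 * and qpc fields of the create_qp command, and finally map the user
 * doorbell record.
 */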
static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			   struct mlx5_ib_qp *qp, struct ib_udata *udata,
			   struct ib_qp_init_attr *attr, u32 **in,
			   struct mlx5_ib_create_qp_resp *resp, int *inlen,
			   struct mlx5_ib_qp_base *base,
			   struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
	int page_shift = 0;
	int uar_index = 0;
	int npages;
	u32 offset = 0;
	int bfregn;
	int ncont = 0;
	__be64 *pas;
	void *qpc;
	int err;
	u16 uid;
	u32 uar_flags;

	context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
					    ibucontext);
	uar_flags = qp->flags_en &
		    (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
	switch (uar_flags) {
	case MLX5_QP_FLAG_UAR_PAGE_INDEX:
		uar_index = ucmd->bfreg_index;
		bfregn = MLX5_IB_INVALID_BFREG;
		break;
	case MLX5_QP_FLAG_BFREG_INDEX:
		uar_index = bfregn_to_uar_index(dev, &context->bfregi,
						ucmd->bfreg_index, true);
		if (uar_index < 0)
			return uar_index;
		bfregn = MLX5_IB_INVALID_BFREG;
		break;
	case 0:
		if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
			return -EINVAL;
		bfregn = alloc_bfreg(dev, &context->bfregi);
		if (bfregn < 0)
			return bfregn;
		break;
	default:
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
						false);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, ucmd, base, attr);
	if (err)
		goto err_bfreg;

	if (ucmd->buf_addr && ubuffer->buf_size) {
		ubuffer->buf_addr = ucmd->buf_addr;
		err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
				       ubuffer->buf_size, &ubuffer->umem,
				       &npages, &page_shift, &ncont, &offset);
		if (err)
			goto err_bfreg;
	} else {
		ubuffer->umem = NULL;
	}

	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}

	uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
	MLX5_SET(create_qp_in, *in, uid, uid);
	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
	if (ubuffer->umem)
		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);

	MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, page_offset, offset);

	MLX5_SET(qpc, qpc, uar_page, uar_index);
	if (bfregn != MLX5_IB_INVALID_BFREG)
		resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
	else
		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
	qp->bfregn = bfregn;

	err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	return 0;

err_free:
	kvfree(*in);

err_umem:
	ib_umem_release(ubuffer->umem);

err_bfreg:
	if (bfregn != MLX5_IB_INVALID_BFREG)
		mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
	return err;
}
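
/*
 * destroy_qp() is the common teardown for user and kernel QPs; udata
 * tells which flavor of resources was actually allocated at create time.
 */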
static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
		       struct mlx5_ib_qp_base *base, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	if (udata) {
		/* User QP */
		mlx5_ib_db_unmap_user(context, &qp->db);
		ib_umem_release(base->ubuffer.umem);

		/*
		 * Free only the BFREGs which are handled by the kernel.
		 * BFREGs of UARs allocated dynamically are handled by user.
		 */
		if (qp->bfregn != MLX5_IB_INVALID_BFREG)
			mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
		return;
	}

	/* Kernel QP */
	kvfree(qp->sq.wqe_head);
	kvfree(qp->sq.w_list);
	kvfree(qp->sq.wrid);
	kvfree(qp->sq.wr_data);
	kvfree(qp->rq.wrid);
	if (qp->db.db)
		mlx5_db_free(dev->mdev, &qp->db);
	if (qp->buf.frags)
		mlx5_frag_buf_free(dev->mdev, &qp->buf);
}

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first following address after the end
 * of the fragment or the SQ. Accordingly, during the WQE construction
 * which repetitively increases the pointer to write the next data, it
 * simply should check if it gets to an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}

static int _create_kernel_qp(struct mlx5_ib_dev *dev,
			     struct ib_qp_init_attr *init_attr,
			     struct mlx5_ib_qp *qp, u32 **in, int *inlen,
			     struct mlx5_ib_qp_base *base)
{
	int uar_index;
	void *qpc;
	int err;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		qp->bf.bfreg = &dev->fp_bfreg;
	else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
		qp->bf.bfreg = &dev->wc_bfreg;
	else
		qp->bf.bfreg = &dev->bfreg;

	/* We need to divide by two since each register is comprised of
	 * two buffers of identical size, namely odd and even
	 */
	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
	uar_index = qp->bf.bfreg->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
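	/* calc_sq_size() returned the SQ byte size (in err) on success, so
	 * the overall WQ buffer is the RQ footprint plus that value.
	 */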
	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
				       &qp->buf, dev->mdev->priv.numa_node);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (qp->rq.wqe_cnt)
		mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
			      ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);

	if (qp->sq.wqe_cnt) {
		int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
					MLX5_SEND_WQE_BB;
		mlx5_init_fbc_offset(qp->buf.frags +
				     (qp->sq.offset / PAGE_SIZE),
				     ilog2(MLX5_SEND_WQE_BB),
				     ilog2(qp->sq.wqe_cnt),
				     sq_strides_offset, &qp->sq.fbc);

		qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
	}

	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
	*in = kvzalloc(*inlen, GFP_KERNEL);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
	MLX5_SET(qpc, qpc, uar_page, uar_index);
	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	/* Set "fast registration enabled" for all kernel QPs */
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);

	if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
		MLX5_SET(qpc, qpc, deth_sqpn, 1);

	mlx5_fill_page_frag_array(&qp->buf,
				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
							 *in, pas));

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
				     sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
					sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
				     sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
				       sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
1188e126ba97SEli Cohen 1189e126ba97SEli Cohen return 0; 1190e126ba97SEli Cohen 1191e126ba97SEli Cohen err_wrid: 1192b5883008SLi Dongyang kvfree(qp->sq.wqe_head); 1193b5883008SLi Dongyang kvfree(qp->sq.w_list); 1194b5883008SLi Dongyang kvfree(qp->sq.wrid); 1195b5883008SLi Dongyang kvfree(qp->sq.wr_data); 1196b5883008SLi Dongyang kvfree(qp->rq.wrid); 1197f4044dacSEli Cohen mlx5_db_free(dev->mdev, &qp->db); 1198e126ba97SEli Cohen 1199e126ba97SEli Cohen err_free: 1200479163f4SAl Viro kvfree(*in); 1201e126ba97SEli Cohen 1202e126ba97SEli Cohen err_buf: 120334f4c955SGuy Levi mlx5_frag_buf_free(dev->mdev, &qp->buf); 1204e126ba97SEli Cohen return err; 1205e126ba97SEli Cohen } 1206e126ba97SEli Cohen 120709a7d9ecSSaeed Mahameed static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) 1208e126ba97SEli Cohen { 12097aede1a2SLeon Romanovsky if (attr->srq || (qp->type == IB_QPT_XRC_TGT) || 12107aede1a2SLeon Romanovsky (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI)) 121109a7d9ecSSaeed Mahameed return MLX5_SRQ_RQ; 1212e126ba97SEli Cohen else if (!qp->has_rq) 121309a7d9ecSSaeed Mahameed return MLX5_ZERO_LEN_RQ; 12147aede1a2SLeon Romanovsky 121509a7d9ecSSaeed Mahameed return MLX5_NON_ZERO_RQ; 1216e126ba97SEli Cohen } 1217e126ba97SEli Cohen 12180fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 1219c2e53b2cSYishai Hadas struct mlx5_ib_qp *qp, 12201cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u32 tdn, 12211cd6dbd3SYishai Hadas struct ib_pd *pd) 12220fb2ed66Smajd@mellanox.com { 1223e0b4b472SLeon Romanovsky u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 12240fb2ed66Smajd@mellanox.com void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 12250fb2ed66Smajd@mellanox.com 12261cd6dbd3SYishai Hadas MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid); 12270fb2ed66Smajd@mellanox.com MLX5_SET(tisc, tisc, transport_domain, tdn); 12282be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 1229c2e53b2cSYishai Hadas MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); 1230c2e53b2cSYishai Hadas 1231e0b4b472SLeon Romanovsky return mlx5_core_create_tis(dev->mdev, in, &sq->tisn); 12320fb2ed66Smajd@mellanox.com } 12330fb2ed66Smajd@mellanox.com 12340fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 12351cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, struct ib_pd *pd) 12360fb2ed66Smajd@mellanox.com { 12371cd6dbd3SYishai Hadas mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid); 12380fb2ed66Smajd@mellanox.com } 12390fb2ed66Smajd@mellanox.com 1240d5ed8ac3SMark Bloch static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq) 1241b96c9ddeSMark Bloch { 1242b96c9ddeSMark Bloch if (sq->flow_rule) 1243b96c9ddeSMark Bloch mlx5_del_flow_rules(sq->flow_rule); 1244d5ed8ac3SMark Bloch sq->flow_rule = NULL; 1245b96c9ddeSMark Bloch } 1246b96c9ddeSMark Bloch 12470fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, 1248b0ea0fa5SJason Gunthorpe struct ib_udata *udata, 12490fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq, void *qpin, 12500fb2ed66Smajd@mellanox.com struct ib_pd *pd) 12510fb2ed66Smajd@mellanox.com { 12520fb2ed66Smajd@mellanox.com struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer; 12530fb2ed66Smajd@mellanox.com __be64 *pas; 12540fb2ed66Smajd@mellanox.com void *in; 12550fb2ed66Smajd@mellanox.com void *sqc; 12560fb2ed66Smajd@mellanox.com void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 12570fb2ed66Smajd@mellanox.com void *wq; 12580fb2ed66Smajd@mellanox.com int inlen; 
12590fb2ed66Smajd@mellanox.com int err; 12600fb2ed66Smajd@mellanox.com int page_shift = 0; 12610fb2ed66Smajd@mellanox.com int npages; 12620fb2ed66Smajd@mellanox.com int ncont = 0; 12630fb2ed66Smajd@mellanox.com u32 offset = 0; 12640fb2ed66Smajd@mellanox.com 1265b0ea0fa5SJason Gunthorpe err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size, 1266b0ea0fa5SJason Gunthorpe &sq->ubuffer.umem, &npages, &page_shift, &ncont, 1267b0ea0fa5SJason Gunthorpe &offset); 12680fb2ed66Smajd@mellanox.com if (err) 12690fb2ed66Smajd@mellanox.com return err; 12700fb2ed66Smajd@mellanox.com 12710fb2ed66Smajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont; 12721b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 12730fb2ed66Smajd@mellanox.com if (!in) { 12740fb2ed66Smajd@mellanox.com err = -ENOMEM; 12750fb2ed66Smajd@mellanox.com goto err_umem; 12760fb2ed66Smajd@mellanox.com } 12770fb2ed66Smajd@mellanox.com 1278c14003f0SYishai Hadas MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid); 12790fb2ed66Smajd@mellanox.com sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 12800fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, flush_in_error_en, 1); 1281795b609cSBodong Wang if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe)) 1282795b609cSBodong Wang MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1); 12830fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); 12840fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index)); 12850fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd)); 12860fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, tis_lst_sz, 1); 12870fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, tis_num_0, sq->tisn); 128896dc3fc5SNoa Osherovich if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 128996dc3fc5SNoa Osherovich MLX5_CAP_ETH(dev->mdev, swp)) 129096dc3fc5SNoa Osherovich MLX5_SET(sqc, sqc, allow_swp, 1); 12910fb2ed66Smajd@mellanox.com 12920fb2ed66Smajd@mellanox.com wq = MLX5_ADDR_OF(sqc, sqc, wq); 12930fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 12940fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd)); 12950fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page)); 12960fb2ed66Smajd@mellanox.com MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr)); 12970fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 12980fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size)); 12990fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT); 13000fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, page_offset, offset); 13010fb2ed66Smajd@mellanox.com 13020fb2ed66Smajd@mellanox.com pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 13030fb2ed66Smajd@mellanox.com mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0); 13040fb2ed66Smajd@mellanox.com 1305333fbaa0SLeon Romanovsky err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp); 13060fb2ed66Smajd@mellanox.com 13070fb2ed66Smajd@mellanox.com kvfree(in); 13080fb2ed66Smajd@mellanox.com 13090fb2ed66Smajd@mellanox.com if (err) 13100fb2ed66Smajd@mellanox.com goto err_umem; 13110fb2ed66Smajd@mellanox.com 13120fb2ed66Smajd@mellanox.com return 0; 13130fb2ed66Smajd@mellanox.com 13140fb2ed66Smajd@mellanox.com err_umem: 13150fb2ed66Smajd@mellanox.com ib_umem_release(sq->ubuffer.umem); 13160fb2ed66Smajd@mellanox.com sq->ubuffer.umem = NULL; 13170fb2ed66Smajd@mellanox.com 13180fb2ed66Smajd@mellanox.com return err; 
13190fb2ed66Smajd@mellanox.com } 13200fb2ed66Smajd@mellanox.com 13210fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, 13220fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq) 13230fb2ed66Smajd@mellanox.com { 1324d5ed8ac3SMark Bloch destroy_flow_rule_vport_sq(sq); 1325333fbaa0SLeon Romanovsky mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp); 13260fb2ed66Smajd@mellanox.com ib_umem_release(sq->ubuffer.umem); 13270fb2ed66Smajd@mellanox.com } 13280fb2ed66Smajd@mellanox.com 13292c292dbbSBoris Pismenny static size_t get_rq_pas_size(void *qpc) 13300fb2ed66Smajd@mellanox.com { 13310fb2ed66Smajd@mellanox.com u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; 13320fb2ed66Smajd@mellanox.com u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); 13330fb2ed66Smajd@mellanox.com u32 log_rq_size = MLX5_GET(qpc, qpc, log_rq_size); 13340fb2ed66Smajd@mellanox.com u32 page_offset = MLX5_GET(qpc, qpc, page_offset); 13350fb2ed66Smajd@mellanox.com u32 po_quanta = 1 << (log_page_size - 6); 13360fb2ed66Smajd@mellanox.com u32 rq_sz = 1 << (log_rq_size + 4 + log_rq_stride); 13370fb2ed66Smajd@mellanox.com u32 page_size = 1 << log_page_size; 13380fb2ed66Smajd@mellanox.com u32 rq_sz_po = rq_sz + (page_offset * po_quanta); 13390fb2ed66Smajd@mellanox.com u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; 13400fb2ed66Smajd@mellanox.com 13410fb2ed66Smajd@mellanox.com return rq_num_pas * sizeof(u64); 13420fb2ed66Smajd@mellanox.com } 13430fb2ed66Smajd@mellanox.com 13440fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 13452c292dbbSBoris Pismenny struct mlx5_ib_rq *rq, void *qpin, 134634d57585SYishai Hadas size_t qpinlen, struct ib_pd *pd) 13470fb2ed66Smajd@mellanox.com { 1348358e42eaSMajd Dibbiny struct mlx5_ib_qp *mqp = rq->base.container_mibqp; 13490fb2ed66Smajd@mellanox.com __be64 *pas; 13500fb2ed66Smajd@mellanox.com __be64 *qp_pas; 13510fb2ed66Smajd@mellanox.com void *in; 13520fb2ed66Smajd@mellanox.com void *rqc; 13530fb2ed66Smajd@mellanox.com void *wq; 13540fb2ed66Smajd@mellanox.com void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 13552c292dbbSBoris Pismenny size_t rq_pas_size = get_rq_pas_size(qpc); 13562c292dbbSBoris Pismenny size_t inlen; 13570fb2ed66Smajd@mellanox.com int err; 13582c292dbbSBoris Pismenny 13592c292dbbSBoris Pismenny if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas)) 13602c292dbbSBoris Pismenny return -EINVAL; 13610fb2ed66Smajd@mellanox.com 13620fb2ed66Smajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; 13631b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 13640fb2ed66Smajd@mellanox.com if (!in) 13650fb2ed66Smajd@mellanox.com return -ENOMEM; 13660fb2ed66Smajd@mellanox.com 136734d57585SYishai Hadas MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid); 13680fb2ed66Smajd@mellanox.com rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 1369e4cc4fa7SNoa Osherovich if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING)) 13700fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, vsd, 1); 13710fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); 13720fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 13730fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, flush_in_error_en, 1); 13740fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index)); 13750fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv)); 13760fb2ed66Smajd@mellanox.com 13772be08c30SLeon Romanovsky if (mqp->flags & 
IB_QP_CREATE_SCATTER_FCS) 1378358e42eaSMajd Dibbiny MLX5_SET(rqc, rqc, scatter_fcs, 1); 1379358e42eaSMajd Dibbiny 13800fb2ed66Smajd@mellanox.com wq = MLX5_ADDR_OF(rqc, rqc, wq); 13810fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 1382b1383aa6SNoa Osherovich if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING) 1383b1383aa6SNoa Osherovich MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 13840fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset)); 13850fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd)); 13860fb2ed66Smajd@mellanox.com MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr)); 13870fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4); 13880fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size)); 13890fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size)); 13900fb2ed66Smajd@mellanox.com 13910fb2ed66Smajd@mellanox.com pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 13920fb2ed66Smajd@mellanox.com qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas); 13930fb2ed66Smajd@mellanox.com memcpy(pas, qp_pas, rq_pas_size); 13940fb2ed66Smajd@mellanox.com 1395333fbaa0SLeon Romanovsky err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp); 13960fb2ed66Smajd@mellanox.com 13970fb2ed66Smajd@mellanox.com kvfree(in); 13980fb2ed66Smajd@mellanox.com 13990fb2ed66Smajd@mellanox.com return err; 14000fb2ed66Smajd@mellanox.com } 14010fb2ed66Smajd@mellanox.com 14020fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 14030fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq) 14040fb2ed66Smajd@mellanox.com { 1405333fbaa0SLeon Romanovsky mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp); 14060fb2ed66Smajd@mellanox.com } 14070fb2ed66Smajd@mellanox.com 14080042f9e4SMark Bloch static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, 14090042f9e4SMark Bloch struct mlx5_ib_rq *rq, 1410443c1cf9SYishai Hadas u32 qp_flags_en, 1411443c1cf9SYishai Hadas struct ib_pd *pd) 14120042f9e4SMark Bloch { 14130042f9e4SMark Bloch if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 14140042f9e4SMark Bloch MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) 14150042f9e4SMark Bloch mlx5_ib_disable_lb(dev, false, true); 1416443c1cf9SYishai Hadas mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid); 14170042f9e4SMark Bloch } 14180042f9e4SMark Bloch 14190fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, 1420f95ef6cbSMaor Gottlieb struct mlx5_ib_rq *rq, u32 tdn, 1421e0b4b472SLeon Romanovsky u32 *qp_flags_en, struct ib_pd *pd, 1422e0b4b472SLeon Romanovsky u32 *out) 14230fb2ed66Smajd@mellanox.com { 1424175edba8SMark Bloch u8 lb_flag = 0; 14250fb2ed66Smajd@mellanox.com u32 *in; 14260fb2ed66Smajd@mellanox.com void *tirc; 14270fb2ed66Smajd@mellanox.com int inlen; 14280fb2ed66Smajd@mellanox.com int err; 14290fb2ed66Smajd@mellanox.com 14300fb2ed66Smajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(create_tir_in); 14311b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 14320fb2ed66Smajd@mellanox.com if (!in) 14330fb2ed66Smajd@mellanox.com return -ENOMEM; 14340fb2ed66Smajd@mellanox.com 1435443c1cf9SYishai Hadas MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid); 14360fb2ed66Smajd@mellanox.com tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 14370fb2ed66Smajd@mellanox.com MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); 14380fb2ed66Smajd@mellanox.com 
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn); 14390fb2ed66Smajd@mellanox.com MLX5_SET(tirc, tirc, transport_domain, tdn); 1440175edba8SMark Bloch if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS) 1441f95ef6cbSMaor Gottlieb MLX5_SET(tirc, tirc, tunneled_offload_en, 1); 14420fb2ed66Smajd@mellanox.com 1443175edba8SMark Bloch if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) 1444175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 1445175edba8SMark Bloch 1446175edba8SMark Bloch if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) 1447175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; 1448175edba8SMark Bloch 14496a4d00beSMark Bloch if (dev->is_rep) { 1450175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 1451175edba8SMark Bloch *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC; 1452175edba8SMark Bloch } 1453175edba8SMark Bloch 1454175edba8SMark Bloch MLX5_SET(tirc, tirc, self_lb_block, lb_flag); 1455e0b4b472SLeon Romanovsky MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 1456e0b4b472SLeon Romanovsky err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); 14571f1d6abbSAriel Levkovich rq->tirn = MLX5_GET(create_tir_out, out, tirn); 14580042f9e4SMark Bloch if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { 14590042f9e4SMark Bloch err = mlx5_ib_enable_lb(dev, false, true); 14600042f9e4SMark Bloch 14610042f9e4SMark Bloch if (err) 1462443c1cf9SYishai Hadas destroy_raw_packet_qp_tir(dev, rq, 0, pd); 14630042f9e4SMark Bloch } 14640fb2ed66Smajd@mellanox.com kvfree(in); 14650fb2ed66Smajd@mellanox.com 14660fb2ed66Smajd@mellanox.com return err; 14670fb2ed66Smajd@mellanox.com } 14680fb2ed66Smajd@mellanox.com 14690fb2ed66Smajd@mellanox.com static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 14702c292dbbSBoris Pismenny u32 *in, size_t inlen, 14717f72052cSYishai Hadas struct ib_pd *pd, 14727f72052cSYishai Hadas struct ib_udata *udata, 14737f72052cSYishai Hadas struct mlx5_ib_create_qp_resp *resp) 14740fb2ed66Smajd@mellanox.com { 14750fb2ed66Smajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 14760fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 14770fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 147889944450SShamir Rabinovitch struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context( 147989944450SShamir Rabinovitch udata, struct mlx5_ib_ucontext, ibucontext); 14800fb2ed66Smajd@mellanox.com int err; 14810fb2ed66Smajd@mellanox.com u32 tdn = mucontext->tdn; 14827f72052cSYishai Hadas u16 uid = to_mpd(pd)->uid; 14831f1d6abbSAriel Levkovich u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; 14840fb2ed66Smajd@mellanox.com 14850eacc574SAharon Landau if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt) 14860eacc574SAharon Landau return -EINVAL; 14870fb2ed66Smajd@mellanox.com if (qp->sq.wqe_cnt) { 14881cd6dbd3SYishai Hadas err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd); 14890fb2ed66Smajd@mellanox.com if (err) 14900fb2ed66Smajd@mellanox.com return err; 14910fb2ed66Smajd@mellanox.com 1492b0ea0fa5SJason Gunthorpe err = create_raw_packet_qp_sq(dev, udata, sq, in, pd); 14930fb2ed66Smajd@mellanox.com if (err) 14940fb2ed66Smajd@mellanox.com goto err_destroy_tis; 14950fb2ed66Smajd@mellanox.com 14967f72052cSYishai Hadas if (uid) { 14977f72052cSYishai Hadas resp->tisn = sq->tisn; 14987f72052cSYishai Hadas resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN; 14997f72052cSYishai Hadas resp->sqn = sq->base.mqp.qpn; 15007f72052cSYishai Hadas 
resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN; 15017f72052cSYishai Hadas } 15027f72052cSYishai Hadas 15030fb2ed66Smajd@mellanox.com sq->base.container_mibqp = qp; 15041d31e9c0SMajd Dibbiny sq->base.mqp.event = mlx5_ib_qp_event; 15050fb2ed66Smajd@mellanox.com } 15060fb2ed66Smajd@mellanox.com 15070fb2ed66Smajd@mellanox.com if (qp->rq.wqe_cnt) { 1508358e42eaSMajd Dibbiny rq->base.container_mibqp = qp; 1509358e42eaSMajd Dibbiny 15102be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING) 1511e4cc4fa7SNoa Osherovich rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; 15122be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) 1513b1383aa6SNoa Osherovich rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; 151434d57585SYishai Hadas err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd); 15150fb2ed66Smajd@mellanox.com if (err) 15160fb2ed66Smajd@mellanox.com goto err_destroy_sq; 15170fb2ed66Smajd@mellanox.com 1518e0b4b472SLeon Romanovsky err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd, 1519e0b4b472SLeon Romanovsky out); 15200fb2ed66Smajd@mellanox.com if (err) 15210fb2ed66Smajd@mellanox.com goto err_destroy_rq; 15227f72052cSYishai Hadas 15237f72052cSYishai Hadas if (uid) { 15247f72052cSYishai Hadas resp->rqn = rq->base.mqp.qpn; 15257f72052cSYishai Hadas resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN; 15267f72052cSYishai Hadas resp->tirn = rq->tirn; 15277f72052cSYishai Hadas resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN; 15281f1d6abbSAriel Levkovich if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) { 15291f1d6abbSAriel Levkovich resp->tir_icm_addr = MLX5_GET( 15301f1d6abbSAriel Levkovich create_tir_out, out, icm_address_31_0); 15311f1d6abbSAriel Levkovich resp->tir_icm_addr |= 15321f1d6abbSAriel Levkovich (u64)MLX5_GET(create_tir_out, out, 15331f1d6abbSAriel Levkovich icm_address_39_32) 15341f1d6abbSAriel Levkovich << 32; 15351f1d6abbSAriel Levkovich resp->tir_icm_addr |= 15361f1d6abbSAriel Levkovich (u64)MLX5_GET(create_tir_out, out, 15371f1d6abbSAriel Levkovich icm_address_63_40) 15381f1d6abbSAriel Levkovich << 40; 15391f1d6abbSAriel Levkovich resp->comp_mask |= 15401f1d6abbSAriel Levkovich MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR; 15411f1d6abbSAriel Levkovich } 15427f72052cSYishai Hadas } 15430fb2ed66Smajd@mellanox.com } 15440fb2ed66Smajd@mellanox.com 15450fb2ed66Smajd@mellanox.com qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? 
sq->base.mqp.qpn : 15460fb2ed66Smajd@mellanox.com rq->base.mqp.qpn; 15470fb2ed66Smajd@mellanox.com return 0; 15480fb2ed66Smajd@mellanox.com 15490fb2ed66Smajd@mellanox.com err_destroy_rq: 15500fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_rq(dev, rq); 15510fb2ed66Smajd@mellanox.com err_destroy_sq: 15520fb2ed66Smajd@mellanox.com if (!qp->sq.wqe_cnt) 15530fb2ed66Smajd@mellanox.com return err; 15540fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_sq(dev, sq); 15550fb2ed66Smajd@mellanox.com err_destroy_tis: 15561cd6dbd3SYishai Hadas destroy_raw_packet_qp_tis(dev, sq, pd); 15570fb2ed66Smajd@mellanox.com 15580fb2ed66Smajd@mellanox.com return err; 15590fb2ed66Smajd@mellanox.com } 15600fb2ed66Smajd@mellanox.com 15610fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev, 15620fb2ed66Smajd@mellanox.com struct mlx5_ib_qp *qp) 15630fb2ed66Smajd@mellanox.com { 15640fb2ed66Smajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 15650fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 15660fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 15670fb2ed66Smajd@mellanox.com 15680fb2ed66Smajd@mellanox.com if (qp->rq.wqe_cnt) { 1569443c1cf9SYishai Hadas destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd); 15700fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_rq(dev, rq); 15710fb2ed66Smajd@mellanox.com } 15720fb2ed66Smajd@mellanox.com 15730fb2ed66Smajd@mellanox.com if (qp->sq.wqe_cnt) { 15740fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_sq(dev, sq); 15751cd6dbd3SYishai Hadas destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd); 15760fb2ed66Smajd@mellanox.com } 15770fb2ed66Smajd@mellanox.com } 15780fb2ed66Smajd@mellanox.com 15790fb2ed66Smajd@mellanox.com static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp, 15800fb2ed66Smajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp) 15810fb2ed66Smajd@mellanox.com { 15820fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 15830fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 15840fb2ed66Smajd@mellanox.com 15850fb2ed66Smajd@mellanox.com sq->sq = &qp->sq; 15860fb2ed66Smajd@mellanox.com rq->rq = &qp->rq; 15870fb2ed66Smajd@mellanox.com sq->doorbell = &qp->db; 15880fb2ed66Smajd@mellanox.com rq->doorbell = &qp->db; 15890fb2ed66Smajd@mellanox.com } 15900fb2ed66Smajd@mellanox.com 159128d61370SYishai Hadas static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 159228d61370SYishai Hadas { 15930042f9e4SMark Bloch if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 15940042f9e4SMark Bloch MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) 15950042f9e4SMark Bloch mlx5_ib_disable_lb(dev, false, true); 1596443c1cf9SYishai Hadas mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, 1597443c1cf9SYishai Hadas to_mpd(qp->ibqp.pd)->uid); 159828d61370SYishai Hadas } 159928d61370SYishai Hadas 1600f78d358cSLeon Romanovsky struct mlx5_create_qp_params { 1601f78d358cSLeon Romanovsky struct ib_udata *udata; 1602f78d358cSLeon Romanovsky size_t inlen; 16036f2cf76eSLeon Romanovsky size_t outlen; 1604f78d358cSLeon Romanovsky void *ucmd; 1605f78d358cSLeon Romanovsky u8 is_rss_raw : 1; 1606f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr; 1607f78d358cSLeon Romanovsky u32 uidx; 160808d53976SLeon Romanovsky struct mlx5_ib_create_qp_resp resp; 1609f78d358cSLeon Romanovsky }; 1610f78d358cSLeon Romanovsky 1611f78d358cSLeon Romanovsky static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd, 1612f78d358cSLeon Romanovsky 
struct mlx5_ib_qp *qp, 1613f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 161428d61370SYishai Hadas { 1615f78d358cSLeon Romanovsky struct ib_qp_init_attr *init_attr = params->attr; 1616f78d358cSLeon Romanovsky struct mlx5_ib_create_qp_rss *ucmd = params->ucmd; 1617f78d358cSLeon Romanovsky struct ib_udata *udata = params->udata; 161889944450SShamir Rabinovitch struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context( 161989944450SShamir Rabinovitch udata, struct mlx5_ib_ucontext, ibucontext); 162028d61370SYishai Hadas int inlen; 16211f1d6abbSAriel Levkovich int outlen; 162228d61370SYishai Hadas int err; 162328d61370SYishai Hadas u32 *in; 16241f1d6abbSAriel Levkovich u32 *out; 162528d61370SYishai Hadas void *tirc; 162628d61370SYishai Hadas void *hfso; 162728d61370SYishai Hadas u32 selected_fields = 0; 16282d93fc85SMatan Barak u32 outer_l4; 162928d61370SYishai Hadas u32 tdn = mucontext->tdn; 1630175edba8SMark Bloch u8 lb_flag = 0; 163128d61370SYishai Hadas 16325ce0592bSLeon Romanovsky if (ucmd->comp_mask) { 163328d61370SYishai Hadas mlx5_ib_dbg(dev, "invalid comp mask\n"); 163428d61370SYishai Hadas return -EOPNOTSUPP; 163528d61370SYishai Hadas } 163628d61370SYishai Hadas 16375ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER && 16385ce0592bSLeon Romanovsky !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) { 1639309fa347SMaor Gottlieb mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n"); 1640309fa347SMaor Gottlieb return -EOPNOTSUPP; 1641309fa347SMaor Gottlieb } 1642309fa347SMaor Gottlieb 164337518fa4SLeon Romanovsky if (dev->is_rep) 1644175edba8SMark Bloch qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC; 1645175edba8SMark Bloch 164637518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) 164737518fa4SLeon Romanovsky lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 164837518fa4SLeon Romanovsky 164937518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) 1650175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; 1651175edba8SMark Bloch 165228d61370SYishai Hadas inlen = MLX5_ST_SZ_BYTES(create_tir_in); 16531f1d6abbSAriel Levkovich outlen = MLX5_ST_SZ_BYTES(create_tir_out); 16541f1d6abbSAriel Levkovich in = kvzalloc(inlen + outlen, GFP_KERNEL); 165528d61370SYishai Hadas if (!in) 165628d61370SYishai Hadas return -ENOMEM; 165728d61370SYishai Hadas 16581f1d6abbSAriel Levkovich out = in + MLX5_ST_SZ_DW(create_tir_in); 1659443c1cf9SYishai Hadas MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid); 166028d61370SYishai Hadas tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 166128d61370SYishai Hadas MLX5_SET(tirc, tirc, disp_type, 166228d61370SYishai Hadas MLX5_TIRC_DISP_TYPE_INDIRECT); 166328d61370SYishai Hadas MLX5_SET(tirc, tirc, indirect_table, 166428d61370SYishai Hadas init_attr->rwq_ind_tbl->ind_tbl_num); 166528d61370SYishai Hadas MLX5_SET(tirc, tirc, transport_domain, tdn); 166628d61370SYishai Hadas 166728d61370SYishai Hadas hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1668f95ef6cbSMaor Gottlieb 16695ce0592bSLeon Romanovsky if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) 1670f95ef6cbSMaor Gottlieb MLX5_SET(tirc, tirc, tunneled_offload_en, 1); 1671f95ef6cbSMaor Gottlieb 1672175edba8SMark Bloch MLX5_SET(tirc, tirc, self_lb_block, lb_flag); 1673175edba8SMark Bloch 16745ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER) 1675309fa347SMaor Gottlieb hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner); 1676309fa347SMaor Gottlieb else 
1677309fa347SMaor Gottlieb hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1678309fa347SMaor Gottlieb 16795ce0592bSLeon Romanovsky switch (ucmd->rx_hash_function) { 168028d61370SYishai Hadas case MLX5_RX_HASH_FUNC_TOEPLITZ: 168128d61370SYishai Hadas { 168228d61370SYishai Hadas void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 168328d61370SYishai Hadas size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); 168428d61370SYishai Hadas 16855ce0592bSLeon Romanovsky if (len != ucmd->rx_key_len) { 168628d61370SYishai Hadas err = -EINVAL; 168728d61370SYishai Hadas goto err; 168828d61370SYishai Hadas } 168928d61370SYishai Hadas 169028d61370SYishai Hadas MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); 16915ce0592bSLeon Romanovsky memcpy(rss_key, ucmd->rx_hash_key, len); 169228d61370SYishai Hadas break; 169328d61370SYishai Hadas } 169428d61370SYishai Hadas default: 169528d61370SYishai Hadas err = -EOPNOTSUPP; 169628d61370SYishai Hadas goto err; 169728d61370SYishai Hadas } 169828d61370SYishai Hadas 16995ce0592bSLeon Romanovsky if (!ucmd->rx_hash_fields_mask) { 170028d61370SYishai Hadas /* special case when this TIR serves as steering entry without hashing */ 170128d61370SYishai Hadas if (!init_attr->rwq_ind_tbl->log_ind_tbl_size) 170228d61370SYishai Hadas goto create_tir; 170328d61370SYishai Hadas err = -EINVAL; 170428d61370SYishai Hadas goto err; 170528d61370SYishai Hadas } 170628d61370SYishai Hadas 17075ce0592bSLeon Romanovsky if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 17085ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) && 17095ce0592bSLeon Romanovsky ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 17105ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) { 171128d61370SYishai Hadas err = -EINVAL; 171228d61370SYishai Hadas goto err; 171328d61370SYishai Hadas } 171428d61370SYishai Hadas 171528d61370SYishai Hadas /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */ 17165ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 17175ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) 171828d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 171928d61370SYishai Hadas MLX5_L3_PROT_TYPE_IPV4); 17205ce0592bSLeon Romanovsky else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 17215ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 172228d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 172328d61370SYishai Hadas MLX5_L3_PROT_TYPE_IPV6); 172428d61370SYishai Hadas 17255ce0592bSLeon Romanovsky outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 17265ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) 17275ce0592bSLeon Romanovsky << 0 | 17285ce0592bSLeon Romanovsky ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 17295ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 17305ce0592bSLeon Romanovsky << 1 | 17315ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2; 17322d93fc85SMatan Barak 17332d93fc85SMatan Barak /* Check that only one l4 protocol is set */ 17342d93fc85SMatan Barak if (outer_l4 & (outer_l4 - 1)) { 173528d61370SYishai Hadas err = -EINVAL; 173628d61370SYishai Hadas goto err; 173728d61370SYishai Hadas } 173828d61370SYishai Hadas 173928d61370SYishai Hadas /* If none of TCP & UDP SRC/DST was set - this bit field is 
ignored */ 17405ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 17415ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) 174228d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 174328d61370SYishai Hadas MLX5_L4_PROT_TYPE_TCP); 17445ce0592bSLeon Romanovsky else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 17455ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 174628d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 174728d61370SYishai Hadas MLX5_L4_PROT_TYPE_UDP); 174828d61370SYishai Hadas 17495ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 17505ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6)) 175128d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP; 175228d61370SYishai Hadas 17535ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) || 17545ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 175528d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP; 175628d61370SYishai Hadas 17575ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 17585ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP)) 175928d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT; 176028d61370SYishai Hadas 17615ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) || 17625ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 176328d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT; 176428d61370SYishai Hadas 17655ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) 17662d93fc85SMatan Barak selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI; 17672d93fc85SMatan Barak 176828d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields); 176928d61370SYishai Hadas 177028d61370SYishai Hadas create_tir: 1771e0b4b472SLeon Romanovsky MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 1772e0b4b472SLeon Romanovsky err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); 177328d61370SYishai Hadas 17741f1d6abbSAriel Levkovich qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn); 17750042f9e4SMark Bloch if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { 17760042f9e4SMark Bloch err = mlx5_ib_enable_lb(dev, false, true); 17770042f9e4SMark Bloch 17780042f9e4SMark Bloch if (err) 1779443c1cf9SYishai Hadas mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, 1780443c1cf9SYishai Hadas to_mpd(pd)->uid); 17810042f9e4SMark Bloch } 17820042f9e4SMark Bloch 178328d61370SYishai Hadas if (err) 178428d61370SYishai Hadas goto err; 178528d61370SYishai Hadas 17867f72052cSYishai Hadas if (mucontext->devx_uid) { 178708d53976SLeon Romanovsky params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN; 178808d53976SLeon Romanovsky params->resp.tirn = qp->rss_qp.tirn; 17891f1d6abbSAriel Levkovich if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) { 179008d53976SLeon Romanovsky params->resp.tir_icm_addr = 17911f1d6abbSAriel Levkovich MLX5_GET(create_tir_out, out, icm_address_31_0); 179208d53976SLeon Romanovsky params->resp.tir_icm_addr |= 179308d53976SLeon Romanovsky (u64)MLX5_GET(create_tir_out, out, 17941f1d6abbSAriel Levkovich icm_address_39_32) 17951f1d6abbSAriel Levkovich << 32; 179608d53976SLeon Romanovsky params->resp.tir_icm_addr |= 
179708d53976SLeon Romanovsky (u64)MLX5_GET(create_tir_out, out, 17981f1d6abbSAriel Levkovich icm_address_63_40) 17991f1d6abbSAriel Levkovich << 40; 180008d53976SLeon Romanovsky params->resp.comp_mask |= 18011f1d6abbSAriel Levkovich MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR; 18021f1d6abbSAriel Levkovich } 18037f72052cSYishai Hadas } 18047f72052cSYishai Hadas 180528d61370SYishai Hadas kvfree(in); 180628d61370SYishai Hadas /* qpn is reserved for that QP */ 180728d61370SYishai Hadas qp->trans_qp.base.mqp.qpn = 0; 18082be08c30SLeon Romanovsky qp->is_rss = true; 180928d61370SYishai Hadas return 0; 181028d61370SYishai Hadas 181128d61370SYishai Hadas err: 181228d61370SYishai Hadas kvfree(in); 181328d61370SYishai Hadas return err; 181428d61370SYishai Hadas } 181528d61370SYishai Hadas 18165d6ff1baSYonatan Cohen static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, 18175d6ff1baSYonatan Cohen struct ib_qp_init_attr *init_attr, 18186f4bc0eaSYonatan Cohen struct mlx5_ib_create_qp *ucmd, 18195d6ff1baSYonatan Cohen void *qpc) 18205d6ff1baSYonatan Cohen { 18215d6ff1baSYonatan Cohen int scqe_sz; 18222ab367a7Szhengbin bool allow_scat_cqe = false; 18235d6ff1baSYonatan Cohen 18246f4bc0eaSYonatan Cohen if (ucmd) 18256f4bc0eaSYonatan Cohen allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; 18266f4bc0eaSYonatan Cohen 18276f4bc0eaSYonatan Cohen if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) 18285d6ff1baSYonatan Cohen return; 18295d6ff1baSYonatan Cohen 18305d6ff1baSYonatan Cohen scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq); 18315d6ff1baSYonatan Cohen if (scqe_sz == 128) { 18325d6ff1baSYonatan Cohen MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE); 18335d6ff1baSYonatan Cohen return; 18345d6ff1baSYonatan Cohen } 18355d6ff1baSYonatan Cohen 18365d6ff1baSYonatan Cohen if (init_attr->qp_type != MLX5_IB_QPT_DCI || 18375d6ff1baSYonatan Cohen MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe)) 18385d6ff1baSYonatan Cohen MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE); 18395d6ff1baSYonatan Cohen } 18405d6ff1baSYonatan Cohen 1841a60109dcSYonatan Cohen static int atomic_size_to_mode(int size_mask) 1842a60109dcSYonatan Cohen { 1843a60109dcSYonatan Cohen /* driver does not support atomic_size > 256B 1844a60109dcSYonatan Cohen * and does not know how to translate bigger sizes 1845a60109dcSYonatan Cohen */ 1846a60109dcSYonatan Cohen int supported_size_mask = size_mask & 0x1ff; 1847a60109dcSYonatan Cohen int log_max_size; 1848a60109dcSYonatan Cohen 1849a60109dcSYonatan Cohen if (!supported_size_mask) 1850a60109dcSYonatan Cohen return -EOPNOTSUPP; 1851a60109dcSYonatan Cohen 1852a60109dcSYonatan Cohen log_max_size = __fls(supported_size_mask); 1853a60109dcSYonatan Cohen 1854a60109dcSYonatan Cohen if (log_max_size > 3) 1855a60109dcSYonatan Cohen return log_max_size; 1856a60109dcSYonatan Cohen 1857a60109dcSYonatan Cohen return MLX5_ATOMIC_MODE_8B; 1858a60109dcSYonatan Cohen } 1859a60109dcSYonatan Cohen 1860a60109dcSYonatan Cohen static int get_atomic_mode(struct mlx5_ib_dev *dev, 1861a60109dcSYonatan Cohen enum ib_qp_type qp_type) 1862a60109dcSYonatan Cohen { 1863a60109dcSYonatan Cohen u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); 1864a60109dcSYonatan Cohen u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic); 1865a60109dcSYonatan Cohen int atomic_mode = -EOPNOTSUPP; 1866a60109dcSYonatan Cohen int atomic_size_mask; 1867a60109dcSYonatan Cohen 1868a60109dcSYonatan Cohen if (!atomic) 1869a60109dcSYonatan Cohen return -EOPNOTSUPP; 1870a60109dcSYonatan Cohen 
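/*
 * DCT QPs report their supported atomic payload sizes in a dedicated
 * capability (atomic_size_dc); all other QP types use atomic_size_qp.
 * Extended (masked) atomics allow payloads larger than 8B; otherwise
 * fall back to the IB-spec compare-swap/fetch-add mode, which is 8B.
 */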
1871a60109dcSYonatan Cohen if (qp_type == MLX5_IB_QPT_DCT) 1872a60109dcSYonatan Cohen atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc); 1873a60109dcSYonatan Cohen else 1874a60109dcSYonatan Cohen atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); 1875a60109dcSYonatan Cohen 1876a60109dcSYonatan Cohen if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) || 1877a60109dcSYonatan Cohen (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD)) 1878a60109dcSYonatan Cohen atomic_mode = atomic_size_to_mode(atomic_size_mask); 1879a60109dcSYonatan Cohen 1880a60109dcSYonatan Cohen if (atomic_mode <= 0 && 1881a60109dcSYonatan Cohen (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP && 1882a60109dcSYonatan Cohen atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD)) 1883a60109dcSYonatan Cohen atomic_mode = MLX5_ATOMIC_MODE_IB_COMP; 1884a60109dcSYonatan Cohen 1885a60109dcSYonatan Cohen return atomic_mode; 1886a60109dcSYonatan Cohen } 1887a60109dcSYonatan Cohen 1888f78d358cSLeon Romanovsky static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1889f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 189004bcc1c2SLeon Romanovsky { 1891f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 1892f78d358cSLeon Romanovsky u32 uidx = params->uidx; 189304bcc1c2SLeon Romanovsky struct mlx5_ib_resources *devr = &dev->devr; 189404bcc1c2SLeon Romanovsky int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 189504bcc1c2SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 189604bcc1c2SLeon Romanovsky struct mlx5_ib_qp_base *base; 189704bcc1c2SLeon Romanovsky unsigned long flags; 189804bcc1c2SLeon Romanovsky void *qpc; 189904bcc1c2SLeon Romanovsky u32 *in; 190004bcc1c2SLeon Romanovsky int err; 190104bcc1c2SLeon Romanovsky 190204bcc1c2SLeon Romanovsky mutex_init(&qp->mutex); 190304bcc1c2SLeon Romanovsky 190404bcc1c2SLeon Romanovsky if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) 190504bcc1c2SLeon Romanovsky qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 190604bcc1c2SLeon Romanovsky 190704bcc1c2SLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 190804bcc1c2SLeon Romanovsky if (!in) 190904bcc1c2SLeon Romanovsky return -ENOMEM; 191004bcc1c2SLeon Romanovsky 191104bcc1c2SLeon Romanovsky qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 191204bcc1c2SLeon Romanovsky 191304bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC); 191404bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 191504bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn); 191604bcc1c2SLeon Romanovsky 191704bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 191804bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, block_lb_mc, 1); 191904bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 192004bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_master, 1); 192104bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_SEND) 192204bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_slave_send, 1); 192304bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_RECV) 192404bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_slave_receive, 1); 192504bcc1c2SLeon Romanovsky 192604bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ); 192704bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, no_sq, 1); 192804bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 192904bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); 193004bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 
to_msrq(devr->s0)->msrq.srqn); 193104bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn); 193204bcc1c2SLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 193304bcc1c2SLeon Romanovsky 193404bcc1c2SLeon Romanovsky /* 0xffffff means we ask to work with cqe version 0 */ 193504bcc1c2SLeon Romanovsky if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 193604bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, user_index, uidx); 193704bcc1c2SLeon Romanovsky 193804bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { 193904bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, end_padding_mode, 194004bcc1c2SLeon Romanovsky MLX5_WQ_END_PAD_MODE_ALIGN); 194104bcc1c2SLeon Romanovsky /* Special case to clean flag */ 194204bcc1c2SLeon Romanovsky qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 194304bcc1c2SLeon Romanovsky } 194404bcc1c2SLeon Romanovsky 194504bcc1c2SLeon Romanovsky base = &qp->trans_qp.base; 194604bcc1c2SLeon Romanovsky err = mlx5_core_create_qp(dev, &base->mqp, in, inlen); 194704bcc1c2SLeon Romanovsky kvfree(in); 19486367da46SLeon Romanovsky if (err) 194904bcc1c2SLeon Romanovsky return err; 195004bcc1c2SLeon Romanovsky 195104bcc1c2SLeon Romanovsky base->container_mibqp = qp; 195204bcc1c2SLeon Romanovsky base->mqp.event = mlx5_ib_qp_event; 195304bcc1c2SLeon Romanovsky 195404bcc1c2SLeon Romanovsky spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 195504bcc1c2SLeon Romanovsky list_add_tail(&qp->qps_list, &dev->qp_list); 195604bcc1c2SLeon Romanovsky spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 195704bcc1c2SLeon Romanovsky 1958968f0b6fSLeon Romanovsky qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn; 195904bcc1c2SLeon Romanovsky return 0; 196004bcc1c2SLeon Romanovsky } 196104bcc1c2SLeon Romanovsky 196298fc1126SLeon Romanovsky static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 1963f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 1964f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 1965e126ba97SEli Cohen { 1966f78d358cSLeon Romanovsky struct ib_qp_init_attr *init_attr = params->attr; 1967f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 1968f78d358cSLeon Romanovsky struct ib_udata *udata = params->udata; 1969f78d358cSLeon Romanovsky u32 uidx = params->uidx; 1970e126ba97SEli Cohen struct mlx5_ib_resources *devr = &dev->devr; 197109a7d9ecSSaeed Mahameed int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 1972938fe83cSSaeed Mahameed struct mlx5_core_dev *mdev = dev->mdev; 197389ea94a7SMaor Gottlieb struct mlx5_ib_cq *send_cq; 197489ea94a7SMaor Gottlieb struct mlx5_ib_cq *recv_cq; 197589ea94a7SMaor Gottlieb unsigned long flags; 197609a7d9ecSSaeed Mahameed struct mlx5_ib_qp_base *base; 1977e7b169f3SNoa Osherovich int mlx5_st; 1978cfb5e088SHaggai Abramovsky void *qpc; 197909a7d9ecSSaeed Mahameed u32 *in; 198009a7d9ecSSaeed Mahameed int err; 1981e126ba97SEli Cohen 1982e126ba97SEli Cohen mutex_init(&qp->mutex); 1983e126ba97SEli Cohen spin_lock_init(&qp->sq.lock); 1984e126ba97SEli Cohen spin_lock_init(&qp->rq.lock); 1985e126ba97SEli Cohen 19867aede1a2SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 1987e7b169f3SNoa Osherovich if (mlx5_st < 0) 1988e7b169f3SNoa Osherovich return -EINVAL; 1989e7b169f3SNoa Osherovich 1990e126ba97SEli Cohen if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 1991e126ba97SEli Cohen qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 1992e126ba97SEli Cohen 19932978975cSLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 19942978975cSLeon Romanovsky qp->underlay_qpn = 
init_attr->source_qpn;
19952978975cSLeon Romanovsky
1996c2e53b2cSYishai Hadas base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
19972be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
1998c2e53b2cSYishai Hadas &qp->raw_packet_qp.rq.base :
1999c2e53b2cSYishai Hadas &qp->trans_qp.base;
2000c2e53b2cSYishai Hadas
2001e126ba97SEli Cohen qp->has_rq = qp_has_rq(init_attr);
20022dfac92dSLeon Romanovsky err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
2003e126ba97SEli Cohen if (err) {
2004e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err);
2005e126ba97SEli Cohen return err;
2006e126ba97SEli Cohen }
2007e126ba97SEli Cohen
20082dfac92dSLeon Romanovsky if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
200998fc1126SLeon Romanovsky ucmd->rq_wqe_count != qp->rq.wqe_cnt)
2010e126ba97SEli Cohen return -EINVAL;
2011e126ba97SEli Cohen
201298fc1126SLeon Romanovsky if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
201398fc1126SLeon Romanovsky return -EINVAL;
201498fc1126SLeon Romanovsky
201508d53976SLeon Romanovsky err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
201608d53976SLeon Romanovsky &inlen, base, ucmd);
2017e126ba97SEli Cohen if (err)
2018e126ba97SEli Cohen return err;
2019e126ba97SEli Cohen
2020e126ba97SEli Cohen if (is_sqp(init_attr->qp_type))
2021e126ba97SEli Cohen qp->port = init_attr->port_num;
2022e126ba97SEli Cohen
202309a7d9ecSSaeed Mahameed qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
202409a7d9ecSSaeed Mahameed
2025e7b169f3SNoa Osherovich MLX5_SET(qpc, qpc, st, mlx5_st);
202609a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
202798fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
2028e126ba97SEli Cohen
2029c95e6d53SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
203009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, wq_signature, 1);
2031e126ba97SEli Cohen
20322be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
203309a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, block_lb_mc, 1);
2034f360d88aSEli Cohen
20352be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
203609a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_master, 1);
20372be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
203809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_slave_send, 1);
20392be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
204009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_slave_receive, 1);
20412be08c30SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
2042569c6651SDanit Goldberg MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
/* Responder scatter-to-CQE: a 128B receive CQE can absorb 64B of data. */
204390ecb37aSLeon Romanovsky if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
204490ecb37aSLeon Romanovsky (init_attr->qp_type == IB_QPT_RC ||
20458bde2c50SLeon Romanovsky init_attr->qp_type == IB_QPT_UC)) {
20468bde2c50SLeon Romanovsky int rcqe_sz =
20478bde2c50SLeon Romanovsky mlx5_ib_get_cqe_size(init_attr->recv_cq);
20488bde2c50SLeon Romanovsky
20498bde2c50SLeon Romanovsky MLX5_SET(qpc, qpc, cs_res,
20508bde2c50SLeon Romanovsky rcqe_sz == 128 ?
MLX5_RES_SCAT_DATA64_CQE : 20518bde2c50SLeon Romanovsky MLX5_RES_SCAT_DATA32_CQE); 20528bde2c50SLeon Romanovsky } 205390ecb37aSLeon Romanovsky if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && 20547aede1a2SLeon Romanovsky (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC)) 20552dfac92dSLeon Romanovsky configure_requester_scat_cqe(dev, init_attr, ucmd, qpc); 2056e126ba97SEli Cohen 2057e126ba97SEli Cohen if (qp->rq.wqe_cnt) { 205809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 205909a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 2060e126ba97SEli Cohen } 2061e126ba97SEli Cohen 206209a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); 2063e126ba97SEli Cohen 20643fd3307eSArtemy Kovalyov if (qp->sq.wqe_cnt) { 206509a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 20663fd3307eSArtemy Kovalyov } else { 206709a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, no_sq, 1); 20683fd3307eSArtemy Kovalyov if (init_attr->srq && 20693fd3307eSArtemy Kovalyov init_attr->srq->srq_type == IB_SRQT_TM) 20703fd3307eSArtemy Kovalyov MLX5_SET(qpc, qpc, offload_type, 20713fd3307eSArtemy Kovalyov MLX5_QPC_OFFLOAD_TYPE_RNDV); 20723fd3307eSArtemy Kovalyov } 2073e126ba97SEli Cohen 2074e126ba97SEli Cohen /* Set default resources */ 2075e126ba97SEli Cohen switch (init_attr->qp_type) { 2076e126ba97SEli Cohen case IB_QPT_XRC_INI: 207709a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 207809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); 207909a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 2080e126ba97SEli Cohen break; 2081e126ba97SEli Cohen default: 2082e126ba97SEli Cohen if (init_attr->srq) { 208309a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn); 208409a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); 2085e126ba97SEli Cohen } else { 208609a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); 208709a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); 2088e126ba97SEli Cohen } 2089e126ba97SEli Cohen } 2090e126ba97SEli Cohen 2091e126ba97SEli Cohen if (init_attr->send_cq) 209209a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); 2093e126ba97SEli Cohen 2094e126ba97SEli Cohen if (init_attr->recv_cq) 209509a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); 2096e126ba97SEli Cohen 209709a7d9ecSSaeed Mahameed MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 2098e126ba97SEli Cohen 2099cfb5e088SHaggai Abramovsky /* 0xffffff means we ask to work with cqe version 0 */ 210009a7d9ecSSaeed Mahameed if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 2101cfb5e088SHaggai Abramovsky MLX5_SET(qpc, qpc, user_index, uidx); 210209a7d9ecSSaeed Mahameed 21032978975cSLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING && 21042978975cSLeon Romanovsky init_attr->qp_type != IB_QPT_RAW_PACKET) { 2105b1383aa6SNoa Osherovich MLX5_SET(qpc, qpc, end_padding_mode, 2106b1383aa6SNoa Osherovich MLX5_WQ_END_PAD_MODE_ALIGN); 21072978975cSLeon Romanovsky /* Special case to clean flag */ 21082978975cSLeon Romanovsky qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 2109b1383aa6SNoa Osherovich } 2110b1383aa6SNoa Osherovich 2111c2e53b2cSYishai Hadas if (init_attr->qp_type == IB_QPT_RAW_PACKET || 21122be08c30SLeon Romanovsky qp->flags & 
IB_QP_CREATE_SOURCE_QPN) { 21132dfac92dSLeon Romanovsky qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr; 21140fb2ed66Smajd@mellanox.com raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); 21157f72052cSYishai Hadas err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata, 211608d53976SLeon Romanovsky ¶ms->resp); 211704bcc1c2SLeon Romanovsky } else 2118333fbaa0SLeon Romanovsky err = mlx5_core_create_qp(dev, &base->mqp, in, inlen); 2119e126ba97SEli Cohen 2120479163f4SAl Viro kvfree(in); 212104bcc1c2SLeon Romanovsky if (err) 212204bcc1c2SLeon Romanovsky goto err_create; 2123e126ba97SEli Cohen 212419098df2Smajd@mellanox.com base->container_mibqp = qp; 212519098df2Smajd@mellanox.com base->mqp.event = mlx5_ib_qp_event; 2126e126ba97SEli Cohen 21277aede1a2SLeon Romanovsky get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq, 212889ea94a7SMaor Gottlieb &send_cq, &recv_cq); 212989ea94a7SMaor Gottlieb spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 213089ea94a7SMaor Gottlieb mlx5_ib_lock_cqs(send_cq, recv_cq); 213189ea94a7SMaor Gottlieb /* Maintain device to QPs access, needed for further handling via reset 213289ea94a7SMaor Gottlieb * flow 213389ea94a7SMaor Gottlieb */ 213489ea94a7SMaor Gottlieb list_add_tail(&qp->qps_list, &dev->qp_list); 213589ea94a7SMaor Gottlieb /* Maintain CQ to QPs access, needed for further handling via reset flow 213689ea94a7SMaor Gottlieb */ 213789ea94a7SMaor Gottlieb if (send_cq) 213889ea94a7SMaor Gottlieb list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 213989ea94a7SMaor Gottlieb if (recv_cq) 214089ea94a7SMaor Gottlieb list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 214189ea94a7SMaor Gottlieb mlx5_ib_unlock_cqs(send_cq, recv_cq); 214289ea94a7SMaor Gottlieb spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 214389ea94a7SMaor Gottlieb 2144e126ba97SEli Cohen return 0; 2145e126ba97SEli Cohen 2146e126ba97SEli Cohen err_create: 2147747c519cSLeon Romanovsky destroy_qp(dev, qp, base, udata); 2148e126ba97SEli Cohen return err; 2149e126ba97SEli Cohen } 2150e126ba97SEli Cohen 215198fc1126SLeon Romanovsky static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 2152f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 2153f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 215498fc1126SLeon Romanovsky { 2155f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 2156f78d358cSLeon Romanovsky u32 uidx = params->uidx; 215798fc1126SLeon Romanovsky struct mlx5_ib_resources *devr = &dev->devr; 215898fc1126SLeon Romanovsky int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 215998fc1126SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 216098fc1126SLeon Romanovsky struct mlx5_ib_cq *send_cq; 216198fc1126SLeon Romanovsky struct mlx5_ib_cq *recv_cq; 216298fc1126SLeon Romanovsky unsigned long flags; 216398fc1126SLeon Romanovsky struct mlx5_ib_qp_base *base; 216498fc1126SLeon Romanovsky int mlx5_st; 216598fc1126SLeon Romanovsky void *qpc; 216698fc1126SLeon Romanovsky u32 *in; 216798fc1126SLeon Romanovsky int err; 216898fc1126SLeon Romanovsky 216998fc1126SLeon Romanovsky mutex_init(&qp->mutex); 217098fc1126SLeon Romanovsky spin_lock_init(&qp->sq.lock); 217198fc1126SLeon Romanovsky spin_lock_init(&qp->rq.lock); 217298fc1126SLeon Romanovsky 217398fc1126SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 217498fc1126SLeon Romanovsky if (mlx5_st < 0) 217598fc1126SLeon Romanovsky return -EINVAL; 217698fc1126SLeon Romanovsky 217798fc1126SLeon Romanovsky if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) 217898fc1126SLeon Romanovsky 
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 217998fc1126SLeon Romanovsky 218098fc1126SLeon Romanovsky base = &qp->trans_qp.base; 218198fc1126SLeon Romanovsky 218298fc1126SLeon Romanovsky qp->has_rq = qp_has_rq(attr); 218398fc1126SLeon Romanovsky err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL); 218498fc1126SLeon Romanovsky if (err) { 218598fc1126SLeon Romanovsky mlx5_ib_dbg(dev, "err %d\n", err); 218698fc1126SLeon Romanovsky return err; 218798fc1126SLeon Romanovsky } 218898fc1126SLeon Romanovsky 218998fc1126SLeon Romanovsky err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base); 219098fc1126SLeon Romanovsky if (err) 219198fc1126SLeon Romanovsky return err; 219298fc1126SLeon Romanovsky 219398fc1126SLeon Romanovsky if (is_sqp(attr->qp_type)) 219498fc1126SLeon Romanovsky qp->port = attr->port_num; 219598fc1126SLeon Romanovsky 219698fc1126SLeon Romanovsky qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 219798fc1126SLeon Romanovsky 219898fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, st, mlx5_st); 219998fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 220098fc1126SLeon Romanovsky 220198fc1126SLeon Romanovsky if (attr->qp_type != MLX5_IB_QPT_REG_UMR) 220298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn); 220398fc1126SLeon Romanovsky else 220498fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, latency_sensitive, 1); 220598fc1126SLeon Romanovsky 220698fc1126SLeon Romanovsky 220798fc1126SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 220898fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, block_lb_mc, 1); 220998fc1126SLeon Romanovsky 221098fc1126SLeon Romanovsky if (qp->rq.wqe_cnt) { 221198fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 221298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 221398fc1126SLeon Romanovsky } 221498fc1126SLeon Romanovsky 221598fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr)); 221698fc1126SLeon Romanovsky 221798fc1126SLeon Romanovsky if (qp->sq.wqe_cnt) 221898fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 221998fc1126SLeon Romanovsky else 222098fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, no_sq, 1); 222198fc1126SLeon Romanovsky 222298fc1126SLeon Romanovsky if (attr->srq) { 222398fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn); 222498fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 222598fc1126SLeon Romanovsky to_msrq(attr->srq)->msrq.srqn); 222698fc1126SLeon Romanovsky } else { 222798fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); 222898fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 222998fc1126SLeon Romanovsky to_msrq(devr->s1)->msrq.srqn); 223098fc1126SLeon Romanovsky } 223198fc1126SLeon Romanovsky 223298fc1126SLeon Romanovsky if (attr->send_cq) 223398fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn); 223498fc1126SLeon Romanovsky 223598fc1126SLeon Romanovsky if (attr->recv_cq) 223698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn); 223798fc1126SLeon Romanovsky 223898fc1126SLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 223998fc1126SLeon Romanovsky 224098fc1126SLeon Romanovsky /* 0xffffff means we ask to work with cqe version 0 */ 224198fc1126SLeon Romanovsky if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 224298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, user_index, uidx); 224398fc1126SLeon Romanovsky 224498fc1126SLeon 
Romanovsky /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */ 224598fc1126SLeon Romanovsky if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) 224698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); 224798fc1126SLeon Romanovsky 224898fc1126SLeon Romanovsky err = mlx5_core_create_qp(dev, &base->mqp, in, inlen); 224998fc1126SLeon Romanovsky kvfree(in); 225098fc1126SLeon Romanovsky if (err) 225198fc1126SLeon Romanovsky goto err_create; 225298fc1126SLeon Romanovsky 225398fc1126SLeon Romanovsky base->container_mibqp = qp; 225498fc1126SLeon Romanovsky base->mqp.event = mlx5_ib_qp_event; 225598fc1126SLeon Romanovsky 225698fc1126SLeon Romanovsky get_cqs(qp->type, attr->send_cq, attr->recv_cq, 225798fc1126SLeon Romanovsky &send_cq, &recv_cq); 225898fc1126SLeon Romanovsky spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 225998fc1126SLeon Romanovsky mlx5_ib_lock_cqs(send_cq, recv_cq); 226098fc1126SLeon Romanovsky /* Maintain device to QPs access, needed for further handling via reset 226198fc1126SLeon Romanovsky * flow 226298fc1126SLeon Romanovsky */ 226398fc1126SLeon Romanovsky list_add_tail(&qp->qps_list, &dev->qp_list); 226498fc1126SLeon Romanovsky /* Maintain CQ to QPs access, needed for further handling via reset flow 226598fc1126SLeon Romanovsky */ 226698fc1126SLeon Romanovsky if (send_cq) 226798fc1126SLeon Romanovsky list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 226898fc1126SLeon Romanovsky if (recv_cq) 226998fc1126SLeon Romanovsky list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 227098fc1126SLeon Romanovsky mlx5_ib_unlock_cqs(send_cq, recv_cq); 227198fc1126SLeon Romanovsky spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 227298fc1126SLeon Romanovsky 227398fc1126SLeon Romanovsky return 0; 227498fc1126SLeon Romanovsky 227598fc1126SLeon Romanovsky err_create: 2276747c519cSLeon Romanovsky destroy_qp(dev, qp, base, NULL); 227798fc1126SLeon Romanovsky return err; 227898fc1126SLeon Romanovsky } 227998fc1126SLeon Romanovsky 2280e126ba97SEli Cohen static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 2281e126ba97SEli Cohen __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 2282e126ba97SEli Cohen { 2283e126ba97SEli Cohen if (send_cq) { 2284e126ba97SEli Cohen if (recv_cq) { 2285e126ba97SEli Cohen if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 228689ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 2287e126ba97SEli Cohen spin_lock_nested(&recv_cq->lock, 2288e126ba97SEli Cohen SINGLE_DEPTH_NESTING); 2289e126ba97SEli Cohen } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 229089ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 2291e126ba97SEli Cohen __acquire(&recv_cq->lock); 2292e126ba97SEli Cohen } else { 229389ea94a7SMaor Gottlieb spin_lock(&recv_cq->lock); 2294e126ba97SEli Cohen spin_lock_nested(&send_cq->lock, 2295e126ba97SEli Cohen SINGLE_DEPTH_NESTING); 2296e126ba97SEli Cohen } 2297e126ba97SEli Cohen } else { 229889ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 22996a4f139aSEli Cohen __acquire(&recv_cq->lock); 2300e126ba97SEli Cohen } 2301e126ba97SEli Cohen } else if (recv_cq) { 230289ea94a7SMaor Gottlieb spin_lock(&recv_cq->lock); 23036a4f139aSEli Cohen __acquire(&send_cq->lock); 23046a4f139aSEli Cohen } else { 23056a4f139aSEli Cohen __acquire(&send_cq->lock); 23066a4f139aSEli Cohen __acquire(&recv_cq->lock); 2307e126ba97SEli Cohen } 2308e126ba97SEli Cohen } 2309e126ba97SEli Cohen 2310e126ba97SEli Cohen static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2311e126ba97SEli Cohen __releases(&send_cq->lock) __releases(&recv_cq->lock) 2312e126ba97SEli Cohen { 2313e126ba97SEli Cohen if (send_cq) { 2314e126ba97SEli Cohen if (recv_cq) { 2315e126ba97SEli Cohen if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 2316e126ba97SEli Cohen spin_unlock(&recv_cq->lock); 231789ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2318e126ba97SEli Cohen } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 2319e126ba97SEli Cohen __release(&recv_cq->lock); 232089ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2321e126ba97SEli Cohen } else { 2322e126ba97SEli Cohen spin_unlock(&send_cq->lock); 232389ea94a7SMaor Gottlieb spin_unlock(&recv_cq->lock); 2324e126ba97SEli Cohen } 2325e126ba97SEli Cohen } else { 23266a4f139aSEli Cohen __release(&recv_cq->lock); 232789ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2328e126ba97SEli Cohen } 2329e126ba97SEli Cohen } else if (recv_cq) { 23306a4f139aSEli Cohen __release(&send_cq->lock); 233189ea94a7SMaor Gottlieb spin_unlock(&recv_cq->lock); 23326a4f139aSEli Cohen } else { 23336a4f139aSEli Cohen __release(&recv_cq->lock); 23346a4f139aSEli Cohen __release(&send_cq->lock); 2335e126ba97SEli Cohen } 2336e126ba97SEli Cohen } 2337e126ba97SEli Cohen 2338e126ba97SEli Cohen static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) 2339e126ba97SEli Cohen { 2340e126ba97SEli Cohen return to_mpd(qp->ibqp.pd); 2341e126ba97SEli Cohen } 2342e126ba97SEli Cohen 234389ea94a7SMaor Gottlieb static void get_cqs(enum ib_qp_type qp_type, 234489ea94a7SMaor Gottlieb struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 2345e126ba97SEli Cohen struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) 2346e126ba97SEli Cohen { 234789ea94a7SMaor Gottlieb switch (qp_type) { 2348e126ba97SEli Cohen case IB_QPT_XRC_TGT: 2349e126ba97SEli Cohen *send_cq = NULL; 2350e126ba97SEli Cohen *recv_cq = NULL; 2351e126ba97SEli Cohen break; 2352e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: 2353e126ba97SEli Cohen case IB_QPT_XRC_INI: 235489ea94a7SMaor Gottlieb *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 2355e126ba97SEli Cohen *recv_cq = NULL; 2356e126ba97SEli Cohen break; 2357e126ba97SEli Cohen 2358e126ba97SEli Cohen case IB_QPT_SMI: 2359d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: 2360e126ba97SEli Cohen case IB_QPT_RC: 2361e126ba97SEli Cohen case IB_QPT_UC: 2362e126ba97SEli Cohen case IB_QPT_UD: 23630fb2ed66Smajd@mellanox.com case IB_QPT_RAW_PACKET: 236489ea94a7SMaor Gottlieb *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 236589ea94a7SMaor Gottlieb *recv_cq = ib_recv_cq ? 
to_mcq(ib_recv_cq) : NULL; 2366e126ba97SEli Cohen break; 2367e126ba97SEli Cohen default: 2368e126ba97SEli Cohen *send_cq = NULL; 2369e126ba97SEli Cohen *recv_cq = NULL; 2370e126ba97SEli Cohen break; 2371e126ba97SEli Cohen } 2372e126ba97SEli Cohen } 2373e126ba97SEli Cohen 2374ad5f8e96Smajd@mellanox.com static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 237513eab21fSAviv Heller const struct mlx5_modify_raw_qp_param *raw_qp_param, 237613eab21fSAviv Heller u8 lag_tx_affinity); 2377ad5f8e96Smajd@mellanox.com 2378bdeacabdSShamir Rabinovitch static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 2379bdeacabdSShamir Rabinovitch struct ib_udata *udata) 2380e126ba97SEli Cohen { 2381e126ba97SEli Cohen struct mlx5_ib_cq *send_cq, *recv_cq; 2382c2e53b2cSYishai Hadas struct mlx5_ib_qp_base *base; 238389ea94a7SMaor Gottlieb unsigned long flags; 2384e126ba97SEli Cohen int err; 2385e126ba97SEli Cohen 238628d61370SYishai Hadas if (qp->ibqp.rwq_ind_tbl) { 238728d61370SYishai Hadas destroy_rss_raw_qp_tir(dev, qp); 238828d61370SYishai Hadas return; 238928d61370SYishai Hadas } 239028d61370SYishai Hadas 2391c2e53b2cSYishai Hadas base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || 23922be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) ? 23930fb2ed66Smajd@mellanox.com &qp->raw_packet_qp.rq.base : 23940fb2ed66Smajd@mellanox.com &qp->trans_qp.base; 23950fb2ed66Smajd@mellanox.com 23966aec21f6SHaggai Eran if (qp->state != IB_QPS_RESET) { 2397c2e53b2cSYishai Hadas if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET && 23982be08c30SLeon Romanovsky !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) { 2399333fbaa0SLeon Romanovsky err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, 24001a412fb1SSaeed Mahameed NULL, &base->mqp); 2401ad5f8e96Smajd@mellanox.com } else { 24020680efa2SAlex Vesker struct mlx5_modify_raw_qp_param raw_qp_param = { 24030680efa2SAlex Vesker .operation = MLX5_CMD_OP_2RST_QP 24040680efa2SAlex Vesker }; 24050680efa2SAlex Vesker 240613eab21fSAviv Heller err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0); 2407ad5f8e96Smajd@mellanox.com } 2408ad5f8e96Smajd@mellanox.com if (err) 2409427c1e7bSmajd@mellanox.com mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n", 241019098df2Smajd@mellanox.com base->mqp.qpn); 24116aec21f6SHaggai Eran } 2412e126ba97SEli Cohen 241389ea94a7SMaor Gottlieb get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 241489ea94a7SMaor Gottlieb &send_cq, &recv_cq); 241589ea94a7SMaor Gottlieb 241689ea94a7SMaor Gottlieb spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 241789ea94a7SMaor Gottlieb mlx5_ib_lock_cqs(send_cq, recv_cq); 241889ea94a7SMaor Gottlieb /* del from lists under both locks above to protect reset flow paths */ 241989ea94a7SMaor Gottlieb list_del(&qp->qps_list); 242089ea94a7SMaor Gottlieb if (send_cq) 242189ea94a7SMaor Gottlieb list_del(&qp->cq_send_list); 242289ea94a7SMaor Gottlieb 242389ea94a7SMaor Gottlieb if (recv_cq) 242489ea94a7SMaor Gottlieb list_del(&qp->cq_recv_list); 2425e126ba97SEli Cohen 242603c4077bSLeon Romanovsky if (!udata) { 242719098df2Smajd@mellanox.com __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 2428e126ba97SEli Cohen qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); 2429e126ba97SEli Cohen if (send_cq != recv_cq) 243019098df2Smajd@mellanox.com __mlx5_ib_cq_clean(send_cq, base->mqp.qpn, 243119098df2Smajd@mellanox.com NULL); 2432e126ba97SEli Cohen } 243389ea94a7SMaor Gottlieb mlx5_ib_unlock_cqs(send_cq, recv_cq); 243489ea94a7SMaor Gottlieb spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 2435e126ba97SEli Cohen 2436c2e53b2cSYishai Hadas if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || 24372be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 24380fb2ed66Smajd@mellanox.com destroy_raw_packet_qp(dev, qp); 24390fb2ed66Smajd@mellanox.com } else { 2440333fbaa0SLeon Romanovsky err = mlx5_core_destroy_qp(dev, &base->mqp); 2441e126ba97SEli Cohen if (err) 24420fb2ed66Smajd@mellanox.com mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", 24430fb2ed66Smajd@mellanox.com base->mqp.qpn); 24440fb2ed66Smajd@mellanox.com } 2445e126ba97SEli Cohen 2446747c519cSLeon Romanovsky destroy_qp(dev, qp, base, udata); 2447e126ba97SEli Cohen } 2448e126ba97SEli Cohen 244947c80612SLeon Romanovsky static int create_dct(struct ib_pd *pd, struct mlx5_ib_qp *qp, 2450f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 2451b4aaa1f0SMoni Shoua { 2452f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 2453f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 2454f78d358cSLeon Romanovsky u32 uidx = params->uidx; 2455b4aaa1f0SMoni Shoua void *dctc; 2456b4aaa1f0SMoni Shoua 2457b4aaa1f0SMoni Shoua qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL); 24589c2ba4edSLeon Romanovsky if (!qp->dct.in) 245947c80612SLeon Romanovsky return -ENOMEM; 2460b4aaa1f0SMoni Shoua 2461a01a5860SYishai Hadas MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid); 2462b4aaa1f0SMoni Shoua dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); 2463b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn); 2464b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn); 2465b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn); 2466b4aaa1f0SMoni Shoua MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key); 2467b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, user_index, uidx); 2468b4aaa1f0SMoni Shoua 246937518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) { 2470fd9dab7eSLeon Romanovsky int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq); 2471fd9dab7eSLeon Romanovsky 2472fd9dab7eSLeon Romanovsky if (rcqe_sz == 128) 2473fd9dab7eSLeon Romanovsky MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE); 2474fd9dab7eSLeon Romanovsky } 24755d6ff1baSYonatan Cohen 2476b4aaa1f0SMoni Shoua qp->state = IB_QPS_RESET; 2477b4aaa1f0SMoni Shoua 247847c80612SLeon Romanovsky return 0; 2479b4aaa1f0SMoni Shoua } 2480b4aaa1f0SMoni Shoua 24817aede1a2SLeon Romanovsky static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 24827aede1a2SLeon Romanovsky enum ib_qp_type *type) 24836eb7edffSLeon Romanovsky { 24846eb7edffSLeon Romanovsky if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct)) 24856eb7edffSLeon Romanovsky goto out; 24866eb7edffSLeon Romanovsky 24876eb7edffSLeon Romanovsky switch (attr->qp_type) { 24886eb7edffSLeon Romanovsky case IB_QPT_XRC_TGT: 24896eb7edffSLeon Romanovsky case IB_QPT_XRC_INI: 24906eb7edffSLeon Romanovsky if (!MLX5_CAP_GEN(dev->mdev, xrc)) 24916eb7edffSLeon Romanovsky goto out; 24926eb7edffSLeon Romanovsky fallthrough; 24936eb7edffSLeon Romanovsky case IB_QPT_RAW_PACKET: 24946eb7edffSLeon Romanovsky 
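/* No device capability gate is needed for the remaining transport types; attr->qp_type is returned unchanged below. */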
case IB_QPT_RC: 24956eb7edffSLeon Romanovsky case IB_QPT_UC: 24966eb7edffSLeon Romanovsky case IB_QPT_UD: 24976eb7edffSLeon Romanovsky case IB_QPT_SMI: 24986eb7edffSLeon Romanovsky case MLX5_IB_QPT_HW_GSI: 24996eb7edffSLeon Romanovsky case MLX5_IB_QPT_REG_UMR: 25006eb7edffSLeon Romanovsky case IB_QPT_DRIVER: 25016eb7edffSLeon Romanovsky case IB_QPT_GSI: 25027aede1a2SLeon Romanovsky break; 25036eb7edffSLeon Romanovsky default: 25046eb7edffSLeon Romanovsky goto out; 2505b4aaa1f0SMoni Shoua } 2506b4aaa1f0SMoni Shoua 25077aede1a2SLeon Romanovsky *type = attr->qp_type; 2508b4aaa1f0SMoni Shoua return 0; 25096eb7edffSLeon Romanovsky 25106eb7edffSLeon Romanovsky out: 25116eb7edffSLeon Romanovsky mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type); 25126eb7edffSLeon Romanovsky return -EOPNOTSUPP; 2513b4aaa1f0SMoni Shoua } 2514b4aaa1f0SMoni Shoua 25152242cc25SLeon Romanovsky static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd, 25162242cc25SLeon Romanovsky struct ib_qp_init_attr *attr, 25172242cc25SLeon Romanovsky struct ib_udata *udata) 25182242cc25SLeon Romanovsky { 25192242cc25SLeon Romanovsky struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 25202242cc25SLeon Romanovsky udata, struct mlx5_ib_ucontext, ibucontext); 25212242cc25SLeon Romanovsky 25222242cc25SLeon Romanovsky if (!udata) { 25232242cc25SLeon Romanovsky /* Kernel create_qp callers */ 25242242cc25SLeon Romanovsky if (attr->rwq_ind_tbl) 25252242cc25SLeon Romanovsky return -EOPNOTSUPP; 25262242cc25SLeon Romanovsky 25272242cc25SLeon Romanovsky switch (attr->qp_type) { 25282242cc25SLeon Romanovsky case IB_QPT_RAW_PACKET: 25292242cc25SLeon Romanovsky case IB_QPT_DRIVER: 25302242cc25SLeon Romanovsky return -EOPNOTSUPP; 25312242cc25SLeon Romanovsky default: 25322242cc25SLeon Romanovsky return 0; 25332242cc25SLeon Romanovsky } 25342242cc25SLeon Romanovsky } 25352242cc25SLeon Romanovsky 25362242cc25SLeon Romanovsky /* Userspace create_qp callers */ 25372242cc25SLeon Romanovsky if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) { 25382242cc25SLeon Romanovsky mlx5_ib_dbg(dev, 25392242cc25SLeon Romanovsky "Raw Packet QP is only supported for CQE version > 0\n"); 25402242cc25SLeon Romanovsky return -EINVAL; 25412242cc25SLeon Romanovsky } 25422242cc25SLeon Romanovsky 25432242cc25SLeon Romanovsky if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) { 25442242cc25SLeon Romanovsky mlx5_ib_dbg(dev, 25452242cc25SLeon Romanovsky "Wrong QP type %d for the RWQ indirect table\n", 25462242cc25SLeon Romanovsky attr->qp_type); 25472242cc25SLeon Romanovsky return -EINVAL; 25482242cc25SLeon Romanovsky } 25492242cc25SLeon Romanovsky 25502242cc25SLeon Romanovsky switch (attr->qp_type) { 25512242cc25SLeon Romanovsky case IB_QPT_SMI: 25522242cc25SLeon Romanovsky case MLX5_IB_QPT_HW_GSI: 25532242cc25SLeon Romanovsky case MLX5_IB_QPT_REG_UMR: 25542242cc25SLeon Romanovsky case IB_QPT_GSI: 25552242cc25SLeon Romanovsky mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n", 25562242cc25SLeon Romanovsky attr->qp_type); 25572242cc25SLeon Romanovsky return -EINVAL; 25582242cc25SLeon Romanovsky default: 25592242cc25SLeon Romanovsky break; 25602242cc25SLeon Romanovsky } 25612242cc25SLeon Romanovsky 25622242cc25SLeon Romanovsky /* 25632242cc25SLeon Romanovsky * We don't need to see this warning; it means that kernel code is 25642242cc25SLeon Romanovsky * missing the ib_pd. Placed here to catch developers' mistakes.
25652242cc25SLeon Romanovsky */ 25662242cc25SLeon Romanovsky WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT, 25672242cc25SLeon Romanovsky "There is a missing PD pointer assignment\n"); 25682242cc25SLeon Romanovsky return 0; 25692242cc25SLeon Romanovsky } 25702242cc25SLeon Romanovsky 257137518fa4SLeon Romanovsky static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, 257237518fa4SLeon Romanovsky bool cond, struct mlx5_ib_qp *qp) 257337518fa4SLeon Romanovsky { 257437518fa4SLeon Romanovsky if (!(*flags & flag)) 257537518fa4SLeon Romanovsky return; 257637518fa4SLeon Romanovsky 257737518fa4SLeon Romanovsky if (cond) { 257837518fa4SLeon Romanovsky qp->flags_en |= flag; 257937518fa4SLeon Romanovsky *flags &= ~flag; 258037518fa4SLeon Romanovsky return; 258137518fa4SLeon Romanovsky } 258237518fa4SLeon Romanovsky 258337518fa4SLeon Romanovsky if (flag == MLX5_QP_FLAG_SCATTER_CQE) { 258437518fa4SLeon Romanovsky /* 258537518fa4SLeon Romanovsky * We don't return an error if this flag was provided 258637518fa4SLeon Romanovsky * but mlx5 doesn't have the right capability. 258737518fa4SLeon Romanovsky */ 258837518fa4SLeon Romanovsky *flags &= ~MLX5_QP_FLAG_SCATTER_CQE; 258937518fa4SLeon Romanovsky return; 259037518fa4SLeon Romanovsky } 259137518fa4SLeon Romanovsky mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag); 259237518fa4SLeon Romanovsky } 259337518fa4SLeon Romanovsky 259437518fa4SLeon Romanovsky static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 25955ce0592bSLeon Romanovsky void *ucmd, struct ib_qp_init_attr *attr) 25962fdddbd5SLeon Romanovsky { 259737518fa4SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 259737518fa4SLeon Romanovsky bool cond; 25995ce0592bSLeon Romanovsky int flags; 26005ce0592bSLeon Romanovsky 26015ce0592bSLeon Romanovsky if (attr->rwq_ind_tbl) 26025ce0592bSLeon Romanovsky flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags; 26035ce0592bSLeon Romanovsky else 26045ce0592bSLeon Romanovsky flags = ((struct mlx5_ib_create_qp *)ucmd)->flags; 260537518fa4SLeon Romanovsky 260637518fa4SLeon Romanovsky switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) { 26072fdddbd5SLeon Romanovsky case MLX5_QP_FLAG_TYPE_DCI: 26087aede1a2SLeon Romanovsky qp->type = MLX5_IB_QPT_DCI; 26092fdddbd5SLeon Romanovsky break; 26102fdddbd5SLeon Romanovsky case MLX5_QP_FLAG_TYPE_DCT: 26117aede1a2SLeon Romanovsky qp->type = MLX5_IB_QPT_DCT; 261237518fa4SLeon Romanovsky break; 26137aede1a2SLeon Romanovsky default: 26147aede1a2SLeon Romanovsky if (qp->type != IB_QPT_DRIVER) 26157aede1a2SLeon Romanovsky break; 26167aede1a2SLeon Romanovsky /* 26177aede1a2SLeon Romanovsky * It is IB_QPT_DRIVER and either no subtype or a 26187aede1a2SLeon Romanovsky * wrong subtype was provided.
26197aede1a2SLeon Romanovsky */ 262037518fa4SLeon Romanovsky return -EINVAL; 26217aede1a2SLeon Romanovsky } 262237518fa4SLeon Romanovsky 262337518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp); 262437518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp); 262537518fa4SLeon Romanovsky 262637518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp); 262737518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE, 262837518fa4SLeon Romanovsky MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); 262937518fa4SLeon Romanovsky 26307aede1a2SLeon Romanovsky if (qp->type == IB_QPT_RAW_PACKET) { 263137518fa4SLeon Romanovsky cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || 263237518fa4SLeon Romanovsky MLX5_CAP_ETH(mdev, tunnel_stateless_gre) || 263337518fa4SLeon Romanovsky MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx); 263437518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS, 263537518fa4SLeon Romanovsky cond, qp); 263637518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 263737518fa4SLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true, 263837518fa4SLeon Romanovsky qp); 263937518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 264037518fa4SLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true, 264137518fa4SLeon Romanovsky qp); 264237518fa4SLeon Romanovsky } 264337518fa4SLeon Romanovsky 26447aede1a2SLeon Romanovsky if (qp->type == IB_QPT_RC) 264537518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 264637518fa4SLeon Romanovsky MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE, 264737518fa4SLeon Romanovsky MLX5_CAP_GEN(mdev, qp_packet_based), qp); 264837518fa4SLeon Romanovsky 264976883a6cSLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp); 265076883a6cSLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp); 265176883a6cSLeon Romanovsky 26525d6fffedSLeon Romanovsky cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS | 26535d6fffedSLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 26545d6fffedSLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC); 26555d6fffedSLeon Romanovsky if (attr->rwq_ind_tbl && cond) { 26565d6fffedSLeon Romanovsky mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n", 26575d6fffedSLeon Romanovsky cond); 26585d6fffedSLeon Romanovsky return -EINVAL; 26595d6fffedSLeon Romanovsky } 26605d6fffedSLeon Romanovsky 266137518fa4SLeon Romanovsky if (flags) 266237518fa4SLeon Romanovsky mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags); 266337518fa4SLeon Romanovsky 266437518fa4SLeon Romanovsky return (flags) ? 
-EINVAL : 0; 26652fdddbd5SLeon Romanovsky } 26662fdddbd5SLeon Romanovsky 26672978975cSLeon Romanovsky static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag, 26682978975cSLeon Romanovsky bool cond, struct mlx5_ib_qp *qp) 26692978975cSLeon Romanovsky { 26702978975cSLeon Romanovsky if (!(*flags & flag)) 26712978975cSLeon Romanovsky return; 26722978975cSLeon Romanovsky 26732978975cSLeon Romanovsky if (cond) { 26742978975cSLeon Romanovsky qp->flags |= flag; 26752978975cSLeon Romanovsky *flags &= ~flag; 26762978975cSLeon Romanovsky return; 26772978975cSLeon Romanovsky } 26782978975cSLeon Romanovsky 26792978975cSLeon Romanovsky if (flag == MLX5_IB_QP_CREATE_WC_TEST) { 26802978975cSLeon Romanovsky /* 26812978975cSLeon Romanovsky * Special case: if the condition is not met, it is not an error, 26822978975cSLeon Romanovsky * just a different in-kernel flow. 26832978975cSLeon Romanovsky */ 26842978975cSLeon Romanovsky *flags &= ~MLX5_IB_QP_CREATE_WC_TEST; 26852978975cSLeon Romanovsky return; 26862978975cSLeon Romanovsky } 26872978975cSLeon Romanovsky mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag); 26882978975cSLeon Romanovsky } 26892978975cSLeon Romanovsky 26902978975cSLeon Romanovsky static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 26912978975cSLeon Romanovsky struct ib_qp_init_attr *attr) 26922978975cSLeon Romanovsky { 26937aede1a2SLeon Romanovsky enum ib_qp_type qp_type = qp->type; 26942978975cSLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 26952978975cSLeon Romanovsky int create_flags = attr->create_flags; 26962978975cSLeon Romanovsky bool cond; 26972978975cSLeon Romanovsky 26987aede1a2SLeon Romanovsky if (qp_type == MLX5_IB_QPT_DCT) 26992978975cSLeon Romanovsky return (create_flags) ? -EINVAL : 0; 27002978975cSLeon Romanovsky 27012978975cSLeon Romanovsky if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) 27022978975cSLeon Romanovsky return (create_flags) ?
-EINVAL : 0; 27032978975cSLeon Romanovsky 27042978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 27052978975cSLeon Romanovsky IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, 27062978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, block_lb_mc), qp); 27072978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL, 27082978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 27092978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND, 27102978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 27112978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV, 27122978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 27132978975cSLeon Romanovsky 27142978975cSLeon Romanovsky if (qp_type == IB_QPT_UD) { 27152978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 27162978975cSLeon Romanovsky IB_QP_CREATE_IPOIB_UD_LSO, 27172978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, ipoib_basic_offloads), 27182978975cSLeon Romanovsky qp); 27192978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB; 27202978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN, 27212978975cSLeon Romanovsky cond, qp); 27222978975cSLeon Romanovsky } 27232978975cSLeon Romanovsky 27242978975cSLeon Romanovsky if (qp_type == IB_QPT_RAW_PACKET) { 27252978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && 27262978975cSLeon Romanovsky MLX5_CAP_ETH(mdev, scatter_fcs); 27272978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 27282978975cSLeon Romanovsky IB_QP_CREATE_SCATTER_FCS, cond, qp); 27292978975cSLeon Romanovsky 27302978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && 27312978975cSLeon Romanovsky MLX5_CAP_ETH(mdev, vlan_cap); 27322978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 27332978975cSLeon Romanovsky IB_QP_CREATE_CVLAN_STRIPPING, cond, qp); 27342978975cSLeon Romanovsky } 27352978975cSLeon Romanovsky 27362978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 27372978975cSLeon Romanovsky IB_QP_CREATE_PCI_WRITE_END_PADDING, 27382978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, end_pad), qp); 27392978975cSLeon Romanovsky 27402978975cSLeon Romanovsky process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST, 27412978975cSLeon Romanovsky qp_type != MLX5_IB_QPT_REG_UMR, qp); 27422978975cSLeon Romanovsky process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1, 27432978975cSLeon Romanovsky true, qp); 27442978975cSLeon Romanovsky 27452978975cSLeon Romanovsky if (create_flags) 27462978975cSLeon Romanovsky mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n", 27472978975cSLeon Romanovsky create_flags); 27482978975cSLeon Romanovsky 27492978975cSLeon Romanovsky return (create_flags) ? 
-EINVAL : 0; 27502978975cSLeon Romanovsky } 27512978975cSLeon Romanovsky 27526f2cf76eSLeon Romanovsky static int process_udata_size(struct mlx5_ib_dev *dev, 27536f2cf76eSLeon Romanovsky struct mlx5_create_qp_params *params) 27542fdddbd5SLeon Romanovsky { 27552fdddbd5SLeon Romanovsky size_t ucmd = sizeof(struct mlx5_ib_create_qp); 27566f2cf76eSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 27576f2cf76eSLeon Romanovsky struct ib_udata *udata = params->udata; 27586f2cf76eSLeon Romanovsky size_t outlen = udata->outlen; 27595ce0592bSLeon Romanovsky size_t inlen = udata->inlen; 27602fdddbd5SLeon Romanovsky 27616f2cf76eSLeon Romanovsky params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp)); 27626f2cf76eSLeon Romanovsky if (attr->qp_type == IB_QPT_DRIVER) { 27636f2cf76eSLeon Romanovsky params->inlen = (inlen < ucmd) ? 0 : ucmd; 27646f2cf76eSLeon Romanovsky goto out; 27656f2cf76eSLeon Romanovsky } 27662dfac92dSLeon Romanovsky 27676f2cf76eSLeon Romanovsky if (!params->is_rss_raw) { 27686f2cf76eSLeon Romanovsky params->inlen = ucmd; 27696f2cf76eSLeon Romanovsky goto out; 27706f2cf76eSLeon Romanovsky } 27715ce0592bSLeon Romanovsky 27726f2cf76eSLeon Romanovsky /* RSS RAW QP */ 27735ce0592bSLeon Romanovsky if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags)) 27746f2cf76eSLeon Romanovsky return -EINVAL; 27756f2cf76eSLeon Romanovsky 27766f2cf76eSLeon Romanovsky if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index)) 27776f2cf76eSLeon Romanovsky return -EINVAL; 27785ce0592bSLeon Romanovsky 27795ce0592bSLeon Romanovsky ucmd = sizeof(struct mlx5_ib_create_qp_rss); 27805ce0592bSLeon Romanovsky if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd)) 27816f2cf76eSLeon Romanovsky return -EINVAL; 27825ce0592bSLeon Romanovsky 27836f2cf76eSLeon Romanovsky params->inlen = min(ucmd, inlen); 27846f2cf76eSLeon Romanovsky out: 27856f2cf76eSLeon Romanovsky if (!params->inlen) 27866f2cf76eSLeon Romanovsky mlx5_ib_dbg(dev, "udata is too small or not cleared\n"); 27876f2cf76eSLeon Romanovsky 27886f2cf76eSLeon Romanovsky return (params->inlen) ? 
0 : -EINVAL; 27892fdddbd5SLeon Romanovsky } 27902fdddbd5SLeon Romanovsky 2791968f0b6fSLeon Romanovsky static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 2792f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 2793f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 27945d0dc3d9SLeon Romanovsky { 2795968f0b6fSLeon Romanovsky int err; 27965d0dc3d9SLeon Romanovsky 2797968f0b6fSLeon Romanovsky if (params->is_rss_raw) { 2798968f0b6fSLeon Romanovsky err = create_rss_raw_qp_tir(dev, pd, qp, params); 2799968f0b6fSLeon Romanovsky goto out; 2800968f0b6fSLeon Romanovsky } 2801968f0b6fSLeon Romanovsky 2802968f0b6fSLeon Romanovsky if (qp->type == MLX5_IB_QPT_DCT) { 2803968f0b6fSLeon Romanovsky err = create_dct(pd, qp, params); 2804968f0b6fSLeon Romanovsky goto out; 2805968f0b6fSLeon Romanovsky } 2806968f0b6fSLeon Romanovsky 2807968f0b6fSLeon Romanovsky if (qp->type == IB_QPT_XRC_TGT) { 2808968f0b6fSLeon Romanovsky err = create_xrc_tgt_qp(dev, qp, params); 2809968f0b6fSLeon Romanovsky goto out; 2810968f0b6fSLeon Romanovsky } 2811968f0b6fSLeon Romanovsky 2812968f0b6fSLeon Romanovsky if (params->udata) 2813968f0b6fSLeon Romanovsky err = create_user_qp(dev, pd, qp, params); 2814968f0b6fSLeon Romanovsky else 2815968f0b6fSLeon Romanovsky err = create_kernel_qp(dev, pd, qp, params); 2816968f0b6fSLeon Romanovsky 2817968f0b6fSLeon Romanovsky out: 2818968f0b6fSLeon Romanovsky if (err) { 2819968f0b6fSLeon Romanovsky mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type); 2820968f0b6fSLeon Romanovsky return err; 2821968f0b6fSLeon Romanovsky } 2822968f0b6fSLeon Romanovsky 2823968f0b6fSLeon Romanovsky if (is_qp0(qp->type)) 2824968f0b6fSLeon Romanovsky qp->ibqp.qp_num = 0; 2825968f0b6fSLeon Romanovsky else if (is_qp1(qp->type)) 2826968f0b6fSLeon Romanovsky qp->ibqp.qp_num = 1; 2827968f0b6fSLeon Romanovsky else 2828968f0b6fSLeon Romanovsky qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn; 2829968f0b6fSLeon Romanovsky 2830968f0b6fSLeon Romanovsky mlx5_ib_dbg(dev, 2831968f0b6fSLeon Romanovsky "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", 2832968f0b6fSLeon Romanovsky qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, 2833968f0b6fSLeon Romanovsky params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn : 2834968f0b6fSLeon Romanovsky -1, 2835968f0b6fSLeon Romanovsky params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn : 2836968f0b6fSLeon Romanovsky -1); 2837968f0b6fSLeon Romanovsky 2838968f0b6fSLeon Romanovsky return 0; 28395d0dc3d9SLeon Romanovsky } 28405d0dc3d9SLeon Romanovsky 28417aede1a2SLeon Romanovsky static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 28427aede1a2SLeon Romanovsky struct ib_qp_init_attr *attr) 28437aede1a2SLeon Romanovsky { 28447aede1a2SLeon Romanovsky int ret = 0; 28457aede1a2SLeon Romanovsky 28467aede1a2SLeon Romanovsky switch (qp->type) { 28477aede1a2SLeon Romanovsky case MLX5_IB_QPT_DCT: 28487aede1a2SLeon Romanovsky ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0; 28497aede1a2SLeon Romanovsky break; 28507aede1a2SLeon Romanovsky case MLX5_IB_QPT_DCI: 28517aede1a2SLeon Romanovsky ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ? 28527aede1a2SLeon Romanovsky -EINVAL : 28537aede1a2SLeon Romanovsky 0; 28547aede1a2SLeon Romanovsky break; 2855266424ebSLeon Romanovsky case IB_QPT_RAW_PACKET: 2856266424ebSLeon Romanovsky ret = (attr->rwq_ind_tbl && attr->send_cq) ? 
-EINVAL : 0; 2857266424ebSLeon Romanovsky break; 28587aede1a2SLeon Romanovsky default: 28597aede1a2SLeon Romanovsky break; 28607aede1a2SLeon Romanovsky } 28617aede1a2SLeon Romanovsky 28627aede1a2SLeon Romanovsky if (ret) 28637aede1a2SLeon Romanovsky mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type); 28647aede1a2SLeon Romanovsky 28657aede1a2SLeon Romanovsky return ret; 28667aede1a2SLeon Romanovsky } 28677aede1a2SLeon Romanovsky 2868f78d358cSLeon Romanovsky static int get_qp_uidx(struct mlx5_ib_qp *qp, 2869f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 287021aad80bSLeon Romanovsky { 2871f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 2872f78d358cSLeon Romanovsky struct ib_udata *udata = params->udata; 287321aad80bSLeon Romanovsky struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 287421aad80bSLeon Romanovsky udata, struct mlx5_ib_ucontext, ibucontext); 287521aad80bSLeon Romanovsky 2876f78d358cSLeon Romanovsky if (params->is_rss_raw) 287721aad80bSLeon Romanovsky return 0; 287821aad80bSLeon Romanovsky 2879f78d358cSLeon Romanovsky return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx); 288021aad80bSLeon Romanovsky } 288121aad80bSLeon Romanovsky 288208d53976SLeon Romanovsky static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp) 288308d53976SLeon Romanovsky { 288408d53976SLeon Romanovsky struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device); 288508d53976SLeon Romanovsky 288608d53976SLeon Romanovsky if (mqp->state == IB_QPS_RTR) { 288708d53976SLeon Romanovsky int err; 288808d53976SLeon Romanovsky 288908d53976SLeon Romanovsky err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct); 289008d53976SLeon Romanovsky if (err) { 289108d53976SLeon Romanovsky mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err); 289208d53976SLeon Romanovsky return err; 289308d53976SLeon Romanovsky } 289408d53976SLeon Romanovsky } 289508d53976SLeon Romanovsky 289608d53976SLeon Romanovsky kfree(mqp->dct.in); 289708d53976SLeon Romanovsky kfree(mqp); 289808d53976SLeon Romanovsky return 0; 289908d53976SLeon Romanovsky } 290008d53976SLeon Romanovsky 2901f78d358cSLeon Romanovsky struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, 2902e126ba97SEli Cohen struct ib_udata *udata) 2903e126ba97SEli Cohen { 2904f78d358cSLeon Romanovsky struct mlx5_create_qp_params params = {}; 2905e126ba97SEli Cohen struct mlx5_ib_dev *dev; 2906e126ba97SEli Cohen struct mlx5_ib_qp *qp; 29077aede1a2SLeon Romanovsky enum ib_qp_type type; 2908e126ba97SEli Cohen int err; 2909e126ba97SEli Cohen 29106eb7edffSLeon Romanovsky dev = pd ?
to_mdev(pd->device) : 2911f78d358cSLeon Romanovsky to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device); 29120fb2ed66Smajd@mellanox.com 2913f78d358cSLeon Romanovsky err = check_qp_type(dev, attr, &type); 29142242cc25SLeon Romanovsky if (err) 29152242cc25SLeon Romanovsky return ERR_PTR(err); 2916e126ba97SEli Cohen 2917f78d358cSLeon Romanovsky err = check_valid_flow(dev, pd, attr, udata); 2918f78d358cSLeon Romanovsky if (err) 2919f78d358cSLeon Romanovsky return ERR_PTR(err); 2920f78d358cSLeon Romanovsky 2921f78d358cSLeon Romanovsky if (attr->qp_type == IB_QPT_GSI) 2922f78d358cSLeon Romanovsky return mlx5_ib_gsi_create_qp(pd, attr); 2923f78d358cSLeon Romanovsky 2924f78d358cSLeon Romanovsky params.udata = udata; 2925f78d358cSLeon Romanovsky params.uidx = MLX5_IB_DEFAULT_UIDX; 2926f78d358cSLeon Romanovsky params.attr = attr; 2927f78d358cSLeon Romanovsky params.is_rss_raw = !!attr->rwq_ind_tbl; 29289c2ba4edSLeon Romanovsky 29295ce0592bSLeon Romanovsky if (udata) { 29306f2cf76eSLeon Romanovsky err = process_udata_size(dev, &params); 29316f2cf76eSLeon Romanovsky if (err) 29326f2cf76eSLeon Romanovsky return ERR_PTR(err); 29332fdddbd5SLeon Romanovsky 2934f78d358cSLeon Romanovsky params.ucmd = kzalloc(params.inlen, GFP_KERNEL); 2935f78d358cSLeon Romanovsky if (!params.ucmd) 29365ce0592bSLeon Romanovsky return ERR_PTR(-ENOMEM); 29375ce0592bSLeon Romanovsky 2938f78d358cSLeon Romanovsky err = ib_copy_from_udata(params.ucmd, udata, params.inlen); 29392fdddbd5SLeon Romanovsky if (err) 29405ce0592bSLeon Romanovsky goto free_ucmd; 29412fdddbd5SLeon Romanovsky } 29422fdddbd5SLeon Romanovsky 29439c2ba4edSLeon Romanovsky qp = kzalloc(sizeof(*qp), GFP_KERNEL); 29445ce0592bSLeon Romanovsky if (!qp) { 29455ce0592bSLeon Romanovsky err = -ENOMEM; 29465ce0592bSLeon Romanovsky goto free_ucmd; 29475ce0592bSLeon Romanovsky } 29489c2ba4edSLeon Romanovsky 29497aede1a2SLeon Romanovsky qp->type = type; 295037518fa4SLeon Romanovsky if (udata) { 2951f78d358cSLeon Romanovsky err = process_vendor_flags(dev, qp, params.ucmd, attr); 2952b4aaa1f0SMoni Shoua if (err) 29539c2ba4edSLeon Romanovsky goto free_qp; 295421aad80bSLeon Romanovsky 2955f78d358cSLeon Romanovsky err = get_qp_uidx(qp, &params); 295621aad80bSLeon Romanovsky if (err) 295721aad80bSLeon Romanovsky goto free_qp; 2958b4aaa1f0SMoni Shoua } 2959f78d358cSLeon Romanovsky err = process_create_flags(dev, qp, attr); 29602978975cSLeon Romanovsky if (err) 29612978975cSLeon Romanovsky goto free_qp; 2962b4aaa1f0SMoni Shoua 2963f78d358cSLeon Romanovsky err = check_qp_attr(dev, qp, attr); 29647aede1a2SLeon Romanovsky if (err) 29657aede1a2SLeon Romanovsky goto free_qp; 29667aede1a2SLeon Romanovsky 2967968f0b6fSLeon Romanovsky err = create_qp(dev, pd, qp, &params); 2968968f0b6fSLeon Romanovsky if (err) 29699c2ba4edSLeon Romanovsky goto free_qp; 2970e126ba97SEli Cohen 2971f78d358cSLeon Romanovsky kfree(params.ucmd); 297208d53976SLeon Romanovsky params.ucmd = NULL; 29735ce0592bSLeon Romanovsky 297408d53976SLeon Romanovsky if (udata) 297508d53976SLeon Romanovsky /* 297608d53976SLeon Romanovsky * It is safe to copy response for all user create QP flows, 297708d53976SLeon Romanovsky * including MLX5_IB_QPT_DCT, which doesn't need it. 297808d53976SLeon Romanovsky * In that case, resp will be filled with zeros.
297908d53976SLeon Romanovsky */ 298008d53976SLeon Romanovsky err = ib_copy_to_udata(udata, &params.resp, params.outlen); 298108d53976SLeon Romanovsky if (err) 298208d53976SLeon Romanovsky goto destroy_qp; 298308d53976SLeon Romanovsky 2984e126ba97SEli Cohen return &qp->ibqp; 29859c2ba4edSLeon Romanovsky 298608d53976SLeon Romanovsky destroy_qp: 298708d53976SLeon Romanovsky if (qp->type == MLX5_IB_QPT_DCT) 298808d53976SLeon Romanovsky mlx5_ib_destroy_dct(qp); 298908d53976SLeon Romanovsky else 299008d53976SLeon Romanovsky destroy_qp_common(dev, qp, udata); 299108d53976SLeon Romanovsky qp = NULL; 29929c2ba4edSLeon Romanovsky free_qp: 29939c2ba4edSLeon Romanovsky kfree(qp); 29945ce0592bSLeon Romanovsky free_ucmd: 2995f78d358cSLeon Romanovsky kfree(params.ucmd); 29969c2ba4edSLeon Romanovsky return ERR_PTR(err); 2997e126ba97SEli Cohen } 2998e126ba97SEli Cohen 2999c4367a26SShamir Rabinovitch int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) 3000e126ba97SEli Cohen { 3001e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(qp->device); 3002e126ba97SEli Cohen struct mlx5_ib_qp *mqp = to_mqp(qp); 3003e126ba97SEli Cohen 3004d16e91daSHaggai Eran if (unlikely(qp->qp_type == IB_QPT_GSI)) 3005d16e91daSHaggai Eran return mlx5_ib_gsi_destroy_qp(qp); 3006d16e91daSHaggai Eran 30077aede1a2SLeon Romanovsky if (mqp->type == MLX5_IB_QPT_DCT) 3008776a3906SMoni Shoua return mlx5_ib_destroy_dct(mqp); 3009776a3906SMoni Shoua 3010bdeacabdSShamir Rabinovitch destroy_qp_common(dev, mqp, udata); 3011e126ba97SEli Cohen 3012e126ba97SEli Cohen kfree(mqp); 3013e126ba97SEli Cohen 3014e126ba97SEli Cohen return 0; 3015e126ba97SEli Cohen } 3016e126ba97SEli Cohen 3017a60109dcSYonatan Cohen static int to_mlx5_access_flags(struct mlx5_ib_qp *qp, 3018a60109dcSYonatan Cohen const struct ib_qp_attr *attr, 3019bf3b4f06SBart Van Assche int attr_mask, __be32 *hw_access_flags_be) 3020e126ba97SEli Cohen { 3021e126ba97SEli Cohen u8 dest_rd_atomic; 3022bf3b4f06SBart Van Assche u32 access_flags, hw_access_flags = 0; 3023e126ba97SEli Cohen 3024a60109dcSYonatan Cohen struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); 3025a60109dcSYonatan Cohen 3026e126ba97SEli Cohen if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 3027e126ba97SEli Cohen dest_rd_atomic = attr->max_dest_rd_atomic; 3028e126ba97SEli Cohen else 302919098df2Smajd@mellanox.com dest_rd_atomic = qp->trans_qp.resp_depth; 3030e126ba97SEli Cohen 3031e126ba97SEli Cohen if (attr_mask & IB_QP_ACCESS_FLAGS) 3032e126ba97SEli Cohen access_flags = attr->qp_access_flags; 3033e126ba97SEli Cohen else 303419098df2Smajd@mellanox.com access_flags = qp->trans_qp.atomic_rd_en; 3035e126ba97SEli Cohen 3036e126ba97SEli Cohen if (!dest_rd_atomic) 3037e126ba97SEli Cohen access_flags &= IB_ACCESS_REMOTE_WRITE; 3038e126ba97SEli Cohen 3039e126ba97SEli Cohen if (access_flags & IB_ACCESS_REMOTE_READ) 3040bf3b4f06SBart Van Assche hw_access_flags |= MLX5_QP_BIT_RRE; 304113f8d9c1SYonatan Cohen if (access_flags & IB_ACCESS_REMOTE_ATOMIC) { 3042a60109dcSYonatan Cohen int atomic_mode; 3043e126ba97SEli Cohen 3044a60109dcSYonatan Cohen atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type); 3045a60109dcSYonatan Cohen if (atomic_mode < 0) 3046a60109dcSYonatan Cohen return -EOPNOTSUPP; 3047a60109dcSYonatan Cohen 3048bf3b4f06SBart Van Assche hw_access_flags |= MLX5_QP_BIT_RAE; 3049bf3b4f06SBart Van Assche hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET; 3050a60109dcSYonatan Cohen } 3051a60109dcSYonatan Cohen 3052a60109dcSYonatan Cohen if (access_flags & IB_ACCESS_REMOTE_WRITE) 3053bf3b4f06SBart Van Assche
hw_access_flags |= MLX5_QP_BIT_RWE; 3054a60109dcSYonatan Cohen 3055bf3b4f06SBart Van Assche *hw_access_flags_be = cpu_to_be32(hw_access_flags); 3056a60109dcSYonatan Cohen 3057a60109dcSYonatan Cohen return 0; 3058e126ba97SEli Cohen } 3059e126ba97SEli Cohen 3060e126ba97SEli Cohen enum { 3061e126ba97SEli Cohen MLX5_PATH_FLAG_FL = 1 << 0, 3062e126ba97SEli Cohen MLX5_PATH_FLAG_FREE_AR = 1 << 1, 3063e126ba97SEli Cohen MLX5_PATH_FLAG_COUNTER = 1 << 2, 3064e126ba97SEli Cohen }; 3065e126ba97SEli Cohen 3066e126ba97SEli Cohen static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) 3067e126ba97SEli Cohen { 30684f32ac2eSDanit Goldberg if (rate == IB_RATE_PORT_CURRENT) 3069e126ba97SEli Cohen return 0; 30704f32ac2eSDanit Goldberg 3071a5a5d199SMichael Guralnik if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS) 3072e126ba97SEli Cohen return -EINVAL; 30734f32ac2eSDanit Goldberg 30744f32ac2eSDanit Goldberg while (rate != IB_RATE_PORT_CURRENT && 3075e126ba97SEli Cohen !(1 << (rate + MLX5_STAT_RATE_OFFSET) & 3076938fe83cSSaeed Mahameed MLX5_CAP_GEN(dev->mdev, stat_rate_support))) 3077e126ba97SEli Cohen --rate; 3078e126ba97SEli Cohen 30794f32ac2eSDanit Goldberg return rate ? rate + MLX5_STAT_RATE_OFFSET : rate; 3080e126ba97SEli Cohen } 3081e126ba97SEli Cohen 308275850d0bSmajd@mellanox.com static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, 30831cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u8 sl, 30841cd6dbd3SYishai Hadas struct ib_pd *pd) 308575850d0bSmajd@mellanox.com { 308675850d0bSmajd@mellanox.com void *in; 308775850d0bSmajd@mellanox.com void *tisc; 308875850d0bSmajd@mellanox.com int inlen; 308975850d0bSmajd@mellanox.com int err; 309075850d0bSmajd@mellanox.com 309175850d0bSmajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(modify_tis_in); 30921b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 309375850d0bSmajd@mellanox.com if (!in) 309475850d0bSmajd@mellanox.com return -ENOMEM; 309575850d0bSmajd@mellanox.com 309675850d0bSmajd@mellanox.com MLX5_SET(modify_tis_in, in, bitmask.prio, 1); 30971cd6dbd3SYishai Hadas MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid); 309875850d0bSmajd@mellanox.com 309975850d0bSmajd@mellanox.com tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); 310075850d0bSmajd@mellanox.com MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1)); 310175850d0bSmajd@mellanox.com 3102e0b4b472SLeon Romanovsky err = mlx5_core_modify_tis(dev, sq->tisn, in); 310375850d0bSmajd@mellanox.com 310475850d0bSmajd@mellanox.com kvfree(in); 310575850d0bSmajd@mellanox.com 310675850d0bSmajd@mellanox.com return err; 310775850d0bSmajd@mellanox.com } 310875850d0bSmajd@mellanox.com 310913eab21fSAviv Heller static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, 31101cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u8 tx_affinity, 31111cd6dbd3SYishai Hadas struct ib_pd *pd) 311213eab21fSAviv Heller { 311313eab21fSAviv Heller void *in; 311413eab21fSAviv Heller void *tisc; 311513eab21fSAviv Heller int inlen; 311613eab21fSAviv Heller int err; 311713eab21fSAviv Heller 311813eab21fSAviv Heller inlen = MLX5_ST_SZ_BYTES(modify_tis_in); 31191b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 312013eab21fSAviv Heller if (!in) 312113eab21fSAviv Heller return -ENOMEM; 312213eab21fSAviv Heller 312313eab21fSAviv Heller MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1); 31241cd6dbd3SYishai Hadas MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid); 312513eab21fSAviv Heller 312613eab21fSAviv Heller tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); 312713eab21fSAviv Heller MLX5_SET(tisc, tisc, 
lag_tx_port_affinity, tx_affinity); 312813eab21fSAviv Heller 3129e0b4b472SLeon Romanovsky err = mlx5_core_modify_tis(dev, sq->tisn, in); 313013eab21fSAviv Heller 313113eab21fSAviv Heller kvfree(in); 313213eab21fSAviv Heller 313313eab21fSAviv Heller return err; 313413eab21fSAviv Heller } 313513eab21fSAviv Heller 313675850d0bSmajd@mellanox.com static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 313790898850SDasaratharaman Chandramouli const struct rdma_ah_attr *ah, 3138e126ba97SEli Cohen struct mlx5_qp_path *path, u8 port, int attr_mask, 3139f879ee8dSAchiad Shochat u32 path_flags, const struct ib_qp_attr *attr, 3140f879ee8dSAchiad Shochat bool alt) 3141e126ba97SEli Cohen { 3142d8966fcdSDasaratharaman Chandramouli const struct ib_global_route *grh = rdma_ah_read_grh(ah); 3143e126ba97SEli Cohen int err; 3144ed88451eSMajd Dibbiny enum ib_gid_type gid_type; 3145d8966fcdSDasaratharaman Chandramouli u8 ah_flags = rdma_ah_get_ah_flags(ah); 3146d8966fcdSDasaratharaman Chandramouli u8 sl = rdma_ah_get_sl(ah); 3147e126ba97SEli Cohen 3148e126ba97SEli Cohen if (attr_mask & IB_QP_PKEY_INDEX) 3149f879ee8dSAchiad Shochat path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index : 3150f879ee8dSAchiad Shochat attr->pkey_index); 3151e126ba97SEli Cohen 3152d8966fcdSDasaratharaman Chandramouli if (ah_flags & IB_AH_GRH) { 3153d8966fcdSDasaratharaman Chandramouli if (grh->sgid_index >= 3154938fe83cSSaeed Mahameed dev->mdev->port_caps[port - 1].gid_table_len) { 3155f4f01b54SJoe Perches pr_err("sgid_index (%u) too large. max is %d\n", 3156d8966fcdSDasaratharaman Chandramouli grh->sgid_index, 3157938fe83cSSaeed Mahameed dev->mdev->port_caps[port - 1].gid_table_len); 3158f83b4263SEli Cohen return -EINVAL; 3159f83b4263SEli Cohen } 31602811ba51SAchiad Shochat } 316144c58487SDasaratharaman Chandramouli 316244c58487SDasaratharaman Chandramouli if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { 3163d8966fcdSDasaratharaman Chandramouli if (!(ah_flags & IB_AH_GRH)) 31642811ba51SAchiad Shochat return -EINVAL; 316547ec3866SParav Pandit 316644c58487SDasaratharaman Chandramouli memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac)); 31672b621851SMajd Dibbiny if (qp->ibqp.qp_type == IB_QPT_RC || 31682b621851SMajd Dibbiny qp->ibqp.qp_type == IB_QPT_UC || 31692b621851SMajd Dibbiny qp->ibqp.qp_type == IB_QPT_XRC_INI || 31702b621851SMajd Dibbiny qp->ibqp.qp_type == IB_QPT_XRC_TGT) 317147ec3866SParav Pandit path->udp_sport = 317247ec3866SParav Pandit mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr); 3173d8966fcdSDasaratharaman Chandramouli path->dci_cfi_prio_sl = (sl & 0x7) << 4; 317447ec3866SParav Pandit gid_type = ah->grh.sgid_attr->gid_type; 3175ed88451eSMajd Dibbiny if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3176d8966fcdSDasaratharaman Chandramouli path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f; 31772811ba51SAchiad Shochat } else { 3178d3ae2bdeSNoa Osherovich path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; 3179d3ae2bdeSNoa Osherovich path->fl_free_ar |= 3180d3ae2bdeSNoa Osherovich (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 
0x40 : 0; 3181d8966fcdSDasaratharaman Chandramouli path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah)); 3182d8966fcdSDasaratharaman Chandramouli path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f; 3183d8966fcdSDasaratharaman Chandramouli if (ah_flags & IB_AH_GRH) 3184e126ba97SEli Cohen path->grh_mlid |= 1 << 7; 3185d8966fcdSDasaratharaman Chandramouli path->dci_cfi_prio_sl = sl & 0xf; 31862811ba51SAchiad Shochat } 31872811ba51SAchiad Shochat 3188d8966fcdSDasaratharaman Chandramouli if (ah_flags & IB_AH_GRH) { 3189d8966fcdSDasaratharaman Chandramouli path->mgid_index = grh->sgid_index; 3190d8966fcdSDasaratharaman Chandramouli path->hop_limit = grh->hop_limit; 3191e126ba97SEli Cohen path->tclass_flowlabel = 3192d8966fcdSDasaratharaman Chandramouli cpu_to_be32((grh->traffic_class << 20) | 3193d8966fcdSDasaratharaman Chandramouli (grh->flow_label)); 3194d8966fcdSDasaratharaman Chandramouli memcpy(path->rgid, grh->dgid.raw, 16); 3195e126ba97SEli Cohen } 3196e126ba97SEli Cohen 3197d8966fcdSDasaratharaman Chandramouli err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah)); 3198e126ba97SEli Cohen if (err < 0) 3199e126ba97SEli Cohen return err; 3200e126ba97SEli Cohen path->static_rate = err; 3201e126ba97SEli Cohen path->port = port; 3202e126ba97SEli Cohen 3203e126ba97SEli Cohen if (attr_mask & IB_QP_TIMEOUT) 3204f879ee8dSAchiad Shochat path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3; 3205e126ba97SEli Cohen 320675850d0bSmajd@mellanox.com if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) 320775850d0bSmajd@mellanox.com return modify_raw_packet_eth_prio(dev->mdev, 320875850d0bSmajd@mellanox.com &qp->raw_packet_qp.sq, 32091cd6dbd3SYishai Hadas sl & 0xf, qp->ibqp.pd); 321075850d0bSmajd@mellanox.com 3211e126ba97SEli Cohen return 0; 3212e126ba97SEli Cohen } 3213e126ba97SEli Cohen 3214e126ba97SEli Cohen static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = { 3215e126ba97SEli Cohen [MLX5_QP_STATE_INIT] = { 3216e126ba97SEli Cohen [MLX5_QP_STATE_INIT] = { 3217e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | 3218e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3219e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3220e126ba97SEli Cohen MLX5_QP_OPTPAR_PKEY_INDEX | 3221e126ba97SEli Cohen MLX5_QP_OPTPAR_PRI_PORT, 3222e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | 3223e126ba97SEli Cohen MLX5_QP_OPTPAR_PKEY_INDEX | 3224e126ba97SEli Cohen MLX5_QP_OPTPAR_PRI_PORT, 3225e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | 3226e126ba97SEli Cohen MLX5_QP_OPTPAR_Q_KEY | 3227e126ba97SEli Cohen MLX5_QP_OPTPAR_PRI_PORT, 32288f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE | 32298f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 32308f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 32318f4426aaSJack Morgenstein MLX5_QP_OPTPAR_PKEY_INDEX | 32328f4426aaSJack Morgenstein MLX5_QP_OPTPAR_PRI_PORT, 3233e126ba97SEli Cohen }, 3234e126ba97SEli Cohen [MLX5_QP_STATE_RTR] = { 3235e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3236e126ba97SEli Cohen MLX5_QP_OPTPAR_RRE | 3237e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3238e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3239e126ba97SEli Cohen MLX5_QP_OPTPAR_PKEY_INDEX, 3240e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3241e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3242e126ba97SEli Cohen MLX5_QP_OPTPAR_PKEY_INDEX, 3243e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | 3244e126ba97SEli Cohen MLX5_QP_OPTPAR_Q_KEY, 3245e126ba97SEli Cohen [MLX5_QP_ST_MLX] = 
MLX5_QP_OPTPAR_PKEY_INDEX | 3246e126ba97SEli Cohen MLX5_QP_OPTPAR_Q_KEY, 3247a4774e90SEli Cohen [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3248a4774e90SEli Cohen MLX5_QP_OPTPAR_RRE | 3249a4774e90SEli Cohen MLX5_QP_OPTPAR_RAE | 3250a4774e90SEli Cohen MLX5_QP_OPTPAR_RWE | 3251a4774e90SEli Cohen MLX5_QP_OPTPAR_PKEY_INDEX, 3252e126ba97SEli Cohen }, 3253e126ba97SEli Cohen }, 3254e126ba97SEli Cohen [MLX5_QP_STATE_RTR] = { 3255e126ba97SEli Cohen [MLX5_QP_STATE_RTS] = { 3256e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3257e126ba97SEli Cohen MLX5_QP_OPTPAR_RRE | 3258e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3259e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3260e126ba97SEli Cohen MLX5_QP_OPTPAR_PM_STATE | 3261e126ba97SEli Cohen MLX5_QP_OPTPAR_RNR_TIMEOUT, 3262e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 3263e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3264e126ba97SEli Cohen MLX5_QP_OPTPAR_PM_STATE, 3265e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 32668f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 32678f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RRE | 32688f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 32698f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 32708f4426aaSJack Morgenstein MLX5_QP_OPTPAR_PM_STATE | 32718f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RNR_TIMEOUT, 3272e126ba97SEli Cohen }, 3273e126ba97SEli Cohen }, 3274e126ba97SEli Cohen [MLX5_QP_STATE_RTS] = { 3275e126ba97SEli Cohen [MLX5_QP_STATE_RTS] = { 3276e126ba97SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | 3277e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE | 3278e126ba97SEli Cohen MLX5_QP_OPTPAR_RWE | 3279e126ba97SEli Cohen MLX5_QP_OPTPAR_RNR_TIMEOUT | 3280c2a3431eSEli Cohen MLX5_QP_OPTPAR_PM_STATE | 3281c2a3431eSEli Cohen MLX5_QP_OPTPAR_ALT_ADDR_PATH, 3282e126ba97SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | 3283c2a3431eSEli Cohen MLX5_QP_OPTPAR_PM_STATE | 3284c2a3431eSEli Cohen MLX5_QP_OPTPAR_ALT_ADDR_PATH, 3285e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | 3286e126ba97SEli Cohen MLX5_QP_OPTPAR_SRQN | 3287e126ba97SEli Cohen MLX5_QP_OPTPAR_CQN_RCV, 32888f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE | 32898f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 32908f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 32918f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RNR_TIMEOUT | 32928f4426aaSJack Morgenstein MLX5_QP_OPTPAR_PM_STATE | 32938f4426aaSJack Morgenstein MLX5_QP_OPTPAR_ALT_ADDR_PATH, 3294e126ba97SEli Cohen }, 3295e126ba97SEli Cohen }, 3296e126ba97SEli Cohen [MLX5_QP_STATE_SQER] = { 3297e126ba97SEli Cohen [MLX5_QP_STATE_RTS] = { 3298e126ba97SEli Cohen [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 3299e126ba97SEli Cohen [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, 330075959f56SEli Cohen [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE, 3301a4774e90SEli Cohen [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | 3302a4774e90SEli Cohen MLX5_QP_OPTPAR_RWE | 3303a4774e90SEli Cohen MLX5_QP_OPTPAR_RAE | 3304a4774e90SEli Cohen MLX5_QP_OPTPAR_RRE, 33058f4426aaSJack Morgenstein [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | 33068f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RWE | 33078f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RAE | 33088f4426aaSJack Morgenstein MLX5_QP_OPTPAR_RRE, 3309e126ba97SEli Cohen }, 3310e126ba97SEli Cohen }, 3311e126ba97SEli Cohen }; 3312e126ba97SEli Cohen 3313e126ba97SEli Cohen static int ib_nr_to_mlx5_nr(int ib_mask) 3314e126ba97SEli Cohen { 3315e126ba97SEli Cohen switch (ib_mask) { 3316e126ba97SEli Cohen case IB_QP_STATE: 3317e126ba97SEli Cohen return 0; 
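/* Masks that map to 0 here (state, PSNs, MTU, caps) are carried in the QP context of the modify command itself, not in the optional-parameter bitmask. */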
3318e126ba97SEli Cohen case IB_QP_CUR_STATE: 3319e126ba97SEli Cohen return 0; 3320e126ba97SEli Cohen case IB_QP_EN_SQD_ASYNC_NOTIFY: 3321e126ba97SEli Cohen return 0; 3322e126ba97SEli Cohen case IB_QP_ACCESS_FLAGS: 3323e126ba97SEli Cohen return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE | 3324e126ba97SEli Cohen MLX5_QP_OPTPAR_RAE; 3325e126ba97SEli Cohen case IB_QP_PKEY_INDEX: 3326e126ba97SEli Cohen return MLX5_QP_OPTPAR_PKEY_INDEX; 3327e126ba97SEli Cohen case IB_QP_PORT: 3328e126ba97SEli Cohen return MLX5_QP_OPTPAR_PRI_PORT; 3329e126ba97SEli Cohen case IB_QP_QKEY: 3330e126ba97SEli Cohen return MLX5_QP_OPTPAR_Q_KEY; 3331e126ba97SEli Cohen case IB_QP_AV: 3332e126ba97SEli Cohen return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH | 3333e126ba97SEli Cohen MLX5_QP_OPTPAR_PRI_PORT; 3334e126ba97SEli Cohen case IB_QP_PATH_MTU: 3335e126ba97SEli Cohen return 0; 3336e126ba97SEli Cohen case IB_QP_TIMEOUT: 3337e126ba97SEli Cohen return MLX5_QP_OPTPAR_ACK_TIMEOUT; 3338e126ba97SEli Cohen case IB_QP_RETRY_CNT: 3339e126ba97SEli Cohen return MLX5_QP_OPTPAR_RETRY_COUNT; 3340e126ba97SEli Cohen case IB_QP_RNR_RETRY: 3341e126ba97SEli Cohen return MLX5_QP_OPTPAR_RNR_RETRY; 3342e126ba97SEli Cohen case IB_QP_RQ_PSN: 3343e126ba97SEli Cohen return 0; 3344e126ba97SEli Cohen case IB_QP_MAX_QP_RD_ATOMIC: 3345e126ba97SEli Cohen return MLX5_QP_OPTPAR_SRA_MAX; 3346e126ba97SEli Cohen case IB_QP_ALT_PATH: 3347e126ba97SEli Cohen return MLX5_QP_OPTPAR_ALT_ADDR_PATH; 3348e126ba97SEli Cohen case IB_QP_MIN_RNR_TIMER: 3349e126ba97SEli Cohen return MLX5_QP_OPTPAR_RNR_TIMEOUT; 3350e126ba97SEli Cohen case IB_QP_SQ_PSN: 3351e126ba97SEli Cohen return 0; 3352e126ba97SEli Cohen case IB_QP_MAX_DEST_RD_ATOMIC: 3353e126ba97SEli Cohen return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE | 3354e126ba97SEli Cohen MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE; 3355e126ba97SEli Cohen case IB_QP_PATH_MIG_STATE: 3356e126ba97SEli Cohen return MLX5_QP_OPTPAR_PM_STATE; 3357e126ba97SEli Cohen case IB_QP_CAP: 3358e126ba97SEli Cohen return 0; 3359e126ba97SEli Cohen case IB_QP_DEST_QPN: 3360e126ba97SEli Cohen return 0; 3361e126ba97SEli Cohen } 3362e126ba97SEli Cohen return 0; 3363e126ba97SEli Cohen } 3364e126ba97SEli Cohen 3365e126ba97SEli Cohen static int ib_mask_to_mlx5_opt(int ib_mask) 3366e126ba97SEli Cohen { 3367e126ba97SEli Cohen int result = 0; 3368e126ba97SEli Cohen int i; 3369e126ba97SEli Cohen 3370e126ba97SEli Cohen for (i = 0; i < 8 * sizeof(int); i++) { 3371e126ba97SEli Cohen if ((1 << i) & ib_mask) 3372e126ba97SEli Cohen result |= ib_nr_to_mlx5_nr(1 << i); 3373e126ba97SEli Cohen } 3374e126ba97SEli Cohen 3375e126ba97SEli Cohen return result; 3376e126ba97SEli Cohen } 3377e126ba97SEli Cohen 337834d57585SYishai Hadas static int modify_raw_packet_qp_rq( 337934d57585SYishai Hadas struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state, 338034d57585SYishai Hadas const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd) 3381ad5f8e96Smajd@mellanox.com { 3382ad5f8e96Smajd@mellanox.com void *in; 3383ad5f8e96Smajd@mellanox.com void *rqc; 3384ad5f8e96Smajd@mellanox.com int inlen; 3385ad5f8e96Smajd@mellanox.com int err; 3386ad5f8e96Smajd@mellanox.com 3387ad5f8e96Smajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 33881b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 3389ad5f8e96Smajd@mellanox.com if (!in) 3390ad5f8e96Smajd@mellanox.com return -ENOMEM; 3391ad5f8e96Smajd@mellanox.com 3392ad5f8e96Smajd@mellanox.com MLX5_SET(modify_rq_in, in, rq_state, rq->state); 339334d57585SYishai Hadas MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid); 
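/* The PD owner's uid is carried in the command so firmware attributes the RQ modification to the issuing user context. */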
static int modify_raw_packet_qp_rq(
	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
	MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			dev_info_once(
				&dev->ib_dev.dev,
				"RAW PACKET QP counters are not supported on current FW\n");
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}
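/*
 * Rate limits are reference-counted entries in a device-global table.
 * In modify_raw_packet_qp_sq() below the ordering matters: a new rate is
 * added to the table before MODIFY_SQ is issued, and the old rate is
 * released only after the firmware has accepted the new one, so the SQ
 * never references a freed rate-limit entry; on failure the freshly-added
 * rate is removed again.
 */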
static int modify_raw_packet_qp_sq(
	struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	struct mlx5_rate_limit old_rl = ibqp->rl;
	struct mlx5_rate_limit new_rl = old_rl;
	bool new_rate_added = false;
	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
				__func__);
		else
			new_rl = raw_qp_param->rl;
	}

	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
		if (new_rl.rate) {
			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
			if (err) {
				pr_err("Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
				       err, new_rl.rate, new_rl.max_burst_sz,
				       new_rl.typical_pkt_sz);

				goto out;
			}
			new_rate_added = true;
		}

		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		/* index 0 means no limit */
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
	if (err) {
		/* Remove new rate from table if failed */
		if (new_rate_added)
			mlx5_rl_remove_rate(dev, &new_rl);
		goto out;
	}

	/* Only remove the old rate after new rate was set */
	if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
	    (new_state != MLX5_SQC_STATE_RDY)) {
		mlx5_rl_remove_rate(dev, &old_rl);
		if (new_state != MLX5_SQC_STATE_RDY)
			memset(&new_rl, 0, sizeof(new_rl));
	}

	ibqp->rl = new_rl;
	sq->state = new_state;

out:
	kvfree(in);
	return err;
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	int modify_rq = !!qp->rq.wqe_cnt;
	int modify_sq = !!qp->sq.wqe_cnt;
	int rq_state;
	int sq_state;
	int err;

	switch (raw_qp_param->operation) {
	case MLX5_CMD_OP_RST2INIT_QP:
		rq_state = MLX5_RQC_STATE_RDY;
		sq_state = MLX5_SQC_STATE_RDY;
		break;
	case MLX5_CMD_OP_2ERR_QP:
		rq_state = MLX5_RQC_STATE_ERR;
		sq_state = MLX5_SQC_STATE_ERR;
		break;
	case MLX5_CMD_OP_2RST_QP:
		rq_state = MLX5_RQC_STATE_RST;
		sq_state = MLX5_SQC_STATE_RST;
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (raw_qp_param->set_mask == MLX5_RAW_QP_RATE_LIMIT) {
			modify_rq = 0;
			sq_state = sq->state;
		} else {
			return raw_qp_param->set_mask ? -EINVAL : 0;
		}
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (raw_qp_param->set_mask)
			return -EINVAL;
		else
			return 0;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (modify_rq) {
		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
					      qp->ibqp.pd);
		if (err)
			return err;
	}

	if (modify_sq) {
		struct mlx5_flow_handle *flow_rule;

		if (tx_affinity) {
			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
							    tx_affinity,
							    qp->ibqp.pd);
			if (err)
				return err;
		}

		flow_rule = create_flow_rule_vport_sq(dev, sq,
						      raw_qp_param->port);
		if (IS_ERR(flow_rule))
			return PTR_ERR(flow_rule);

		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
					      raw_qp_param, qp->ibqp.pd);
		if (err) {
			if (flow_rule)
				mlx5_del_flow_rules(flow_rule);
			return err;
		}

		if (flow_rule) {
			destroy_flow_rule_vport_sq(sq);
			sq->flow_rule = flow_rule;
		}

		return err;
	}

	return 0;
}

static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
				       struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	atomic_t *tx_port_affinity;

	if (ucontext)
		tx_port_affinity = &ucontext->tx_port_affinity;
	else
		tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;

	return (unsigned int)atomic_add_return(1, tx_port_affinity) %
		MLX5_MAX_PORTS + 1;
}
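/*
 * qp_supports_affinity() and get_tx_affinity() below decide whether a LAG
 * tx port affinity is programmed at all: unless LAG is active and the QP
 * is transitioning out of RESET, 0 is returned and no explicit affinity
 * is set; otherwise get_tx_affinity_rr() above picks a port in
 * 1..MLX5_MAX_PORTS, per user context when one exists, so one process's
 * QPs are spread across the ports independently of other processes.
 */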
static bool qp_supports_affinity(struct ib_qp *qp)
{
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if ((qp->qp_type == IB_QPT_RC) ||
	    (qp->qp_type == IB_QPT_UD &&
	     !(mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)) ||
	    (qp->qp_type == IB_QPT_UC) ||
	    (qp->qp_type == IB_QPT_RAW_PACKET) ||
	    (qp->qp_type == IB_QPT_XRC_INI) ||
	    (qp->qp_type == IB_QPT_XRC_TGT))
		return true;
	return false;
}

static unsigned int get_tx_affinity(struct ib_qp *qp, u8 init,
				    struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_qp_base *qp_base;
	unsigned int tx_affinity;

	if (!(dev->lag_active && init && qp_supports_affinity(qp)))
		return 0;

	tx_affinity = get_tx_affinity_rr(dev, udata);

	qp_base = &mqp->trans_qp.base;
	if (ucontext)
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
			    tx_affinity, qp_base->mqp.qpn, ucontext);
	else
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
			    tx_affinity, qp_base->mqp.qpn);
	return tx_affinity;
}

static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
				    struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_qp_context context = {};
	struct mlx5_ib_qp_base *base;
	u32 set_id;

	if (counter)
		set_id = counter->id;
	else
		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);

	base = &mqp->trans_qp.base;
	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
	context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
	return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RTS2RTS_QP,
				   MLX5_QP_OPTPAR_COUNTER_SET_ID, &context,
				   &base->mqp);
}
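/*
 * The optab table below maps a cur -> new state transition to the firmware
 * opcode that implements it; a zero entry means the transition is not
 * supported and is rejected before anything is sent to the device.
 */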
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state,
			       enum ib_qp_state new_state,
			       const struct mlx5_ib_modify_qp *ucmd,
			       struct ib_udata *udata)
{
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		}
	};

	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	u32 set_id = 0;
	int mlx5_st;
	int err;
	u16 op;
	u8 tx_affinity = 0;

	mlx5_st = to_mlx5_st(qp->type);
	if (mlx5_st < 0)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	pd = get_pd(qp);
	context->flags = cpu_to_be32(mlx5_st << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	tx_affinity = get_tx_affinity(ibqp,
				      cur_state == IB_QPS_RESET &&
				      new_state == IB_QPS_INIT, udata);
	context->flags |= cpu_to_be32(tx_affinity << 24);

	if (is_sqp(ibqp->qp_type)) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if ((ibqp->qp_type == IB_QPT_UD &&
		    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;
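	/*
	 * mlx5_set_path() (defined earlier in this file) converts the IB
	 * address handle attributes to the device address-path format, both
	 * for the primary path here and for the alternate path below.
	 */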
	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num :
							     qp->port,
				    attr_mask, 0, attr, false);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
				    &context->alt_path,
				    attr->alt_port_num,
				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
				    0, attr, true);
		if (err)
			goto out;
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		__be32 access_flags;

		err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
		if (err)
			goto out;

		context->params2 |= access_flags;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);
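	/*
	 * fls(x - 1) above rounds the rd_atomic depths up to the
	 * power-of-two exponent the hardware expects. The counter set id
	 * below is latched once, on the RESET->INIT transition: a counter
	 * already bound through ibqp->counter takes precedence, otherwise
	 * the per-port default id is used.
	 */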
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
			       qp->port) - 1;

		/* Underlay port should be used - index 0 function per port */
		if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
			port_num = 0;

		if (ibqp->counter)
			set_id = ibqp->counter->id;
		else
			set_id = mlx5_ib_get_counters_id(dev, port_num);
		context->qp_counter_set_usr_page |=
			cpu_to_be32(set_id << 24);
	}

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);

	if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
		context->deth_sqpn = cpu_to_be32(1);

	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);

	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
	    !optab[mlx5_cur][mlx5_new]) {
		err = -EINVAL;
		goto out;
	}

	op = optab[mlx5_cur][mlx5_new];
	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
		struct mlx5_modify_raw_qp_param raw_qp_param = {};

		raw_qp_param.operation = op;
		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
			raw_qp_param.rq_q_ctr_id = set_id;
			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
		}

		if (attr_mask & IB_QP_PORT)
			raw_qp_param.port = attr->port_num;
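		/*
		 * The optional burst parameters from the user command are
		 * honoured only when a rate is actually being set and the
		 * firmware advertises the matching packet-pacing capability;
		 * otherwise the request fails instead of being silently
		 * ignored.
		 */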
		if (attr_mask & IB_QP_RATE_LIMIT) {
			raw_qp_param.rl.rate = attr->rate_limit;

			if (ucmd->burst_info.max_burst_sz) {
				if (attr->rate_limit &&
				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
					raw_qp_param.rl.max_burst_sz =
						ucmd->burst_info.max_burst_sz;
				} else {
					err = -EINVAL;
					goto out;
				}
			}

			if (ucmd->burst_info.typical_pkt_sz) {
				if (attr->rate_limit &&
				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
					raw_qp_param.rl.typical_pkt_sz =
						ucmd->burst_info.typical_pkt_sz;
				} else {
					err = -EINVAL;
					goto out;
				}
			}

			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
		}

		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
	} else {
		err = mlx5_core_qp_modify(dev, op, optpar, context, &base->mqp);
	}

	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->trans_qp.alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET &&
	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		if (qp->sq.wqe_cnt)
			qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

	if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
		err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
		if (!err)
			qp->counter_pending = 0;
	}

out:
	kfree(context);
	return err;
}

static inline bool is_valid_mask(int mask, int req, int opt)
{
	if ((mask & req) != req)
		return false;

	if (mask & ~(req | opt))
		return false;

	return true;
}
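/*
 * For example, is_valid_mask(IB_QP_STATE | IB_QP_PORT, IB_QP_STATE,
 * IB_QP_PORT) is true, while adding IB_QP_AV to the mask would fail the
 * second check, since IB_QP_AV is neither required nor optional there.
 */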
/* check valid transition for driver QP types
 * for now the only QP type that this function supports is DCI
 */
static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
				enum ib_qp_attr_mask attr_mask)
{
	int req = IB_QP_STATE;
	int opt = 0;

	if (new_state == IB_QPS_RESET) {
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		req |= IB_QP_PATH_MTU;
		opt = IB_QP_PKEY_INDEX | IB_QP_AV;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
		       IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
		return is_valid_mask(attr_mask, req, opt);
	}
	return false;
}
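/*
 * A DCT is special: its context is staged in qp->dct.in at creation time,
 * and the firmware object is created only on the INIT->RTR transition
 * below, once the address attributes are known. The resulting dctn is
 * copied back to userspace, and the DCT is torn down again if that copy
 * fails.
 */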
/* mlx5_ib_modify_dct: modify a DCT QP
 * valid transitions are:
 * RESET to INIT: must set access_flags, pkey_index and port
 * INIT  to RTR : must set min_rnr_timer, tclass, flow_label,
 *		  mtu, gid_index and hop_limit
 * Other transitions and attributes are illegal
 */
static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	enum ib_qp_state cur_state, new_state;
	int err = 0;
	int required = IB_QP_STATE;
	void *dctc;

	if (!(attr_mask & IB_QP_STATE))
		return -EINVAL;

	cur_state = qp->state;
	new_state = attr->qp_state;

	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		u16 set_id;

		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
		if (!is_valid_mask(attr_mask, required, 0))
			return -EINVAL;

		if (attr->port_num == 0 ||
		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
				    attr->port_num, dev->num_ports);
			return -EINVAL;
		}
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			MLX5_SET(dctc, dctc, rre, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			MLX5_SET(dctc, dctc, rwe, 1);
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
			int atomic_mode;

			atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
			if (atomic_mode < 0)
				return -EOPNOTSUPP;

			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
			MLX5_SET(dctc, dctc, rae, 1);
		}
		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
		MLX5_SET(dctc, dctc, port, attr->port_num);

		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
		MLX5_SET(dctc, dctc, counter_set_id, set_id);

	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		struct mlx5_ib_modify_qp_resp resp = {};
		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
		u32 min_resp_len = offsetof(typeof(resp), dctn) +
				   sizeof(resp.dctn);

		if (udata->outlen < min_resp_len)
			return -EINVAL;
		resp.response_length = min_resp_len;

		required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
		if (!is_valid_mask(attr_mask, required, 0))
			return -EINVAL;
		MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
		MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
		MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);

		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
					   MLX5_ST_SZ_BYTES(create_dct_in), out,
					   sizeof(out));
		if (err)
			return err;
		resp.dctn = qp->dct.mdct.mqp.qpn;
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_dct(dev, &qp->dct.mdct);
			return err;
		}
	} else {
		mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n",
			     cur_state, new_state);
		return -EINVAL;
	}
	if (err)
		qp->state = IB_QPS_ERR;
	else
		qp->state = new_state;
	return err;
}

int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_modify_qp ucmd = {};
	enum ib_qp_type qp_type;
	enum ib_qp_state cur_state, new_state;
	size_t required_cmd_sz;
	int err = -EINVAL;
	int port;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (udata && udata->inlen) {
		required_cmd_sz = offsetof(typeof(ucmd), reserved) +
				  sizeof(ucmd.reserved);
		if (udata->inlen < required_cmd_sz)
			return -EINVAL;

		if (udata->inlen > sizeof(ucmd) &&
		    !ib_is_udata_cleared(udata, sizeof(ucmd),
					 udata->inlen - sizeof(ucmd)))
			return -EOPNOTSUPP;

		if (ib_copy_from_udata(&ucmd, udata,
				       min(udata->inlen, sizeof(ucmd))))
			return -EFAULT;

		if (ucmd.comp_mask ||
		    memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
		    memchr_inv(&ucmd.burst_info.reserved, 0,
			       sizeof(ucmd.burst_info.reserved)))
			return -EOPNOTSUPP;
	}

	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);

	qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? IB_QPT_GSI :
								    qp->type;

	if (qp_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
	}

	if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
				    attr_mask);
			goto out;
		}
	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
		   qp_type != MLX5_IB_QPT_DCI &&
		   !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
				       attr_mask)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, ibqp->qp_type, attr_mask);
		goto out;
	} else if (qp_type == MLX5_IB_QPT_DCI &&
		   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
			    cur_state, new_state, qp_type, attr_mask);
		goto out;
	}
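	/*
	 * Range-check the remaining attributes against device limits:
	 * port_num against the port count, pkey_index against the port's
	 * P_Key table length, and the rd_atomic depths against
	 * log_max_ra_res_qp / log_max_ra_req_qp.
	 */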
	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > dev->num_ports)) {
		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
			    attr->port_num, dev->num_ports);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >=
		    dev->mdev->port_caps[port - 1].pkey_table_len) {
			mlx5_ib_dbg(dev, "invalid pkey index %d\n",
				    attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
			    attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
			    attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
				  new_state, &ucmd, udata);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
				   u32 wqe_sz, void **cur_edge)
{
	u32 idx;

	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);

	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}

/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
 * next nearby edge and get new address translation for current WQE position.
 * @sq - SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
					 u32 wqe_sz, void **cur_edge)
{
	if (likely(*seg != *cur_edge))
		return;

	_handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
}
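/*
 * The SQ is kept in a fragmented buffer, so a WQE under construction may
 * run into the end of the current fragment (the "edge") before it is
 * complete. The helpers above re-translate the write position to the start
 * of the next fragment; memcpy_send_wqe() below relies on them to copy
 * payloads that straddle fragments.
 */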
/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
 * pointers. At the end @seg is aligned to 16B regardless the copied size.
 * @sq - SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
				   void **seg, u32 *wqe_sz, const void *src,
				   size_t n)
{
	while (likely(n)) {
		size_t leftlen = *cur_edge - *seg;
		size_t copysz = min_t(size_t, leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);

		n -= copysz;
		src += copysz;
		stride = !n ? ALIGN(copysz, 16) : copysz;
		*seg += stride;
		*wqe_sz += stride >> 4;
		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
	}
}

static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
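/*
 * For LSO, set_eth_seg() below copies the packet headers inline into the
 * WQE's eth segment: as much as fits up to the current fragment edge is
 * copied directly, and memcpy_send_wqe() spills any remainder into the
 * following fragments.
 */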
static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			void **seg, int *size, void **cur_edge)
{
	struct mlx5_wqe_eth_seg *eseg = *seg;

	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));

	if (wr->send_flags & IB_SEND_IP_CSUM)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
				 MLX5_ETH_WQE_L4_CSUM;

	if (wr->opcode == IB_WR_LSO) {
		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
		size_t left, copysz;
		void *pdata = ud_wr->header;
		size_t stride;

		left = ud_wr->hlen;
		eseg->mss = cpu_to_be16(ud_wr->mss);
		eseg->inline_hdr.sz = cpu_to_be16(left);

		/* memcpy_send_wqe should get a 16B align address. Hence, we
		 * first copy up to the current edge and then, if needed,
		 * fall-through to memcpy_send_wqe.
		 */
		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
			       left);
		memcpy(eseg->inline_hdr.start, pdata, copysz);
		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
			       sizeof(eseg->inline_hdr.start) + copysz, 16);
		*size += stride / 16;
		*seg += stride;

		if (copysz < left) {
			handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
			left -= copysz;
			pdata += copysz;
			memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
					left);
		}

		return;
	}

	*seg += sizeof(struct mlx5_wqe_eth_seg);
	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     const struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}

static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

static __be64 frwr_mkey_mask(bool atomic)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		MLX5_MKEY_MASK_PAGE_SIZE |
		MLX5_MKEY_MASK_START_ADDR |
		MLX5_MKEY_MASK_EN_RINVAL |
		MLX5_MKEY_MASK_KEY |
		MLX5_MKEY_MASK_LR |
		MLX5_MKEY_MASK_LW |
		MLX5_MKEY_MASK_RR |
		MLX5_MKEY_MASK_RW |
		MLX5_MKEY_MASK_SMALL_FENCE |
		MLX5_MKEY_MASK_FREE;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}
static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		MLX5_MKEY_MASK_PAGE_SIZE |
		MLX5_MKEY_MASK_START_ADDR |
		MLX5_MKEY_MASK_EN_SIGERR |
		MLX5_MKEY_MASK_EN_RINVAL |
		MLX5_MKEY_MASK_KEY |
		MLX5_MKEY_MASK_LR |
		MLX5_MKEY_MASK_LW |
		MLX5_MKEY_MASK_RR |
		MLX5_MKEY_MASK_RW |
		MLX5_MKEY_MASK_SMALL_FENCE |
		MLX5_MKEY_MASK_FREE |
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}

static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;

	memset(umr, 0, sizeof(*umr));

	umr->flags = flags;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->mkey_mask = frwr_mkey_mask(atomic);
}

static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
	memset(umr, 0, sizeof(*umr));
	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
	umr->flags = MLX5_UMR_INLINE;
}
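/*
 * Each get_umr_*_mask() helper below contributes the MLX5_MKEY_MASK_* bits
 * for one aspect of a UMR operation (enable, disable, translation update,
 * access update, PD update). set_reg_umr_segment() ORs them together
 * according to wr->send_flags, telling the device which mkey fields this
 * WQE may modify.
 */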
result; 451356e11d62SNoa Osherovich 451431616255SArtemy Kovalyov result = MLX5_MKEY_MASK_PD; 451556e11d62SNoa Osherovich 451656e11d62SNoa Osherovich return cpu_to_be64(result); 451756e11d62SNoa Osherovich } 451856e11d62SNoa Osherovich 4519c8d75a98SMajd Dibbiny static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask) 4520c8d75a98SMajd Dibbiny { 4521c8d75a98SMajd Dibbiny if ((mask & MLX5_MKEY_MASK_PAGE_SIZE && 4522c8d75a98SMajd Dibbiny MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) || 4523c8d75a98SMajd Dibbiny (mask & MLX5_MKEY_MASK_A && 4524c8d75a98SMajd Dibbiny MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))) 4525c8d75a98SMajd Dibbiny return -EPERM; 4526c8d75a98SMajd Dibbiny return 0; 4527c8d75a98SMajd Dibbiny } 4528c8d75a98SMajd Dibbiny 4529c8d75a98SMajd Dibbiny static int set_reg_umr_segment(struct mlx5_ib_dev *dev, 4530c8d75a98SMajd Dibbiny struct mlx5_wqe_umr_ctrl_seg *umr, 4531f696bf6dSBart Van Assche const struct ib_send_wr *wr, int atomic) 4532968e78ddSHaggai Eran { 4533f696bf6dSBart Van Assche const struct mlx5_umr_wr *umrwr = umr_wr(wr); 4534968e78ddSHaggai Eran 4535968e78ddSHaggai Eran memset(umr, 0, sizeof(*umr)); 4536968e78ddSHaggai Eran 45376a053953SYishai Hadas if (!umrwr->ignore_free_state) { 4538968e78ddSHaggai Eran if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) 45396a053953SYishai Hadas /* fail if free */ 45406a053953SYishai Hadas umr->flags = MLX5_UMR_CHECK_FREE; 4541968e78ddSHaggai Eran else 45426a053953SYishai Hadas /* fail if not free */ 45436a053953SYishai Hadas umr->flags = MLX5_UMR_CHECK_NOT_FREE; 45446a053953SYishai Hadas } 4545968e78ddSHaggai Eran 454631616255SArtemy Kovalyov umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); 454731616255SArtemy Kovalyov if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { 454831616255SArtemy Kovalyov u64 offset = get_xlt_octo(umrwr->offset); 454931616255SArtemy Kovalyov 455031616255SArtemy Kovalyov umr->xlt_offset = cpu_to_be16(offset & 0xffff); 455131616255SArtemy Kovalyov umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16); 4552968e78ddSHaggai Eran umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; 4553968e78ddSHaggai Eran } 455456e11d62SNoa Osherovich if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) 455556e11d62SNoa Osherovich umr->mkey_mask |= get_umr_update_translation_mask(); 455631616255SArtemy Kovalyov if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) { 455731616255SArtemy Kovalyov umr->mkey_mask |= get_umr_update_access_mask(atomic); 455856e11d62SNoa Osherovich umr->mkey_mask |= get_umr_update_pd_mask(); 4559e126ba97SEli Cohen } 456031616255SArtemy Kovalyov if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR) 456131616255SArtemy Kovalyov umr->mkey_mask |= get_umr_enable_mr_mask(); 456231616255SArtemy Kovalyov if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) 456331616255SArtemy Kovalyov umr->mkey_mask |= get_umr_disable_mr_mask(); 4564e126ba97SEli Cohen 4565e126ba97SEli Cohen if (!wr->num_sge) 4566968e78ddSHaggai Eran umr->flags |= MLX5_UMR_INLINE; 4567c8d75a98SMajd Dibbiny 4568c8d75a98SMajd Dibbiny return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask)); 4569e126ba97SEli Cohen } 4570e126ba97SEli Cohen 4571e126ba97SEli Cohen static u8 get_umr_flags(int acc) 4572e126ba97SEli Cohen { 4573e126ba97SEli Cohen return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | 4574e126ba97SEli Cohen (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | 4575e126ba97SEli Cohen (acc & IB_ACCESS_REMOTE_READ ? 
MLX5_PERM_REMOTE_READ : 0) | 4576e126ba97SEli Cohen (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | 45772ac45934SSagi Grimberg MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN; 4578e126ba97SEli Cohen } 4579e126ba97SEli Cohen 45808a187ee5SSagi Grimberg static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, 45818a187ee5SSagi Grimberg struct mlx5_ib_mr *mr, 45828a187ee5SSagi Grimberg u32 key, int access) 45838a187ee5SSagi Grimberg { 458438ca87c6SMax Gurtovoy int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1; 45858a187ee5SSagi Grimberg 45868a187ee5SSagi Grimberg memset(seg, 0, sizeof(*seg)); 4587b005d316SSagi Grimberg 4588ec22eb53SSaeed Mahameed if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT) 4589b005d316SSagi Grimberg seg->log2_page_size = ilog2(mr->ibmr.page_size); 4590ec22eb53SSaeed Mahameed else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) 4591b005d316SSagi Grimberg /* KLMs take twice the size of MTTs */ 4592b005d316SSagi Grimberg ndescs *= 2; 4593b005d316SSagi Grimberg 4594b005d316SSagi Grimberg seg->flags = get_umr_flags(access) | mr->access_mode; 45958a187ee5SSagi Grimberg seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); 45968a187ee5SSagi Grimberg seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); 45978a187ee5SSagi Grimberg seg->start_addr = cpu_to_be64(mr->ibmr.iova); 45988a187ee5SSagi Grimberg seg->len = cpu_to_be64(mr->ibmr.length); 45998a187ee5SSagi Grimberg seg->xlt_oct_size = cpu_to_be32(ndescs); 46008a187ee5SSagi Grimberg } 46018a187ee5SSagi Grimberg 4602dd01e66aSSagi Grimberg static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) 4603e126ba97SEli Cohen { 4604e126ba97SEli Cohen memset(seg, 0, sizeof(*seg)); 4605968e78ddSHaggai Eran seg->status = MLX5_MKEY_STATUS_FREE; 4606e126ba97SEli Cohen } 4607e126ba97SEli Cohen 4608f696bf6dSBart Van Assche static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, 4609f696bf6dSBart Van Assche const struct ib_send_wr *wr) 4610e126ba97SEli Cohen { 4611f696bf6dSBart Van Assche const struct mlx5_umr_wr *umrwr = umr_wr(wr); 4612968e78ddSHaggai Eran 4613e126ba97SEli Cohen memset(seg, 0, sizeof(*seg)); 461431616255SArtemy Kovalyov if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) 4615968e78ddSHaggai Eran seg->status = MLX5_MKEY_STATUS_FREE; 4616e126ba97SEli Cohen 4617968e78ddSHaggai Eran seg->flags = convert_access(umrwr->access_flags); 461856e11d62SNoa Osherovich if (umrwr->pd) 4619968e78ddSHaggai Eran seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); 462031616255SArtemy Kovalyov if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && 462131616255SArtemy Kovalyov !umrwr->length) 462231616255SArtemy Kovalyov seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64); 462331616255SArtemy Kovalyov 462431616255SArtemy Kovalyov seg->start_addr = cpu_to_be64(umrwr->virt_addr); 4625968e78ddSHaggai Eran seg->len = cpu_to_be64(umrwr->length); 4626968e78ddSHaggai Eran seg->log2_page_size = umrwr->page_shift; 4627746b5583SEli Cohen seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | 4628968e78ddSHaggai Eran mlx5_mkey_variant(umrwr->mkey)); 4629e126ba97SEli Cohen } 4630e126ba97SEli Cohen 46318a187ee5SSagi Grimberg static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg, 46328a187ee5SSagi Grimberg struct mlx5_ib_mr *mr, 46338a187ee5SSagi Grimberg struct mlx5_ib_pd *pd) 46348a187ee5SSagi Grimberg { 463538ca87c6SMax Gurtovoy int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs); 46368a187ee5SSagi Grimberg 46378a187ee5SSagi Grimberg dseg->addr = cpu_to_be64(mr->desc_map); 46388a187ee5SSagi Grimberg dseg->byte_count = 
cpu_to_be32(ALIGN(bcount, 64)); 46398a187ee5SSagi Grimberg dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); 46408a187ee5SSagi Grimberg } 46418a187ee5SSagi Grimberg 4642f696bf6dSBart Van Assche static __be32 send_ieth(const struct ib_send_wr *wr) 4643e126ba97SEli Cohen { 4644e126ba97SEli Cohen switch (wr->opcode) { 4645e126ba97SEli Cohen case IB_WR_SEND_WITH_IMM: 4646e126ba97SEli Cohen case IB_WR_RDMA_WRITE_WITH_IMM: 4647e126ba97SEli Cohen return wr->ex.imm_data; 4648e126ba97SEli Cohen 4649e126ba97SEli Cohen case IB_WR_SEND_WITH_INV: 4650e126ba97SEli Cohen return cpu_to_be32(wr->ex.invalidate_rkey); 4651e126ba97SEli Cohen 4652e126ba97SEli Cohen default: 4653e126ba97SEli Cohen return 0; 4654e126ba97SEli Cohen } 4655e126ba97SEli Cohen } 4656e126ba97SEli Cohen 4657e126ba97SEli Cohen static u8 calc_sig(void *wqe, int size) 4658e126ba97SEli Cohen { 4659e126ba97SEli Cohen u8 *p = wqe; 4660e126ba97SEli Cohen u8 res = 0; 4661e126ba97SEli Cohen int i; 4662e126ba97SEli Cohen 4663e126ba97SEli Cohen for (i = 0; i < size; i++) 4664e126ba97SEli Cohen res ^= p[i]; 4665e126ba97SEli Cohen 4666e126ba97SEli Cohen return ~res; 4667e126ba97SEli Cohen } 4668e126ba97SEli Cohen 4669e126ba97SEli Cohen static u8 wq_sig(void *wqe) 4670e126ba97SEli Cohen { 4671e126ba97SEli Cohen return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); 4672e126ba97SEli Cohen } 4673e126ba97SEli Cohen 4674f696bf6dSBart Van Assche static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, 467534f4c955SGuy Levi void **wqe, int *wqe_sz, void **cur_edge) 4676e126ba97SEli Cohen { 4677e126ba97SEli Cohen struct mlx5_wqe_inline_seg *seg; 467834f4c955SGuy Levi size_t offset; 4679e126ba97SEli Cohen int inl = 0; 4680e126ba97SEli Cohen int i; 4681e126ba97SEli Cohen 468234f4c955SGuy Levi seg = *wqe; 468334f4c955SGuy Levi *wqe += sizeof(*seg); 468434f4c955SGuy Levi offset = sizeof(*seg); 468534f4c955SGuy Levi 4686e126ba97SEli Cohen for (i = 0; i < wr->num_sge; i++) { 468734f4c955SGuy Levi size_t len = wr->sg_list[i].length; 468834f4c955SGuy Levi void *addr = (void *)(unsigned long)(wr->sg_list[i].addr); 468934f4c955SGuy Levi 4690e126ba97SEli Cohen inl += len; 4691e126ba97SEli Cohen 4692e126ba97SEli Cohen if (unlikely(inl > qp->max_inline_data)) 4693e126ba97SEli Cohen return -ENOMEM; 4694e126ba97SEli Cohen 469534f4c955SGuy Levi while (likely(len)) { 469634f4c955SGuy Levi size_t leftlen; 469734f4c955SGuy Levi size_t copysz; 469834f4c955SGuy Levi 469934f4c955SGuy Levi handle_post_send_edge(&qp->sq, wqe, 470034f4c955SGuy Levi *wqe_sz + (offset >> 4), 470134f4c955SGuy Levi cur_edge); 470234f4c955SGuy Levi 470334f4c955SGuy Levi leftlen = *cur_edge - *wqe; 470434f4c955SGuy Levi copysz = min_t(size_t, leftlen, len); 470534f4c955SGuy Levi 470634f4c955SGuy Levi memcpy(*wqe, addr, copysz); 470734f4c955SGuy Levi len -= copysz; 470834f4c955SGuy Levi addr += copysz; 470934f4c955SGuy Levi *wqe += copysz; 471034f4c955SGuy Levi offset += copysz; 4711e126ba97SEli Cohen } 4712e126ba97SEli Cohen } 4713e126ba97SEli Cohen 4714e126ba97SEli Cohen seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); 4715e126ba97SEli Cohen 471634f4c955SGuy Levi *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16; 4717e126ba97SEli Cohen 4718e126ba97SEli Cohen return 0; 4719e126ba97SEli Cohen } 4720e126ba97SEli Cohen 4721e6631814SSagi Grimberg static u16 prot_field_size(enum ib_signature_type type) 4722e6631814SSagi Grimberg { 4723e6631814SSagi Grimberg switch (type) { 4724e6631814SSagi Grimberg case IB_SIG_TYPE_T10_DIF: 4725e6631814SSagi Grimberg return MLX5_DIF_SIZE; 
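/* T10-DIF protection information is an 8-byte tuple (2-byte guard, 2-byte app tag, 4-byte ref tag) carried per data block. */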
4726e6631814SSagi Grimberg default: 4727e6631814SSagi Grimberg return 0; 4728e6631814SSagi Grimberg } 4729e6631814SSagi Grimberg } 4730e6631814SSagi Grimberg 4731e6631814SSagi Grimberg static u8 bs_selector(int block_size) 4732e6631814SSagi Grimberg { 4733e6631814SSagi Grimberg switch (block_size) { 4734e6631814SSagi Grimberg case 512: return 0x1; 4735e6631814SSagi Grimberg case 520: return 0x2; 4736e6631814SSagi Grimberg case 4096: return 0x3; 4737e6631814SSagi Grimberg case 4160: return 0x4; 4738e6631814SSagi Grimberg case 1073741824: return 0x5; 4739e6631814SSagi Grimberg default: return 0; 4740e6631814SSagi Grimberg } 4741e6631814SSagi Grimberg } 4742e6631814SSagi Grimberg 474378eda2bbSSagi Grimberg static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain, 4744142537f4SSagi Grimberg struct mlx5_bsf_inl *inl) 4745e6631814SSagi Grimberg { 4746142537f4SSagi Grimberg /* Valid inline section and allow BSF refresh */ 4747142537f4SSagi Grimberg inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID | 4748142537f4SSagi Grimberg MLX5_BSF_REFRESH_DIF); 4749142537f4SSagi Grimberg inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag); 4750142537f4SSagi Grimberg inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag); 4751142537f4SSagi Grimberg /* repeating block */ 4752142537f4SSagi Grimberg inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK; 4753142537f4SSagi Grimberg inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ? 4754142537f4SSagi Grimberg MLX5_DIF_CRC : MLX5_DIF_IPCS; 4755e6631814SSagi Grimberg 475678eda2bbSSagi Grimberg if (domain->sig.dif.ref_remap) 475778eda2bbSSagi Grimberg inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG; 4758e6631814SSagi Grimberg 475978eda2bbSSagi Grimberg if (domain->sig.dif.app_escape) { 476078eda2bbSSagi Grimberg if (domain->sig.dif.ref_escape) 476178eda2bbSSagi Grimberg inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE; 476278eda2bbSSagi Grimberg else 476378eda2bbSSagi Grimberg inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE; 4764e6631814SSagi Grimberg } 4765e6631814SSagi Grimberg 476678eda2bbSSagi Grimberg inl->dif_app_bitmask_check = 476778eda2bbSSagi Grimberg cpu_to_be16(domain->sig.dif.apptag_check_mask); 4768e6631814SSagi Grimberg } 4769e6631814SSagi Grimberg 4770e6631814SSagi Grimberg static int mlx5_set_bsf(struct ib_mr *sig_mr, 4771e6631814SSagi Grimberg struct ib_sig_attrs *sig_attrs, 4772e6631814SSagi Grimberg struct mlx5_bsf *bsf, u32 data_size) 4773e6631814SSagi Grimberg { 4774e6631814SSagi Grimberg struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig; 4775e6631814SSagi Grimberg struct mlx5_bsf_basic *basic = &bsf->basic; 4776e6631814SSagi Grimberg struct ib_sig_domain *mem = &sig_attrs->mem; 4777e6631814SSagi Grimberg struct ib_sig_domain *wire = &sig_attrs->wire; 4778e6631814SSagi Grimberg 4779c7f44fbdSSagi Grimberg memset(bsf, 0, sizeof(*bsf)); 4780e6631814SSagi Grimberg 4781142537f4SSagi Grimberg /* Basic + Extended + Inline */ 4782142537f4SSagi Grimberg basic->bsf_size_sbs = 1 << 7; 4783e6631814SSagi Grimberg /* Input domain check byte mask */ 4784e6631814SSagi Grimberg basic->check_byte_mask = sig_attrs->check_mask; 478578eda2bbSSagi Grimberg basic->raw_data_size = cpu_to_be32(data_size); 478678eda2bbSSagi Grimberg 478778eda2bbSSagi Grimberg /* Memory domain */ 478878eda2bbSSagi Grimberg switch (sig_attrs->mem.sig_type) { 478978eda2bbSSagi Grimberg case IB_SIG_TYPE_NONE: 479078eda2bbSSagi Grimberg break; 479178eda2bbSSagi Grimberg case IB_SIG_TYPE_T10_DIF: 479278eda2bbSSagi Grimberg basic->mem.bs_selector = 
bs_selector(mem->sig.dif.pi_interval); 479378eda2bbSSagi Grimberg basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx); 479478eda2bbSSagi Grimberg mlx5_fill_inl_bsf(mem, &bsf->m_inl); 479578eda2bbSSagi Grimberg break; 479678eda2bbSSagi Grimberg default: 479778eda2bbSSagi Grimberg return -EINVAL; 479878eda2bbSSagi Grimberg } 479978eda2bbSSagi Grimberg 480078eda2bbSSagi Grimberg /* Wire domain */ 480178eda2bbSSagi Grimberg switch (sig_attrs->wire.sig_type) { 480278eda2bbSSagi Grimberg case IB_SIG_TYPE_NONE: 480378eda2bbSSagi Grimberg break; 480478eda2bbSSagi Grimberg case IB_SIG_TYPE_T10_DIF: 4805e6631814SSagi Grimberg if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && 480678eda2bbSSagi Grimberg mem->sig_type == wire->sig_type) { 4807e6631814SSagi Grimberg /* Same block structure */ 4808142537f4SSagi Grimberg basic->bsf_size_sbs |= 1 << 4; 4809e6631814SSagi Grimberg if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) 4810fd22f78cSSagi Grimberg basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK; 4811c7f44fbdSSagi Grimberg if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) 4812fd22f78cSSagi Grimberg basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK; 4813c7f44fbdSSagi Grimberg if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) 4814fd22f78cSSagi Grimberg basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK; 4815e6631814SSagi Grimberg } else 4816e6631814SSagi Grimberg basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); 4817e6631814SSagi Grimberg 4818142537f4SSagi Grimberg basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx); 481978eda2bbSSagi Grimberg mlx5_fill_inl_bsf(wire, &bsf->w_inl); 4820e6631814SSagi Grimberg break; 4821e6631814SSagi Grimberg default: 4822e6631814SSagi Grimberg return -EINVAL; 4823e6631814SSagi Grimberg } 4824e6631814SSagi Grimberg 4825e6631814SSagi Grimberg return 0; 4826e6631814SSagi Grimberg } 4827e6631814SSagi Grimberg 482838ca87c6SMax Gurtovoy static int set_sig_data_segment(const struct ib_send_wr *send_wr, 482938ca87c6SMax Gurtovoy struct ib_mr *sig_mr, 483038ca87c6SMax Gurtovoy struct ib_sig_attrs *sig_attrs, 483138ca87c6SMax Gurtovoy struct mlx5_ib_qp *qp, void **seg, int *size, 483238ca87c6SMax Gurtovoy void **cur_edge) 4833e6631814SSagi Grimberg { 4834e6631814SSagi Grimberg struct mlx5_bsf *bsf; 483538ca87c6SMax Gurtovoy u32 data_len; 483638ca87c6SMax Gurtovoy u32 data_key; 483738ca87c6SMax Gurtovoy u64 data_va; 483838ca87c6SMax Gurtovoy u32 prot_len = 0; 483938ca87c6SMax Gurtovoy u32 prot_key = 0; 484038ca87c6SMax Gurtovoy u64 prot_va = 0; 484138ca87c6SMax Gurtovoy bool prot = false; 4842e6631814SSagi Grimberg int ret; 4843e6631814SSagi Grimberg int wqe_size; 484438ca87c6SMax Gurtovoy struct mlx5_ib_mr *mr = to_mmr(sig_mr); 484538ca87c6SMax Gurtovoy struct mlx5_ib_mr *pi_mr = mr->pi_mr; 484638ca87c6SMax Gurtovoy 484738ca87c6SMax Gurtovoy data_len = pi_mr->data_length; 484838ca87c6SMax Gurtovoy data_key = pi_mr->ibmr.lkey; 48492563e2f3SMax Gurtovoy data_va = pi_mr->data_iova; 485038ca87c6SMax Gurtovoy if (pi_mr->meta_ndescs) { 485138ca87c6SMax Gurtovoy prot_len = pi_mr->meta_length; 485238ca87c6SMax Gurtovoy prot_key = pi_mr->ibmr.lkey; 4853de0ae958SIsrael Rukshin prot_va = pi_mr->pi_iova; 485438ca87c6SMax Gurtovoy prot = true; 485538ca87c6SMax Gurtovoy } 485638ca87c6SMax Gurtovoy 485738ca87c6SMax Gurtovoy if (!prot || (data_key == prot_key && data_va == prot_va && 485838ca87c6SMax Gurtovoy data_len == prot_len)) { 4859e6631814SSagi Grimberg /** 4860e6631814SSagi Grimberg * Source domain doesn't contain signature information 48615c273b16SSagi 
Grimberg * or data and protection are interleaved in memory. 4862e6631814SSagi Grimberg * So we need to construct: 4863e6631814SSagi Grimberg * ------------------ 4864e6631814SSagi Grimberg * | data_klm | 4865e6631814SSagi Grimberg * ------------------ 4866e6631814SSagi Grimberg * | BSF | 4867e6631814SSagi Grimberg * ------------------ 4868e6631814SSagi Grimberg **/ 4869e6631814SSagi Grimberg struct mlx5_klm *data_klm = *seg; 4870e6631814SSagi Grimberg 4871e6631814SSagi Grimberg data_klm->bcount = cpu_to_be32(data_len); 4872e6631814SSagi Grimberg data_klm->key = cpu_to_be32(data_key); 4873e6631814SSagi Grimberg data_klm->va = cpu_to_be64(data_va); 4874e6631814SSagi Grimberg wqe_size = ALIGN(sizeof(*data_klm), 64); 4875e6631814SSagi Grimberg } else { 4876e6631814SSagi Grimberg /** 4877e6631814SSagi Grimberg * Source domain contains signature information 4878e6631814SSagi Grimberg * So we need to construct a strided block format: 4879e6631814SSagi Grimberg * --------------------------- 4880e6631814SSagi Grimberg * | stride_block_ctrl | 4881e6631814SSagi Grimberg * --------------------------- 4882e6631814SSagi Grimberg * | data_klm | 4883e6631814SSagi Grimberg * --------------------------- 4884e6631814SSagi Grimberg * | prot_klm | 4885e6631814SSagi Grimberg * --------------------------- 4886e6631814SSagi Grimberg * | BSF | 4887e6631814SSagi Grimberg * --------------------------- 4888e6631814SSagi Grimberg **/ 4889e6631814SSagi Grimberg struct mlx5_stride_block_ctrl_seg *sblock_ctrl; 4890e6631814SSagi Grimberg struct mlx5_stride_block_entry *data_sentry; 4891e6631814SSagi Grimberg struct mlx5_stride_block_entry *prot_sentry; 4892e6631814SSagi Grimberg u16 block_size = sig_attrs->mem.sig.dif.pi_interval; 4893e6631814SSagi Grimberg int prot_size; 4894e6631814SSagi Grimberg 4895e6631814SSagi Grimberg sblock_ctrl = *seg; 4896e6631814SSagi Grimberg data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl); 4897e6631814SSagi Grimberg prot_sentry = (void *)data_sentry + sizeof(*data_sentry); 4898e6631814SSagi Grimberg 4899e6631814SSagi Grimberg prot_size = prot_field_size(sig_attrs->mem.sig_type); 4900e6631814SSagi Grimberg if (!prot_size) { 4901e6631814SSagi Grimberg pr_err("Bad block size given: %u\n", block_size); 4902e6631814SSagi Grimberg return -EINVAL; 4903e6631814SSagi Grimberg } 4904e6631814SSagi Grimberg sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size + 4905e6631814SSagi Grimberg prot_size); 4906e6631814SSagi Grimberg sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP); 4907e6631814SSagi Grimberg sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size); 4908e6631814SSagi Grimberg sblock_ctrl->num_entries = cpu_to_be16(2); 4909e6631814SSagi Grimberg 4910e6631814SSagi Grimberg data_sentry->bcount = cpu_to_be16(block_size); 4911e6631814SSagi Grimberg data_sentry->key = cpu_to_be32(data_key); 4912e6631814SSagi Grimberg data_sentry->va = cpu_to_be64(data_va); 49135c273b16SSagi Grimberg data_sentry->stride = cpu_to_be16(block_size); 49145c273b16SSagi Grimberg 4915e6631814SSagi Grimberg prot_sentry->bcount = cpu_to_be16(prot_size); 4916e6631814SSagi Grimberg prot_sentry->key = cpu_to_be32(prot_key); 4917e6631814SSagi Grimberg prot_sentry->va = cpu_to_be64(prot_va); 4918e6631814SSagi Grimberg prot_sentry->stride = cpu_to_be16(prot_size); 49195c273b16SSagi Grimberg 4920e6631814SSagi Grimberg wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) + 4921e6631814SSagi Grimberg sizeof(*prot_sentry), 64); 4922e6631814SSagi Grimberg } 4923e6631814SSagi Grimberg 4924e6631814SSagi Grimberg *seg +=
wqe_size; 4925e6631814SSagi Grimberg *size += wqe_size / 16; 492634f4c955SGuy Levi handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 4927e6631814SSagi Grimberg 4928e6631814SSagi Grimberg bsf = *seg; 4929e6631814SSagi Grimberg ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len); 4930e6631814SSagi Grimberg if (ret) 4931e6631814SSagi Grimberg return -EINVAL; 4932e6631814SSagi Grimberg 4933e6631814SSagi Grimberg *seg += sizeof(*bsf); 4934e6631814SSagi Grimberg *size += sizeof(*bsf) / 16; 493534f4c955SGuy Levi handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 4936e6631814SSagi Grimberg 4937e6631814SSagi Grimberg return 0; 4938e6631814SSagi Grimberg } 4939e6631814SSagi Grimberg 4940e6631814SSagi Grimberg static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, 494122465bbaSMax Gurtovoy struct ib_mr *sig_mr, int access_flags, 494222465bbaSMax Gurtovoy u32 size, u32 length, u32 pdn) 4943e6631814SSagi Grimberg { 4944e6631814SSagi Grimberg u32 sig_key = sig_mr->rkey; 4945d5436ba0SSagi Grimberg u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; 4946e6631814SSagi Grimberg 4947e6631814SSagi Grimberg memset(seg, 0, sizeof(*seg)); 4948e6631814SSagi Grimberg 494922465bbaSMax Gurtovoy seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS; 4950e6631814SSagi Grimberg seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); 4951d5436ba0SSagi Grimberg seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | 4952e6631814SSagi Grimberg MLX5_MKEY_BSF_EN | pdn); 4953e6631814SSagi Grimberg seg->len = cpu_to_be64(length); 495431616255SArtemy Kovalyov seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size)); 4955e6631814SSagi Grimberg seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); 4956e6631814SSagi Grimberg } 4957e6631814SSagi Grimberg 4958e6631814SSagi Grimberg static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, 495931616255SArtemy Kovalyov u32 size) 4960e6631814SSagi Grimberg { 4961e6631814SSagi Grimberg memset(umr, 0, sizeof(*umr)); 4962e6631814SSagi Grimberg 4963e6631814SSagi Grimberg umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; 496431616255SArtemy Kovalyov umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 4965e6631814SSagi Grimberg umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); 4966e6631814SSagi Grimberg umr->mkey_mask = sig_mkey_mask(); 4967e6631814SSagi Grimberg } 4968e6631814SSagi Grimberg 496938ca87c6SMax Gurtovoy static int set_pi_umr_wr(const struct ib_send_wr *send_wr, 497038ca87c6SMax Gurtovoy struct mlx5_ib_qp *qp, void **seg, int *size, 497138ca87c6SMax Gurtovoy void **cur_edge) 497238ca87c6SMax Gurtovoy { 497338ca87c6SMax Gurtovoy const struct ib_reg_wr *wr = reg_wr(send_wr); 497438ca87c6SMax Gurtovoy struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr); 497538ca87c6SMax Gurtovoy struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr; 497638ca87c6SMax Gurtovoy struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs; 497738ca87c6SMax Gurtovoy u32 pdn = get_pd(qp)->pdn; 497838ca87c6SMax Gurtovoy u32 xlt_size; 497938ca87c6SMax Gurtovoy int region_len, ret; 498038ca87c6SMax Gurtovoy 498138ca87c6SMax Gurtovoy if (unlikely(send_wr->num_sge != 0) || 498238ca87c6SMax Gurtovoy unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) || 4983185eddc4SMax Gurtovoy unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) || 498438ca87c6SMax Gurtovoy unlikely(!sig_mr->sig->sig_status_checked)) 498538ca87c6SMax Gurtovoy return -EINVAL; 498638ca87c6SMax Gurtovoy 498738ca87c6SMax Gurtovoy /* length of the protected region, data + protection */ 
498838ca87c6SMax Gurtovoy region_len = pi_mr->ibmr.length; 498938ca87c6SMax Gurtovoy 499038ca87c6SMax Gurtovoy /** 499138ca87c6SMax Gurtovoy * KLM octoword size - if protection was provided 499238ca87c6SMax Gurtovoy * then we use strided block format (3 octowords), 499338ca87c6SMax Gurtovoy * else we use single KLM (1 octoword) 499438ca87c6SMax Gurtovoy **/ 499538ca87c6SMax Gurtovoy if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE) 499638ca87c6SMax Gurtovoy xlt_size = 0x30; 499738ca87c6SMax Gurtovoy else 499838ca87c6SMax Gurtovoy xlt_size = sizeof(struct mlx5_klm); 499938ca87c6SMax Gurtovoy 500038ca87c6SMax Gurtovoy set_sig_umr_segment(*seg, xlt_size); 500138ca87c6SMax Gurtovoy *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 500238ca87c6SMax Gurtovoy *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 500338ca87c6SMax Gurtovoy handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 500438ca87c6SMax Gurtovoy 500538ca87c6SMax Gurtovoy set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len, 500638ca87c6SMax Gurtovoy pdn); 500738ca87c6SMax Gurtovoy *seg += sizeof(struct mlx5_mkey_seg); 500838ca87c6SMax Gurtovoy *size += sizeof(struct mlx5_mkey_seg) / 16; 500938ca87c6SMax Gurtovoy handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 501038ca87c6SMax Gurtovoy 501138ca87c6SMax Gurtovoy ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size, 501238ca87c6SMax Gurtovoy cur_edge); 501338ca87c6SMax Gurtovoy if (ret) 501438ca87c6SMax Gurtovoy return ret; 501538ca87c6SMax Gurtovoy 501638ca87c6SMax Gurtovoy sig_mr->sig->sig_status_checked = false; 501738ca87c6SMax Gurtovoy return 0; 501838ca87c6SMax Gurtovoy } 5019e6631814SSagi Grimberg 5020e6631814SSagi Grimberg static int set_psv_wr(struct ib_sig_domain *domain, 5021e6631814SSagi Grimberg u32 psv_idx, void **seg, int *size) 5022e6631814SSagi Grimberg { 5023e6631814SSagi Grimberg struct mlx5_seg_set_psv *psv_seg = *seg; 5024e6631814SSagi Grimberg 5025e6631814SSagi Grimberg memset(psv_seg, 0, sizeof(*psv_seg)); 5026e6631814SSagi Grimberg psv_seg->psv_num = cpu_to_be32(psv_idx); 5027e6631814SSagi Grimberg switch (domain->sig_type) { 502878eda2bbSSagi Grimberg case IB_SIG_TYPE_NONE: 502978eda2bbSSagi Grimberg break; 5030e6631814SSagi Grimberg case IB_SIG_TYPE_T10_DIF: 5031e6631814SSagi Grimberg psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | 5032e6631814SSagi Grimberg domain->sig.dif.app_tag); 5033e6631814SSagi Grimberg psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); 5034e6631814SSagi Grimberg break; 5035e6631814SSagi Grimberg default: 503612bbf1eaSLeon Romanovsky pr_err("Bad signature type (%d) is given.\n", 503712bbf1eaSLeon Romanovsky domain->sig_type); 503812bbf1eaSLeon Romanovsky return -EINVAL; 5039e6631814SSagi Grimberg } 5040e6631814SSagi Grimberg 504178eda2bbSSagi Grimberg *seg += sizeof(*psv_seg); 504278eda2bbSSagi Grimberg *size += sizeof(*psv_seg) / 16; 504378eda2bbSSagi Grimberg 5044e6631814SSagi Grimberg return 0; 5045e6631814SSagi Grimberg } 5046e6631814SSagi Grimberg 50478a187ee5SSagi Grimberg static int set_reg_wr(struct mlx5_ib_qp *qp, 5048f696bf6dSBart Van Assche const struct ib_reg_wr *wr, 50499ac7c4bcSMax Gurtovoy void **seg, int *size, void **cur_edge, 50509ac7c4bcSMax Gurtovoy bool check_not_free) 50518a187ee5SSagi Grimberg { 50528a187ee5SSagi Grimberg struct mlx5_ib_mr *mr = to_mmr(wr->mr); 50538a187ee5SSagi Grimberg struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 5054841b07f9SMoni Shoua struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); 505538ca87c6SMax Gurtovoy int mr_list_size = 
(mr->ndescs + mr->meta_ndescs) * mr->desc_size; 5056064e5262SIdan Burstein bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; 5057841b07f9SMoni Shoua bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; 50589ac7c4bcSMax Gurtovoy u8 flags = 0; 50598a187ee5SSagi Grimberg 5060d6de0bb1SMichael Guralnik if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) { 5061841b07f9SMoni Shoua mlx5_ib_warn(to_mdev(qp->ibqp.device), 5062841b07f9SMoni Shoua "Fast update of %s for MR is disabled\n", 5063841b07f9SMoni Shoua (MLX5_CAP_GEN(dev->mdev, 5064841b07f9SMoni Shoua umr_modify_entity_size_disabled)) ? 5065841b07f9SMoni Shoua "entity size" : 5066841b07f9SMoni Shoua "atomic access"); 5067841b07f9SMoni Shoua return -EINVAL; 5068841b07f9SMoni Shoua } 5069841b07f9SMoni Shoua 50708a187ee5SSagi Grimberg if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 50718a187ee5SSagi Grimberg mlx5_ib_warn(to_mdev(qp->ibqp.device), 50728a187ee5SSagi Grimberg "Invalid IB_SEND_INLINE send flag\n"); 50738a187ee5SSagi Grimberg return -EINVAL; 50748a187ee5SSagi Grimberg } 50758a187ee5SSagi Grimberg 50769ac7c4bcSMax Gurtovoy if (check_not_free) 50779ac7c4bcSMax Gurtovoy flags |= MLX5_UMR_CHECK_NOT_FREE; 50789ac7c4bcSMax Gurtovoy if (umr_inline) 50799ac7c4bcSMax Gurtovoy flags |= MLX5_UMR_INLINE; 50809ac7c4bcSMax Gurtovoy 5081841b07f9SMoni Shoua set_reg_umr_seg(*seg, mr, flags, atomic); 50828a187ee5SSagi Grimberg *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 50838a187ee5SSagi Grimberg *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 508434f4c955SGuy Levi handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 50858a187ee5SSagi Grimberg 50868a187ee5SSagi Grimberg set_reg_mkey_seg(*seg, mr, wr->key, wr->access); 50878a187ee5SSagi Grimberg *seg += sizeof(struct mlx5_mkey_seg); 50888a187ee5SSagi Grimberg *size += sizeof(struct mlx5_mkey_seg) / 16; 508934f4c955SGuy Levi handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 50908a187ee5SSagi Grimberg 5091064e5262SIdan Burstein if (umr_inline) { 509234f4c955SGuy Levi memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs, 509334f4c955SGuy Levi mr_list_size); 509434f4c955SGuy Levi *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4); 5095064e5262SIdan Burstein } else { 50968a187ee5SSagi Grimberg set_reg_data_seg(*seg, mr, pd); 50978a187ee5SSagi Grimberg *seg += sizeof(struct mlx5_wqe_data_seg); 50988a187ee5SSagi Grimberg *size += (sizeof(struct mlx5_wqe_data_seg) / 16); 5099064e5262SIdan Burstein } 51008a187ee5SSagi Grimberg return 0; 51018a187ee5SSagi Grimberg } 51028a187ee5SSagi Grimberg 510334f4c955SGuy Levi static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size, 510434f4c955SGuy Levi void **cur_edge) 5105e126ba97SEli Cohen { 5106dd01e66aSSagi Grimberg set_linv_umr_seg(*seg); 5107e126ba97SEli Cohen *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 5108e126ba97SEli Cohen *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 510934f4c955SGuy Levi handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 5110dd01e66aSSagi Grimberg set_linv_mkey_seg(*seg); 5111e126ba97SEli Cohen *seg += sizeof(struct mlx5_mkey_seg); 5112e126ba97SEli Cohen *size += sizeof(struct mlx5_mkey_seg) / 16; 511334f4c955SGuy Levi handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 5114e126ba97SEli Cohen } 5115e126ba97SEli Cohen 511634f4c955SGuy Levi static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16) 5117e126ba97SEli Cohen { 5118e126ba97SEli Cohen __be32 *p = NULL; 5119e126ba97SEli Cohen int i, j; 5120e126ba97SEli Cohen 512134f4c955SGuy Levi pr_debug("dump WQE index %u:\n", idx); 
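/* Dump the WQE 16 bytes (four dwords) per line, fetching the next 64-byte WQBB every fourth line and wrapping at the end of the SQ ring. */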
5122e126ba97SEli Cohen for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { 5123e126ba97SEli Cohen if ((i & 0xf) == 0) { 51241e5887b7SArtemy Kovalyov p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx); 512534f4c955SGuy Levi pr_debug("WQBB at %p:\n", (void *)p); 5126e126ba97SEli Cohen j = 0; 51271e5887b7SArtemy Kovalyov idx = (idx + 1) & (qp->sq.wqe_cnt - 1); 5128e126ba97SEli Cohen } 5129e126ba97SEli Cohen pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), 5130e126ba97SEli Cohen be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]), 5131e126ba97SEli Cohen be32_to_cpu(p[j + 3])); 5132e126ba97SEli Cohen } 5133e126ba97SEli Cohen } 5134e126ba97SEli Cohen 51357bb1fafcSBart Van Assche static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg, 51366e5eadacSSagi Grimberg struct mlx5_wqe_ctrl_seg **ctrl, 513734f4c955SGuy Levi const struct ib_send_wr *wr, unsigned int *idx, 513834f4c955SGuy Levi int *size, void **cur_edge, int nreq, 513934f4c955SGuy Levi bool send_signaled, bool solicited) 51406e5eadacSSagi Grimberg { 5141b2a232d2SLeon Romanovsky if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) 5142b2a232d2SLeon Romanovsky return -ENOMEM; 51436e5eadacSSagi Grimberg 51446e5eadacSSagi Grimberg *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); 514534f4c955SGuy Levi *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx); 51466e5eadacSSagi Grimberg *ctrl = *seg; 51476e5eadacSSagi Grimberg *(uint32_t *)(*seg + 8) = 0; 51486e5eadacSSagi Grimberg (*ctrl)->imm = send_ieth(wr); 51496e5eadacSSagi Grimberg (*ctrl)->fm_ce_se = qp->sq_signal_bits | 51507bb1fafcSBart Van Assche (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) | 51517bb1fafcSBart Van Assche (solicited ? MLX5_WQE_CTRL_SOLICITED : 0); 51526e5eadacSSagi Grimberg 51536e5eadacSSagi Grimberg *seg += sizeof(**ctrl); 51546e5eadacSSagi Grimberg *size = sizeof(**ctrl) / 16; 515534f4c955SGuy Levi *cur_edge = qp->sq.cur_edge; 51566e5eadacSSagi Grimberg 5157b2a232d2SLeon Romanovsky return 0; 51586e5eadacSSagi Grimberg } 51596e5eadacSSagi Grimberg 51607bb1fafcSBart Van Assche static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, 51617bb1fafcSBart Van Assche struct mlx5_wqe_ctrl_seg **ctrl, 51627bb1fafcSBart Van Assche const struct ib_send_wr *wr, unsigned *idx, 516334f4c955SGuy Levi int *size, void **cur_edge, int nreq) 51647bb1fafcSBart Van Assche { 516534f4c955SGuy Levi return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq, 51667bb1fafcSBart Van Assche wr->send_flags & IB_SEND_SIGNALED, 51677bb1fafcSBart Van Assche wr->send_flags & IB_SEND_SOLICITED); 51687bb1fafcSBart Van Assche } 51697bb1fafcSBart Van Assche 51706e5eadacSSagi Grimberg static void finish_wqe(struct mlx5_ib_qp *qp, 51716e5eadacSSagi Grimberg struct mlx5_wqe_ctrl_seg *ctrl, 517234f4c955SGuy Levi void *seg, u8 size, void *cur_edge, 517334f4c955SGuy Levi unsigned int idx, u64 wr_id, int nreq, u8 fence, 517434f4c955SGuy Levi u32 mlx5_opcode) 51756e5eadacSSagi Grimberg { 51766e5eadacSSagi Grimberg u8 opmod = 0; 51776e5eadacSSagi Grimberg 51786e5eadacSSagi Grimberg ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | 51796e5eadacSSagi Grimberg mlx5_opcode | ((u32)opmod << 24)); 518019098df2Smajd@mellanox.com ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); 51816e5eadacSSagi Grimberg ctrl->fm_ce_se |= fence; 5182c95e6d53SLeon Romanovsky if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE)) 51836e5eadacSSagi Grimberg ctrl->signature = wq_sig(ctrl); 51846e5eadacSSagi Grimberg 51856e5eadacSSagi Grimberg qp->sq.wrid[idx] = wr_id; 51866e5eadacSSagi Grimberg 
qp->sq.w_list[idx].opcode = mlx5_opcode; 51876e5eadacSSagi Grimberg qp->sq.wqe_head[idx] = qp->sq.head + nreq; 51886e5eadacSSagi Grimberg qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); 51896e5eadacSSagi Grimberg qp->sq.w_list[idx].next = qp->sq.cur_post; 519034f4c955SGuy Levi 519134f4c955SGuy Levi /* We save the edge which was possibly updated during the WQE 519234f4c955SGuy Levi * construction, into SQ's cache. 519334f4c955SGuy Levi */ 519434f4c955SGuy Levi seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB); 519534f4c955SGuy Levi qp->sq.cur_edge = (unlikely(seg == cur_edge)) ? 519634f4c955SGuy Levi get_sq_edge(&qp->sq, qp->sq.cur_post & 519734f4c955SGuy Levi (qp->sq.wqe_cnt - 1)) : 519834f4c955SGuy Levi cur_edge; 51996e5eadacSSagi Grimberg } 52006e5eadacSSagi Grimberg 5201d34ac5cdSBart Van Assche static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 5202d34ac5cdSBart Van Assche const struct ib_send_wr **bad_wr, bool drain) 5203e126ba97SEli Cohen { 5204e126ba97SEli Cohen struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ 5205e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 520689ea94a7SMaor Gottlieb struct mlx5_core_dev *mdev = dev->mdev; 520738ca87c6SMax Gurtovoy struct ib_reg_wr reg_pi_wr; 5208d16e91daSHaggai Eran struct mlx5_ib_qp *qp; 5209e6631814SSagi Grimberg struct mlx5_ib_mr *mr; 521038ca87c6SMax Gurtovoy struct mlx5_ib_mr *pi_mr; 52112563e2f3SMax Gurtovoy struct mlx5_ib_mr pa_pi_mr; 521238ca87c6SMax Gurtovoy struct ib_sig_attrs *sig_attrs; 5213e126ba97SEli Cohen struct mlx5_wqe_xrc_seg *xrc; 5214d16e91daSHaggai Eran struct mlx5_bf *bf; 521534f4c955SGuy Levi void *cur_edge; 5216e126ba97SEli Cohen int uninitialized_var(size); 5217e126ba97SEli Cohen unsigned long flags; 5218e126ba97SEli Cohen unsigned idx; 5219e126ba97SEli Cohen int err = 0; 5220e126ba97SEli Cohen int num_sge; 5221e126ba97SEli Cohen void *seg; 5222e126ba97SEli Cohen int nreq; 5223e126ba97SEli Cohen int i; 5224e126ba97SEli Cohen u8 next_fence = 0; 5225e126ba97SEli Cohen u8 fence; 5226e126ba97SEli Cohen 52276c75520fSParav Pandit if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && 52286c75520fSParav Pandit !drain)) { 52296c75520fSParav Pandit *bad_wr = wr; 52306c75520fSParav Pandit return -EIO; 52316c75520fSParav Pandit } 52326c75520fSParav Pandit 5233d16e91daSHaggai Eran if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 5234d16e91daSHaggai Eran return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); 5235d16e91daSHaggai Eran 5236d16e91daSHaggai Eran qp = to_mqp(ibqp); 52375fe9dec0SEli Cohen bf = &qp->bf; 5238d16e91daSHaggai Eran 5239e126ba97SEli Cohen spin_lock_irqsave(&qp->sq.lock, flags); 5240e126ba97SEli Cohen 5241e126ba97SEli Cohen for (nreq = 0; wr; nreq++, wr = wr->next) { 5242a8f731ebSFabian Frederick if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { 5243e126ba97SEli Cohen mlx5_ib_warn(dev, "\n"); 5244e126ba97SEli Cohen err = -EINVAL; 5245e126ba97SEli Cohen *bad_wr = wr; 5246e126ba97SEli Cohen goto out; 5247e126ba97SEli Cohen } 5248e126ba97SEli Cohen 5249e126ba97SEli Cohen num_sge = wr->num_sge; 5250e126ba97SEli Cohen if (unlikely(num_sge > qp->sq.max_gs)) { 5251e126ba97SEli Cohen mlx5_ib_warn(dev, "\n"); 525224be409bSChuck Lever err = -EINVAL; 5253e126ba97SEli Cohen *bad_wr = wr; 5254e126ba97SEli Cohen goto out; 5255e126ba97SEli Cohen } 5256e126ba97SEli Cohen 525734f4c955SGuy Levi err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge, 525834f4c955SGuy Levi nreq); 52596e5eadacSSagi Grimberg if (err) { 52606e5eadacSSagi Grimberg mlx5_ib_warn(dev, 
"\n"); 52616e5eadacSSagi Grimberg err = -ENOMEM; 52626e5eadacSSagi Grimberg *bad_wr = wr; 52636e5eadacSSagi Grimberg goto out; 52646e5eadacSSagi Grimberg } 5265e126ba97SEli Cohen 526638ca87c6SMax Gurtovoy if (wr->opcode == IB_WR_REG_MR || 526738ca87c6SMax Gurtovoy wr->opcode == IB_WR_REG_MR_INTEGRITY) { 52686e8484c5SMax Gurtovoy fence = dev->umr_fence; 52696e8484c5SMax Gurtovoy next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 5270074fca3aSMajd Dibbiny } else { 5271074fca3aSMajd Dibbiny if (wr->send_flags & IB_SEND_FENCE) { 52726e8484c5SMax Gurtovoy if (qp->next_fence) 52736e8484c5SMax Gurtovoy fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; 52746e8484c5SMax Gurtovoy else 52756e8484c5SMax Gurtovoy fence = MLX5_FENCE_MODE_FENCE; 52766e8484c5SMax Gurtovoy } else { 52776e8484c5SMax Gurtovoy fence = qp->next_fence; 52786e8484c5SMax Gurtovoy } 5279074fca3aSMajd Dibbiny } 52806e8484c5SMax Gurtovoy 5281e126ba97SEli Cohen switch (ibqp->qp_type) { 5282e126ba97SEli Cohen case IB_QPT_XRC_INI: 5283e126ba97SEli Cohen xrc = seg; 5284e126ba97SEli Cohen seg += sizeof(*xrc); 5285e126ba97SEli Cohen size += sizeof(*xrc) / 16; 5286e126ba97SEli Cohen /* fall through */ 5287e126ba97SEli Cohen case IB_QPT_RC: 5288e126ba97SEli Cohen switch (wr->opcode) { 5289e126ba97SEli Cohen case IB_WR_RDMA_READ: 5290e126ba97SEli Cohen case IB_WR_RDMA_WRITE: 5291e126ba97SEli Cohen case IB_WR_RDMA_WRITE_WITH_IMM: 5292e622f2f4SChristoph Hellwig set_raddr_seg(seg, rdma_wr(wr)->remote_addr, 5293e622f2f4SChristoph Hellwig rdma_wr(wr)->rkey); 5294e126ba97SEli Cohen seg += sizeof(struct mlx5_wqe_raddr_seg); 5295e126ba97SEli Cohen size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 5296e126ba97SEli Cohen break; 5297e126ba97SEli Cohen 5298e126ba97SEli Cohen case IB_WR_ATOMIC_CMP_AND_SWP: 5299e126ba97SEli Cohen case IB_WR_ATOMIC_FETCH_AND_ADD: 5300e126ba97SEli Cohen case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 530181bea28fSEli Cohen mlx5_ib_warn(dev, "Atomic operations are not supported yet\n"); 530281bea28fSEli Cohen err = -ENOSYS; 530381bea28fSEli Cohen *bad_wr = wr; 530481bea28fSEli Cohen goto out; 5305e126ba97SEli Cohen 5306e126ba97SEli Cohen case IB_WR_LOCAL_INV: 5307e126ba97SEli Cohen qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; 5308e126ba97SEli Cohen ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); 530934f4c955SGuy Levi set_linv_wr(qp, &seg, &size, &cur_edge); 5310e126ba97SEli Cohen num_sge = 0; 5311e126ba97SEli Cohen break; 5312e126ba97SEli Cohen 53138a187ee5SSagi Grimberg case IB_WR_REG_MR: 53148a187ee5SSagi Grimberg qp->sq.wr_data[idx] = IB_WR_REG_MR; 53158a187ee5SSagi Grimberg ctrl->imm = cpu_to_be32(reg_wr(wr)->key); 531634f4c955SGuy Levi err = set_reg_wr(qp, reg_wr(wr), &seg, &size, 53179ac7c4bcSMax Gurtovoy &cur_edge, true); 53188a187ee5SSagi Grimberg if (err) { 53198a187ee5SSagi Grimberg *bad_wr = wr; 53208a187ee5SSagi Grimberg goto out; 53218a187ee5SSagi Grimberg } 53228a187ee5SSagi Grimberg num_sge = 0; 53238a187ee5SSagi Grimberg break; 53248a187ee5SSagi Grimberg 532538ca87c6SMax Gurtovoy case IB_WR_REG_MR_INTEGRITY: 53262563e2f3SMax Gurtovoy qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY; 532738ca87c6SMax Gurtovoy 532838ca87c6SMax Gurtovoy mr = to_mmr(reg_wr(wr)->mr); 532938ca87c6SMax Gurtovoy pi_mr = mr->pi_mr; 533038ca87c6SMax Gurtovoy 53312563e2f3SMax Gurtovoy if (pi_mr) { 53322563e2f3SMax Gurtovoy memset(®_pi_wr, 0, 53332563e2f3SMax Gurtovoy sizeof(struct ib_reg_wr)); 53342563e2f3SMax Gurtovoy 533538ca87c6SMax Gurtovoy reg_pi_wr.mr = &pi_mr->ibmr; 533638ca87c6SMax Gurtovoy reg_pi_wr.access = reg_wr(wr)->access; 533738ca87c6SMax Gurtovoy 
reg_pi_wr.key = pi_mr->ibmr.rkey; 533838ca87c6SMax Gurtovoy 533938ca87c6SMax Gurtovoy ctrl->imm = cpu_to_be32(reg_pi_wr.key); 53402563e2f3SMax Gurtovoy /* UMR for data + prot registration */ 53412563e2f3SMax Gurtovoy err = set_reg_wr(qp, ®_pi_wr, &seg, 53422563e2f3SMax Gurtovoy &size, &cur_edge, 53432563e2f3SMax Gurtovoy false); 534438ca87c6SMax Gurtovoy if (err) { 534538ca87c6SMax Gurtovoy *bad_wr = wr; 534638ca87c6SMax Gurtovoy goto out; 534738ca87c6SMax Gurtovoy } 53482563e2f3SMax Gurtovoy finish_wqe(qp, ctrl, seg, size, 53492563e2f3SMax Gurtovoy cur_edge, idx, wr->wr_id, 53502563e2f3SMax Gurtovoy nreq, fence, 535138ca87c6SMax Gurtovoy MLX5_OPCODE_UMR); 535238ca87c6SMax Gurtovoy 53532563e2f3SMax Gurtovoy err = begin_wqe(qp, &seg, &ctrl, wr, 53542563e2f3SMax Gurtovoy &idx, &size, &cur_edge, 53552563e2f3SMax Gurtovoy nreq); 535638ca87c6SMax Gurtovoy if (err) { 535738ca87c6SMax Gurtovoy mlx5_ib_warn(dev, "\n"); 535838ca87c6SMax Gurtovoy err = -ENOMEM; 535938ca87c6SMax Gurtovoy *bad_wr = wr; 536038ca87c6SMax Gurtovoy goto out; 536138ca87c6SMax Gurtovoy } 53622563e2f3SMax Gurtovoy } else { 53632563e2f3SMax Gurtovoy memset(&pa_pi_mr, 0, 53642563e2f3SMax Gurtovoy sizeof(struct mlx5_ib_mr)); 53652563e2f3SMax Gurtovoy /* No UMR, use local_dma_lkey */ 53662563e2f3SMax Gurtovoy pa_pi_mr.ibmr.lkey = 53672563e2f3SMax Gurtovoy mr->ibmr.pd->local_dma_lkey; 53682563e2f3SMax Gurtovoy 53692563e2f3SMax Gurtovoy pa_pi_mr.ndescs = mr->ndescs; 53702563e2f3SMax Gurtovoy pa_pi_mr.data_length = mr->data_length; 53712563e2f3SMax Gurtovoy pa_pi_mr.data_iova = mr->data_iova; 53722563e2f3SMax Gurtovoy if (mr->meta_ndescs) { 53732563e2f3SMax Gurtovoy pa_pi_mr.meta_ndescs = 53742563e2f3SMax Gurtovoy mr->meta_ndescs; 53752563e2f3SMax Gurtovoy pa_pi_mr.meta_length = 53762563e2f3SMax Gurtovoy mr->meta_length; 53772563e2f3SMax Gurtovoy pa_pi_mr.pi_iova = mr->pi_iova; 53782563e2f3SMax Gurtovoy } 53792563e2f3SMax Gurtovoy 53802563e2f3SMax Gurtovoy pa_pi_mr.ibmr.length = mr->ibmr.length; 53812563e2f3SMax Gurtovoy mr->pi_mr = &pa_pi_mr; 53822563e2f3SMax Gurtovoy } 538338ca87c6SMax Gurtovoy ctrl->imm = cpu_to_be32(mr->ibmr.rkey); 538438ca87c6SMax Gurtovoy /* UMR for sig MR */ 538538ca87c6SMax Gurtovoy err = set_pi_umr_wr(wr, qp, &seg, &size, 538638ca87c6SMax Gurtovoy &cur_edge); 538738ca87c6SMax Gurtovoy if (err) { 538838ca87c6SMax Gurtovoy mlx5_ib_warn(dev, "\n"); 538938ca87c6SMax Gurtovoy *bad_wr = wr; 539038ca87c6SMax Gurtovoy goto out; 539138ca87c6SMax Gurtovoy } 539238ca87c6SMax Gurtovoy finish_wqe(qp, ctrl, seg, size, cur_edge, idx, 539338ca87c6SMax Gurtovoy wr->wr_id, nreq, fence, 539438ca87c6SMax Gurtovoy MLX5_OPCODE_UMR); 539538ca87c6SMax Gurtovoy 539638ca87c6SMax Gurtovoy /* 539738ca87c6SMax Gurtovoy * SET_PSV WQEs are not signaled and solicited 539838ca87c6SMax Gurtovoy * on error 539938ca87c6SMax Gurtovoy */ 540038ca87c6SMax Gurtovoy sig_attrs = mr->ibmr.sig_attrs; 540138ca87c6SMax Gurtovoy err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, 540238ca87c6SMax Gurtovoy &size, &cur_edge, nreq, false, 540338ca87c6SMax Gurtovoy true); 540438ca87c6SMax Gurtovoy if (err) { 540538ca87c6SMax Gurtovoy mlx5_ib_warn(dev, "\n"); 540638ca87c6SMax Gurtovoy err = -ENOMEM; 540738ca87c6SMax Gurtovoy *bad_wr = wr; 540838ca87c6SMax Gurtovoy goto out; 540938ca87c6SMax Gurtovoy } 541038ca87c6SMax Gurtovoy err = set_psv_wr(&sig_attrs->mem, 541138ca87c6SMax Gurtovoy mr->sig->psv_memory.psv_idx, 541238ca87c6SMax Gurtovoy &seg, &size); 541338ca87c6SMax Gurtovoy if (err) { 541438ca87c6SMax Gurtovoy mlx5_ib_warn(dev, "\n"); 541538ca87c6SMax 
Gurtovoy *bad_wr = wr; 541638ca87c6SMax Gurtovoy goto out; 541738ca87c6SMax Gurtovoy } 541838ca87c6SMax Gurtovoy finish_wqe(qp, ctrl, seg, size, cur_edge, idx, 541938ca87c6SMax Gurtovoy wr->wr_id, nreq, next_fence, 542038ca87c6SMax Gurtovoy MLX5_OPCODE_SET_PSV); 542138ca87c6SMax Gurtovoy 542238ca87c6SMax Gurtovoy err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, 542338ca87c6SMax Gurtovoy &size, &cur_edge, nreq, false, 542438ca87c6SMax Gurtovoy true); 542538ca87c6SMax Gurtovoy if (err) { 542638ca87c6SMax Gurtovoy mlx5_ib_warn(dev, "\n"); 542738ca87c6SMax Gurtovoy err = -ENOMEM; 542838ca87c6SMax Gurtovoy *bad_wr = wr; 542938ca87c6SMax Gurtovoy goto out; 543038ca87c6SMax Gurtovoy } 543138ca87c6SMax Gurtovoy err = set_psv_wr(&sig_attrs->wire, 543238ca87c6SMax Gurtovoy mr->sig->psv_wire.psv_idx, 543338ca87c6SMax Gurtovoy &seg, &size); 543438ca87c6SMax Gurtovoy if (err) { 543538ca87c6SMax Gurtovoy mlx5_ib_warn(dev, "\n"); 543638ca87c6SMax Gurtovoy *bad_wr = wr; 543738ca87c6SMax Gurtovoy goto out; 543838ca87c6SMax Gurtovoy } 543938ca87c6SMax Gurtovoy finish_wqe(qp, ctrl, seg, size, cur_edge, idx, 544038ca87c6SMax Gurtovoy wr->wr_id, nreq, next_fence, 544138ca87c6SMax Gurtovoy MLX5_OPCODE_SET_PSV); 544238ca87c6SMax Gurtovoy 544338ca87c6SMax Gurtovoy qp->next_fence = 544438ca87c6SMax Gurtovoy MLX5_FENCE_MODE_INITIATOR_SMALL; 544538ca87c6SMax Gurtovoy num_sge = 0; 544638ca87c6SMax Gurtovoy goto skip_psv; 544738ca87c6SMax Gurtovoy 5448e126ba97SEli Cohen default: 5449e126ba97SEli Cohen break; 5450e126ba97SEli Cohen } 5451e126ba97SEli Cohen break; 5452e126ba97SEli Cohen 5453e126ba97SEli Cohen case IB_QPT_UC: 5454e126ba97SEli Cohen switch (wr->opcode) { 5455e126ba97SEli Cohen case IB_WR_RDMA_WRITE: 5456e126ba97SEli Cohen case IB_WR_RDMA_WRITE_WITH_IMM: 5457e622f2f4SChristoph Hellwig set_raddr_seg(seg, rdma_wr(wr)->remote_addr, 5458e622f2f4SChristoph Hellwig rdma_wr(wr)->rkey); 5459e126ba97SEli Cohen seg += sizeof(struct mlx5_wqe_raddr_seg); 5460e126ba97SEli Cohen size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 5461e126ba97SEli Cohen break; 5462e126ba97SEli Cohen 5463e126ba97SEli Cohen default: 5464e126ba97SEli Cohen break; 5465e126ba97SEli Cohen } 5466e126ba97SEli Cohen break; 5467e126ba97SEli Cohen 5468e126ba97SEli Cohen case IB_QPT_SMI: 54691e0e50b6SMaor Gottlieb if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) { 54701e0e50b6SMaor Gottlieb mlx5_ib_warn(dev, "Sending SMP MADs is not allowed\n"); 54711e0e50b6SMaor Gottlieb err = -EPERM; 54721e0e50b6SMaor Gottlieb *bad_wr = wr; 54731e0e50b6SMaor Gottlieb goto out; 54741e0e50b6SMaor Gottlieb } 5475f6b1ee34SBart Van Assche /* fall through */ 5476d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: 5477e126ba97SEli Cohen set_datagram_seg(seg, wr); 5478e126ba97SEli Cohen seg += sizeof(struct mlx5_wqe_datagram_seg); 5479e126ba97SEli Cohen size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 548034f4c955SGuy Levi handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); 548134f4c955SGuy Levi 5482e126ba97SEli Cohen break; 5483f0313965SErez Shitrit case IB_QPT_UD: 5484f0313965SErez Shitrit set_datagram_seg(seg, wr); 5485f0313965SErez Shitrit seg += sizeof(struct mlx5_wqe_datagram_seg); 5486f0313965SErez Shitrit size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 548734f4c955SGuy Levi handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); 5488f0313965SErez Shitrit 5489f0313965SErez Shitrit /* handle qp that supports ud offload */ 5490f0313965SErez Shitrit if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { 5491f0313965SErez Shitrit struct mlx5_wqe_eth_pad *pad; 5492f0313965SErez
Shitrit 5493f0313965SErez Shitrit pad = seg; 5494f0313965SErez Shitrit memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad)); 5495f0313965SErez Shitrit seg += sizeof(struct mlx5_wqe_eth_pad); 5496f0313965SErez Shitrit size += sizeof(struct mlx5_wqe_eth_pad) / 16; 549734f4c955SGuy Levi set_eth_seg(wr, qp, &seg, &size, &cur_edge); 549834f4c955SGuy Levi handle_post_send_edge(&qp->sq, &seg, size, 549934f4c955SGuy Levi &cur_edge); 5500f0313965SErez Shitrit } 5501f0313965SErez Shitrit break; 5502e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: 5503e126ba97SEli Cohen if (wr->opcode != MLX5_IB_WR_UMR) { 5504e126ba97SEli Cohen err = -EINVAL; 5505e126ba97SEli Cohen mlx5_ib_warn(dev, "bad opcode\n"); 5506e126ba97SEli Cohen goto out; 5507e126ba97SEli Cohen } 5508e126ba97SEli Cohen qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; 5509e622f2f4SChristoph Hellwig ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); 5510c8d75a98SMajd Dibbiny err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic))); 5511c8d75a98SMajd Dibbiny if (unlikely(err)) 5512c8d75a98SMajd Dibbiny goto out; 5513e126ba97SEli Cohen seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 5514e126ba97SEli Cohen size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 551534f4c955SGuy Levi handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); 5516e126ba97SEli Cohen set_reg_mkey_segment(seg, wr); 5517e126ba97SEli Cohen seg += sizeof(struct mlx5_mkey_seg); 5518e126ba97SEli Cohen size += sizeof(struct mlx5_mkey_seg) / 16; 551934f4c955SGuy Levi handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); 5520e126ba97SEli Cohen break; 5521e126ba97SEli Cohen 5522e126ba97SEli Cohen default: 5523e126ba97SEli Cohen break; 5524e126ba97SEli Cohen } 5525e126ba97SEli Cohen 5526e126ba97SEli Cohen if (wr->send_flags & IB_SEND_INLINE && num_sge) { 552734f4c955SGuy Levi err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge); 5528e126ba97SEli Cohen if (unlikely(err)) { 5529e126ba97SEli Cohen mlx5_ib_warn(dev, "\n"); 5530e126ba97SEli Cohen *bad_wr = wr; 5531e126ba97SEli Cohen goto out; 5532e126ba97SEli Cohen } 5533e126ba97SEli Cohen } else { 5534e126ba97SEli Cohen for (i = 0; i < num_sge; i++) { 553534f4c955SGuy Levi handle_post_send_edge(&qp->sq, &seg, size, 553634f4c955SGuy Levi &cur_edge); 5537e126ba97SEli Cohen if (likely(wr->sg_list[i].length)) { 553834f4c955SGuy Levi set_data_ptr_seg 553934f4c955SGuy Levi ((struct mlx5_wqe_data_seg *)seg, 554034f4c955SGuy Levi wr->sg_list + i); 5541e126ba97SEli Cohen size += sizeof(struct mlx5_wqe_data_seg) / 16; 554234f4c955SGuy Levi seg += sizeof(struct mlx5_wqe_data_seg); 5543e126ba97SEli Cohen } 5544e126ba97SEli Cohen } 5545e126ba97SEli Cohen } 5546e126ba97SEli Cohen 55476e8484c5SMax Gurtovoy qp->next_fence = next_fence; 554834f4c955SGuy Levi finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq, 554934f4c955SGuy Levi fence, mlx5_ib_opcode[wr->opcode]); 5550e6631814SSagi Grimberg skip_psv: 5551e126ba97SEli Cohen if (0) 5552e126ba97SEli Cohen dump_wqe(qp, idx, size); 5553e126ba97SEli Cohen } 5554e126ba97SEli Cohen 5555e126ba97SEli Cohen out: 5556e126ba97SEli Cohen if (likely(nreq)) { 5557e126ba97SEli Cohen qp->sq.head += nreq; 5558e126ba97SEli Cohen 5559e126ba97SEli Cohen /* Make sure that descriptors are written before 5560e126ba97SEli Cohen * updating doorbell record and ringing the doorbell 5561e126ba97SEli Cohen */ 5562e126ba97SEli Cohen wmb(); 5563e126ba97SEli Cohen 5564e126ba97SEli Cohen qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); 5565e126ba97SEli Cohen 5566ada388f7SEli Cohen /* Make sure doorbell record is visible to the HCA 
before 5567ada388f7SEli Cohen * we hit doorbell */ 5568ada388f7SEli Cohen wmb(); 5569ada388f7SEli Cohen 5570bbf29f61SMaxim Mikityanskiy mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset); 5571e126ba97SEli Cohen /* Make sure doorbells don't leak out of SQ spinlock 5572e126ba97SEli Cohen * and reach the HCA out of order. 5573e126ba97SEli Cohen */ 5574e126ba97SEli Cohen bf->offset ^= bf->buf_size; 5575e126ba97SEli Cohen } 5576e126ba97SEli Cohen 5577e126ba97SEli Cohen spin_unlock_irqrestore(&qp->sq.lock, flags); 5578e126ba97SEli Cohen 5579e126ba97SEli Cohen return err; 5580e126ba97SEli Cohen } 5581e126ba97SEli Cohen 5582d34ac5cdSBart Van Assche int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 5583d34ac5cdSBart Van Assche const struct ib_send_wr **bad_wr) 5584d0e84c0aSYishai Hadas { 5585d0e84c0aSYishai Hadas return _mlx5_ib_post_send(ibqp, wr, bad_wr, false); 5586d0e84c0aSYishai Hadas } 5587d0e84c0aSYishai Hadas 5588e126ba97SEli Cohen static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) 5589e126ba97SEli Cohen { 5590e126ba97SEli Cohen sig->signature = calc_sig(sig, size); 5591e126ba97SEli Cohen } 5592e126ba97SEli Cohen 5593d34ac5cdSBart Van Assche static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 5594d34ac5cdSBart Van Assche const struct ib_recv_wr **bad_wr, bool drain) 5595e126ba97SEli Cohen { 5596e126ba97SEli Cohen struct mlx5_ib_qp *qp = to_mqp(ibqp); 5597e126ba97SEli Cohen struct mlx5_wqe_data_seg *scat; 5598e126ba97SEli Cohen struct mlx5_rwqe_sig *sig; 559989ea94a7SMaor Gottlieb struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 560089ea94a7SMaor Gottlieb struct mlx5_core_dev *mdev = dev->mdev; 5601e126ba97SEli Cohen unsigned long flags; 5602e126ba97SEli Cohen int err = 0; 5603e126ba97SEli Cohen int nreq; 5604e126ba97SEli Cohen int ind; 5605e126ba97SEli Cohen int i; 5606e126ba97SEli Cohen 56076c75520fSParav Pandit if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && 56086c75520fSParav Pandit !drain)) { 56096c75520fSParav Pandit *bad_wr = wr; 56106c75520fSParav Pandit return -EIO; 56116c75520fSParav Pandit } 56126c75520fSParav Pandit 5613d16e91daSHaggai Eran if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 5614d16e91daSHaggai Eran return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); 5615d16e91daSHaggai Eran 5616e126ba97SEli Cohen spin_lock_irqsave(&qp->rq.lock, flags); 5617e126ba97SEli Cohen 5618e126ba97SEli Cohen ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 5619e126ba97SEli Cohen 5620e126ba97SEli Cohen for (nreq = 0; wr; nreq++, wr = wr->next) { 5621e126ba97SEli Cohen if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 5622e126ba97SEli Cohen err = -ENOMEM; 5623e126ba97SEli Cohen *bad_wr = wr; 5624e126ba97SEli Cohen goto out; 5625e126ba97SEli Cohen } 5626e126ba97SEli Cohen 5627e126ba97SEli Cohen if (unlikely(wr->num_sge > qp->rq.max_gs)) { 5628e126ba97SEli Cohen err = -EINVAL; 5629e126ba97SEli Cohen *bad_wr = wr; 5630e126ba97SEli Cohen goto out; 5631e126ba97SEli Cohen } 5632e126ba97SEli Cohen 563334f4c955SGuy Levi scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind); 5634c95e6d53SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) 5635e126ba97SEli Cohen scat++; 5636e126ba97SEli Cohen 5637e126ba97SEli Cohen for (i = 0; i < wr->num_sge; i++) 5638e126ba97SEli Cohen set_data_ptr_seg(scat + i, wr->sg_list + i); 5639e126ba97SEli Cohen 5640e126ba97SEli Cohen if (i < qp->rq.max_gs) { 5641e126ba97SEli Cohen scat[i].byte_count = 0; 5642e126ba97SEli Cohen scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); 5643e126ba97SEli Cohen scat[i].addr 
= 0; 5644e126ba97SEli Cohen } 5645e126ba97SEli Cohen 5646c95e6d53SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) { 5647e126ba97SEli Cohen sig = (struct mlx5_rwqe_sig *)scat; 5648e126ba97SEli Cohen set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); 5649e126ba97SEli Cohen } 5650e126ba97SEli Cohen 5651e126ba97SEli Cohen qp->rq.wrid[ind] = wr->wr_id; 5652e126ba97SEli Cohen 5653e126ba97SEli Cohen ind = (ind + 1) & (qp->rq.wqe_cnt - 1); 5654e126ba97SEli Cohen } 5655e126ba97SEli Cohen 5656e126ba97SEli Cohen out: 5657e126ba97SEli Cohen if (likely(nreq)) { 5658e126ba97SEli Cohen qp->rq.head += nreq; 5659e126ba97SEli Cohen 5660e126ba97SEli Cohen /* Make sure that descriptors are written before 5661e126ba97SEli Cohen * doorbell record. 5662e126ba97SEli Cohen */ 5663e126ba97SEli Cohen wmb(); 5664e126ba97SEli Cohen 5665e126ba97SEli Cohen *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); 5666e126ba97SEli Cohen } 5667e126ba97SEli Cohen 5668e126ba97SEli Cohen spin_unlock_irqrestore(&qp->rq.lock, flags); 5669e126ba97SEli Cohen 5670e126ba97SEli Cohen return err; 5671e126ba97SEli Cohen } 5672e126ba97SEli Cohen 5673d34ac5cdSBart Van Assche int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 5674d34ac5cdSBart Van Assche const struct ib_recv_wr **bad_wr) 5675d0e84c0aSYishai Hadas { 5676d0e84c0aSYishai Hadas return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false); 5677d0e84c0aSYishai Hadas } 5678d0e84c0aSYishai Hadas 5679e126ba97SEli Cohen static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) 5680e126ba97SEli Cohen { 5681e126ba97SEli Cohen switch (mlx5_state) { 5682e126ba97SEli Cohen case MLX5_QP_STATE_RST: return IB_QPS_RESET; 5683e126ba97SEli Cohen case MLX5_QP_STATE_INIT: return IB_QPS_INIT; 5684e126ba97SEli Cohen case MLX5_QP_STATE_RTR: return IB_QPS_RTR; 5685e126ba97SEli Cohen case MLX5_QP_STATE_RTS: return IB_QPS_RTS; 5686e126ba97SEli Cohen case MLX5_QP_STATE_SQ_DRAINING: 5687e126ba97SEli Cohen case MLX5_QP_STATE_SQD: return IB_QPS_SQD; 5688e126ba97SEli Cohen case MLX5_QP_STATE_SQER: return IB_QPS_SQE; 5689e126ba97SEli Cohen case MLX5_QP_STATE_ERR: return IB_QPS_ERR; 5690e126ba97SEli Cohen default: return -1; 5691e126ba97SEli Cohen } 5692e126ba97SEli Cohen } 5693e126ba97SEli Cohen 5694e126ba97SEli Cohen static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) 5695e126ba97SEli Cohen { 5696e126ba97SEli Cohen switch (mlx5_mig_state) { 5697e126ba97SEli Cohen case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; 5698e126ba97SEli Cohen case MLX5_QP_PM_REARM: return IB_MIG_REARM; 5699e126ba97SEli Cohen case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; 5700e126ba97SEli Cohen default: return -1; 5701e126ba97SEli Cohen } 5702e126ba97SEli Cohen } 5703e126ba97SEli Cohen 5704e126ba97SEli Cohen static int to_ib_qp_access_flags(int mlx5_flags) 5705e126ba97SEli Cohen { 5706e126ba97SEli Cohen int ib_flags = 0; 5707e126ba97SEli Cohen 5708e126ba97SEli Cohen if (mlx5_flags & MLX5_QP_BIT_RRE) 5709e126ba97SEli Cohen ib_flags |= IB_ACCESS_REMOTE_READ; 5710e126ba97SEli Cohen if (mlx5_flags & MLX5_QP_BIT_RWE) 5711e126ba97SEli Cohen ib_flags |= IB_ACCESS_REMOTE_WRITE; 5712e126ba97SEli Cohen if (mlx5_flags & MLX5_QP_BIT_RAE) 5713e126ba97SEli Cohen ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 5714e126ba97SEli Cohen 5715e126ba97SEli Cohen return ib_flags; 5716e126ba97SEli Cohen } 5717e126ba97SEli Cohen 571838349389SDasaratharaman Chandramouli static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, 5719d8966fcdSDasaratharaman Chandramouli struct rdma_ah_attr *ah_attr, 5720e126ba97SEli Cohen struct 
mlx5_qp_path *path) 5721e126ba97SEli Cohen { 5722e126ba97SEli Cohen 5723d8966fcdSDasaratharaman Chandramouli memset(ah_attr, 0, sizeof(*ah_attr)); 5724e126ba97SEli Cohen 5725e7996a9aSJason Gunthorpe if (!path->port || path->port > ibdev->num_ports) 5726e126ba97SEli Cohen return; 5727e126ba97SEli Cohen 5728ae59c3f0SLeon Romanovsky ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); 5729ae59c3f0SLeon Romanovsky 5730d8966fcdSDasaratharaman Chandramouli rdma_ah_set_port_num(ah_attr, path->port); 5731d8966fcdSDasaratharaman Chandramouli rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); 5732e126ba97SEli Cohen 5733d8966fcdSDasaratharaman Chandramouli rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid)); 5734d8966fcdSDasaratharaman Chandramouli rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f); 5735d8966fcdSDasaratharaman Chandramouli rdma_ah_set_static_rate(ah_attr, 5736d8966fcdSDasaratharaman Chandramouli path->static_rate ? path->static_rate - 5 : 0); 5737d8966fcdSDasaratharaman Chandramouli if (path->grh_mlid & (1 << 7)) { 5738d8966fcdSDasaratharaman Chandramouli u32 tc_fl = be32_to_cpu(path->tclass_flowlabel); 5739d8966fcdSDasaratharaman Chandramouli 5740d8966fcdSDasaratharaman Chandramouli rdma_ah_set_grh(ah_attr, NULL, 5741d8966fcdSDasaratharaman Chandramouli tc_fl & 0xfffff, 5742d8966fcdSDasaratharaman Chandramouli path->mgid_index, 5743d8966fcdSDasaratharaman Chandramouli path->hop_limit, 5744d8966fcdSDasaratharaman Chandramouli (tc_fl >> 20) & 0xff); 5745d8966fcdSDasaratharaman Chandramouli rdma_ah_set_dgid_raw(ah_attr, path->rgid); 5746e126ba97SEli Cohen } 5747e126ba97SEli Cohen } 5748e126ba97SEli Cohen 57496d2f89dfSmajd@mellanox.com static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, 57506d2f89dfSmajd@mellanox.com struct mlx5_ib_sq *sq, 57516d2f89dfSmajd@mellanox.com u8 *sq_state) 5752e126ba97SEli Cohen { 57536d2f89dfSmajd@mellanox.com int err; 57546d2f89dfSmajd@mellanox.com 575528160771SEran Ben Elisha err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state); 57566d2f89dfSmajd@mellanox.com if (err) 57576d2f89dfSmajd@mellanox.com goto out; 57586d2f89dfSmajd@mellanox.com sq->state = *sq_state; 57596d2f89dfSmajd@mellanox.com 57606d2f89dfSmajd@mellanox.com out: 57616d2f89dfSmajd@mellanox.com return err; 57626d2f89dfSmajd@mellanox.com } 57636d2f89dfSmajd@mellanox.com 57646d2f89dfSmajd@mellanox.com static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, 57656d2f89dfSmajd@mellanox.com struct mlx5_ib_rq *rq, 57666d2f89dfSmajd@mellanox.com u8 *rq_state) 57676d2f89dfSmajd@mellanox.com { 57686d2f89dfSmajd@mellanox.com void *out; 57696d2f89dfSmajd@mellanox.com void *rqc; 57706d2f89dfSmajd@mellanox.com int inlen; 57716d2f89dfSmajd@mellanox.com int err; 57726d2f89dfSmajd@mellanox.com 57736d2f89dfSmajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(query_rq_out); 57741b9a07eeSLeon Romanovsky out = kvzalloc(inlen, GFP_KERNEL); 57756d2f89dfSmajd@mellanox.com if (!out) 57766d2f89dfSmajd@mellanox.com return -ENOMEM; 57776d2f89dfSmajd@mellanox.com 57786d2f89dfSmajd@mellanox.com err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out); 57796d2f89dfSmajd@mellanox.com if (err) 57806d2f89dfSmajd@mellanox.com goto out; 57816d2f89dfSmajd@mellanox.com 57826d2f89dfSmajd@mellanox.com rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context); 57836d2f89dfSmajd@mellanox.com *rq_state = MLX5_GET(rqc, rqc, state); 57846d2f89dfSmajd@mellanox.com rq->state = *rq_state; 57856d2f89dfSmajd@mellanox.com 57866d2f89dfSmajd@mellanox.com out: 57876d2f89dfSmajd@mellanox.com kvfree(out); 
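/*
 * Like the SQ helper above, this caches the freshly queried hardware
 * state (rq->state) before returning, so that sqrq_state_to_qp_state()
 * below can combine both halves of a raw packet QP into one IB QP state.
 * Both helpers follow the usual mlx5 PRM query pattern: allocate an
 * output mailbox sized with MLX5_ST_SZ_BYTES(), issue the query command,
 * then pick fields out of the returned context with
 * MLX5_ADDR_OF()/MLX5_GET().
 */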
57886d2f89dfSmajd@mellanox.com return err; 57896d2f89dfSmajd@mellanox.com } 57906d2f89dfSmajd@mellanox.com 57916d2f89dfSmajd@mellanox.com static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state, 57926d2f89dfSmajd@mellanox.com struct mlx5_ib_qp *qp, u8 *qp_state) 57936d2f89dfSmajd@mellanox.com { 57946d2f89dfSmajd@mellanox.com static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = { 57956d2f89dfSmajd@mellanox.com [MLX5_RQC_STATE_RST] = { 57966d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RST] = IB_QPS_RESET, 57976d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 57986d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE_BAD, 57996d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = IB_QPS_RESET, 58006d2f89dfSmajd@mellanox.com }, 58016d2f89dfSmajd@mellanox.com [MLX5_RQC_STATE_RDY] = { 58026d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD, 58036d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 58046d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = IB_QPS_SQE, 58056d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = MLX5_QP_STATE, 58066d2f89dfSmajd@mellanox.com }, 58076d2f89dfSmajd@mellanox.com [MLX5_RQC_STATE_ERR] = { 58086d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD, 58096d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 58106d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = IB_QPS_ERR, 58116d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = IB_QPS_ERR, 58126d2f89dfSmajd@mellanox.com }, 58136d2f89dfSmajd@mellanox.com [MLX5_RQ_STATE_NA] = { 58146d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RST] = IB_QPS_RESET, 58156d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 58166d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE, 58176d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = MLX5_QP_STATE_BAD, 58186d2f89dfSmajd@mellanox.com }, 58196d2f89dfSmajd@mellanox.com }; 58206d2f89dfSmajd@mellanox.com 58216d2f89dfSmajd@mellanox.com *qp_state = sqrq_trans[rq_state][sq_state]; 58226d2f89dfSmajd@mellanox.com 58236d2f89dfSmajd@mellanox.com if (*qp_state == MLX5_QP_STATE_BAD) { 58246d2f89dfSmajd@mellanox.com WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x", 58256d2f89dfSmajd@mellanox.com qp->raw_packet_qp.sq.base.mqp.qpn, sq_state, 58266d2f89dfSmajd@mellanox.com qp->raw_packet_qp.rq.base.mqp.qpn, rq_state); 58276d2f89dfSmajd@mellanox.com return -EINVAL; 58286d2f89dfSmajd@mellanox.com } 58296d2f89dfSmajd@mellanox.com 58306d2f89dfSmajd@mellanox.com if (*qp_state == MLX5_QP_STATE) 58316d2f89dfSmajd@mellanox.com *qp_state = qp->state; 58326d2f89dfSmajd@mellanox.com 58336d2f89dfSmajd@mellanox.com return 0; 58346d2f89dfSmajd@mellanox.com } 58356d2f89dfSmajd@mellanox.com 58366d2f89dfSmajd@mellanox.com static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev, 58376d2f89dfSmajd@mellanox.com struct mlx5_ib_qp *qp, 58386d2f89dfSmajd@mellanox.com u8 *raw_packet_qp_state) 58396d2f89dfSmajd@mellanox.com { 58406d2f89dfSmajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 58416d2f89dfSmajd@mellanox.com struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 58426d2f89dfSmajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 58436d2f89dfSmajd@mellanox.com int err; 58446d2f89dfSmajd@mellanox.com u8 sq_state = MLX5_SQ_STATE_NA; 58456d2f89dfSmajd@mellanox.com u8 rq_state = MLX5_RQ_STATE_NA; 58466d2f89dfSmajd@mellanox.com 58476d2f89dfSmajd@mellanox.com if (qp->sq.wqe_cnt) { 58486d2f89dfSmajd@mellanox.com err = query_raw_packet_qp_sq_state(dev, sq, &sq_state); 
58496d2f89dfSmajd@mellanox.com if (err) 58506d2f89dfSmajd@mellanox.com return err; 58516d2f89dfSmajd@mellanox.com } 58526d2f89dfSmajd@mellanox.com 58536d2f89dfSmajd@mellanox.com if (qp->rq.wqe_cnt) { 58546d2f89dfSmajd@mellanox.com err = query_raw_packet_qp_rq_state(dev, rq, &rq_state); 58556d2f89dfSmajd@mellanox.com if (err) 58566d2f89dfSmajd@mellanox.com return err; 58576d2f89dfSmajd@mellanox.com } 58586d2f89dfSmajd@mellanox.com 58596d2f89dfSmajd@mellanox.com return sqrq_state_to_qp_state(sq_state, rq_state, qp, 58606d2f89dfSmajd@mellanox.com raw_packet_qp_state); 58616d2f89dfSmajd@mellanox.com } 58626d2f89dfSmajd@mellanox.com 58636d2f89dfSmajd@mellanox.com static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 58646d2f89dfSmajd@mellanox.com struct ib_qp_attr *qp_attr) 58656d2f89dfSmajd@mellanox.com { 586609a7d9ecSSaeed Mahameed int outlen = MLX5_ST_SZ_BYTES(query_qp_out); 5867e126ba97SEli Cohen struct mlx5_qp_context *context; 5868e126ba97SEli Cohen int mlx5_state; 586909a7d9ecSSaeed Mahameed u32 *outb; 5870e126ba97SEli Cohen int err = 0; 5871e126ba97SEli Cohen 587209a7d9ecSSaeed Mahameed outb = kzalloc(outlen, GFP_KERNEL); 58736d2f89dfSmajd@mellanox.com if (!outb) 58746d2f89dfSmajd@mellanox.com return -ENOMEM; 58756d2f89dfSmajd@mellanox.com 5876333fbaa0SLeon Romanovsky err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen); 5877e126ba97SEli Cohen if (err) 58786d2f89dfSmajd@mellanox.com goto out; 5879e126ba97SEli Cohen 588009a7d9ecSSaeed Mahameed /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ 588109a7d9ecSSaeed Mahameed context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc); 588209a7d9ecSSaeed Mahameed 5883e126ba97SEli Cohen mlx5_state = be32_to_cpu(context->flags) >> 28; 5884e126ba97SEli Cohen 5885e126ba97SEli Cohen qp->state = to_ib_qp_state(mlx5_state); 5886e126ba97SEli Cohen qp_attr->path_mtu = context->mtu_msgmax >> 5; 5887e126ba97SEli Cohen qp_attr->path_mig_state = 5888e126ba97SEli Cohen to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 5889e126ba97SEli Cohen qp_attr->qkey = be32_to_cpu(context->qkey); 5890e126ba97SEli Cohen qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; 5891e126ba97SEli Cohen qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; 5892e126ba97SEli Cohen qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff; 5893e126ba97SEli Cohen qp_attr->qp_access_flags = 5894e126ba97SEli Cohen to_ib_qp_access_flags(be32_to_cpu(context->params2)); 5895e126ba97SEli Cohen 5896e126ba97SEli Cohen if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { 589738349389SDasaratharaman Chandramouli to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 589838349389SDasaratharaman Chandramouli to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 5899d3ae2bdeSNoa Osherovich qp_attr->alt_pkey_index = 5900d3ae2bdeSNoa Osherovich be16_to_cpu(context->alt_path.pkey_index); 5901d8966fcdSDasaratharaman Chandramouli qp_attr->alt_port_num = 5902d8966fcdSDasaratharaman Chandramouli rdma_ah_get_port_num(&qp_attr->alt_ah_attr); 5903e126ba97SEli Cohen } 5904e126ba97SEli Cohen 5905d3ae2bdeSNoa Osherovich qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); 5906e126ba97SEli Cohen qp_attr->port_num = context->pri_path.port; 5907e126ba97SEli Cohen 5908e126ba97SEli Cohen /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 5909e126ba97SEli Cohen qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING; 
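/*
 * The remaining attributes are packed bit fields in the QP context:
 * max_rd_atomic and max_dest_rd_atomic are stored as log2 values in bits
 * 21..23 of params1 and params2 respectively, so they are decoded with
 * 1 << field below. For example, a raw field value of 3 decodes to
 * 1 << 3 = 8 outstanding RDMA read/atomic operations.
 */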
5910e126ba97SEli Cohen 5911e126ba97SEli Cohen qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); 5912e126ba97SEli Cohen 5913e126ba97SEli Cohen qp_attr->max_dest_rd_atomic = 5914e126ba97SEli Cohen 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); 5915e126ba97SEli Cohen qp_attr->min_rnr_timer = 5916e126ba97SEli Cohen (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; 5917e126ba97SEli Cohen qp_attr->timeout = context->pri_path.ackto_lt >> 3; 5918e126ba97SEli Cohen qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 5919e126ba97SEli Cohen qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; 5920e126ba97SEli Cohen qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; 59216d2f89dfSmajd@mellanox.com 59226d2f89dfSmajd@mellanox.com out: 59236d2f89dfSmajd@mellanox.com kfree(outb); 59246d2f89dfSmajd@mellanox.com return err; 59256d2f89dfSmajd@mellanox.com } 59266d2f89dfSmajd@mellanox.com 5927776a3906SMoni Shoua static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, 5928776a3906SMoni Shoua struct ib_qp_attr *qp_attr, int qp_attr_mask, 5929776a3906SMoni Shoua struct ib_qp_init_attr *qp_init_attr) 5930776a3906SMoni Shoua { 5931776a3906SMoni Shoua struct mlx5_core_dct *dct = &mqp->dct.mdct; 5932776a3906SMoni Shoua u32 *out; 5933776a3906SMoni Shoua u32 access_flags = 0; 5934776a3906SMoni Shoua int outlen = MLX5_ST_SZ_BYTES(query_dct_out); 5935776a3906SMoni Shoua void *dctc; 5936776a3906SMoni Shoua int err; 5937776a3906SMoni Shoua int supported_mask = IB_QP_STATE | 5938776a3906SMoni Shoua IB_QP_ACCESS_FLAGS | 5939776a3906SMoni Shoua IB_QP_PORT | 5940776a3906SMoni Shoua IB_QP_MIN_RNR_TIMER | 5941776a3906SMoni Shoua IB_QP_AV | 5942776a3906SMoni Shoua IB_QP_PATH_MTU | 5943776a3906SMoni Shoua IB_QP_PKEY_INDEX; 5944776a3906SMoni Shoua 5945776a3906SMoni Shoua if (qp_attr_mask & ~supported_mask) 5946776a3906SMoni Shoua return -EINVAL; 5947776a3906SMoni Shoua if (mqp->state != IB_QPS_RTR) 5948776a3906SMoni Shoua return -EINVAL; 5949776a3906SMoni Shoua 5950776a3906SMoni Shoua out = kzalloc(outlen, GFP_KERNEL); 5951776a3906SMoni Shoua if (!out) 5952776a3906SMoni Shoua return -ENOMEM; 5953776a3906SMoni Shoua 5954333fbaa0SLeon Romanovsky err = mlx5_core_dct_query(dev, dct, out, outlen); 5955776a3906SMoni Shoua if (err) 5956776a3906SMoni Shoua goto out; 5957776a3906SMoni Shoua 5958776a3906SMoni Shoua dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry); 5959776a3906SMoni Shoua 5960776a3906SMoni Shoua if (qp_attr_mask & IB_QP_STATE) 5961776a3906SMoni Shoua qp_attr->qp_state = IB_QPS_RTR; 5962776a3906SMoni Shoua 5963776a3906SMoni Shoua if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { 5964776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rre)) 5965776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_READ; 5966776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rwe)) 5967776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_WRITE; 5968776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rae)) 5969776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_ATOMIC; 5970776a3906SMoni Shoua qp_attr->qp_access_flags = access_flags; 5971776a3906SMoni Shoua } 5972776a3906SMoni Shoua 5973776a3906SMoni Shoua if (qp_attr_mask & IB_QP_PORT) 5974776a3906SMoni Shoua qp_attr->port_num = MLX5_GET(dctc, dctc, port); 5975776a3906SMoni Shoua if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) 5976776a3906SMoni Shoua qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak); 5977776a3906SMoni Shoua if (qp_attr_mask & IB_QP_AV) { 5978776a3906SMoni Shoua qp_attr->ah_attr.grh.traffic_class = 
MLX5_GET(dctc, dctc, tclass); 5979776a3906SMoni Shoua qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label); 5980776a3906SMoni Shoua qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index); 5981776a3906SMoni Shoua qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit); 5982776a3906SMoni Shoua } 5983776a3906SMoni Shoua if (qp_attr_mask & IB_QP_PATH_MTU) 5984776a3906SMoni Shoua qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu); 5985776a3906SMoni Shoua if (qp_attr_mask & IB_QP_PKEY_INDEX) 5986776a3906SMoni Shoua qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index); 5987776a3906SMoni Shoua out: 5988776a3906SMoni Shoua kfree(out); 5989776a3906SMoni Shoua return err; 5990776a3906SMoni Shoua } 5991776a3906SMoni Shoua 59926d2f89dfSmajd@mellanox.com int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 59936d2f89dfSmajd@mellanox.com int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) 59946d2f89dfSmajd@mellanox.com { 59956d2f89dfSmajd@mellanox.com struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 59966d2f89dfSmajd@mellanox.com struct mlx5_ib_qp *qp = to_mqp(ibqp); 59976d2f89dfSmajd@mellanox.com int err = 0; 59986d2f89dfSmajd@mellanox.com u8 raw_packet_qp_state; 59996d2f89dfSmajd@mellanox.com 600028d61370SYishai Hadas if (ibqp->rwq_ind_tbl) 600128d61370SYishai Hadas return -ENOSYS; 600228d61370SYishai Hadas 6003d16e91daSHaggai Eran if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 6004d16e91daSHaggai Eran return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask, 6005d16e91daSHaggai Eran qp_init_attr); 6006d16e91daSHaggai Eran 6007c2e53b2cSYishai Hadas /* Not all of output fields are applicable, make sure to zero them */ 6008c2e53b2cSYishai Hadas memset(qp_init_attr, 0, sizeof(*qp_init_attr)); 6009c2e53b2cSYishai Hadas memset(qp_attr, 0, sizeof(*qp_attr)); 6010c2e53b2cSYishai Hadas 60117aede1a2SLeon Romanovsky if (unlikely(qp->type == MLX5_IB_QPT_DCT)) 6012776a3906SMoni Shoua return mlx5_ib_dct_query_qp(dev, qp, qp_attr, 6013776a3906SMoni Shoua qp_attr_mask, qp_init_attr); 6014776a3906SMoni Shoua 60156d2f89dfSmajd@mellanox.com mutex_lock(&qp->mutex); 60166d2f89dfSmajd@mellanox.com 6017c2e53b2cSYishai Hadas if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || 60182be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 60196d2f89dfSmajd@mellanox.com err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state); 60206d2f89dfSmajd@mellanox.com if (err) 60216d2f89dfSmajd@mellanox.com goto out; 60226d2f89dfSmajd@mellanox.com qp->state = raw_packet_qp_state; 60236d2f89dfSmajd@mellanox.com qp_attr->port_num = 1; 60246d2f89dfSmajd@mellanox.com } else { 60256d2f89dfSmajd@mellanox.com err = query_qp_attr(dev, qp, qp_attr); 60266d2f89dfSmajd@mellanox.com if (err) 60276d2f89dfSmajd@mellanox.com goto out; 60286d2f89dfSmajd@mellanox.com } 60296d2f89dfSmajd@mellanox.com 60306d2f89dfSmajd@mellanox.com qp_attr->qp_state = qp->state; 6031e126ba97SEli Cohen qp_attr->cur_qp_state = qp_attr->qp_state; 6032e126ba97SEli Cohen qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; 6033e126ba97SEli Cohen qp_attr->cap.max_recv_sge = qp->rq.max_gs; 6034e126ba97SEli Cohen 6035e126ba97SEli Cohen if (!ibqp->uobject) { 60360540d814SNoa Osherovich qp_attr->cap.max_send_wr = qp->sq.max_post; 6037e126ba97SEli Cohen qp_attr->cap.max_send_sge = qp->sq.max_gs; 60380540d814SNoa Osherovich qp_init_attr->qp_context = ibqp->qp_context; 6039e126ba97SEli Cohen } else { 6040e126ba97SEli Cohen qp_attr->cap.max_send_wr = 0; 6041e126ba97SEli Cohen qp_attr->cap.max_send_sge = 0; 6042e126ba97SEli Cohen } 
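/*
 * For a userspace QP the send queue is sized and posted to by the
 * userspace provider, so zero send capabilities are reported above. A
 * minimal in-kernel caller of this verb would look roughly like the
 * following sketch (error handling elided):
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	err = ib_query_qp(ibqp, &attr, IB_QP_STATE | IB_QP_CAP, &init_attr);
 */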
6043e126ba97SEli Cohen
60440540d814SNoa Osherovich qp_init_attr->qp_type = ibqp->qp_type;
60450540d814SNoa Osherovich qp_init_attr->recv_cq = ibqp->recv_cq;
60460540d814SNoa Osherovich qp_init_attr->send_cq = ibqp->send_cq;
60470540d814SNoa Osherovich qp_init_attr->srq = ibqp->srq;
60480540d814SNoa Osherovich qp_attr->cap.max_inline_data = qp->max_inline_data;
6049e126ba97SEli Cohen
6050e126ba97SEli Cohen qp_init_attr->cap = qp_attr->cap;
6051e126ba97SEli Cohen
6052a8f3ea61SLeon Romanovsky qp_init_attr->create_flags = qp->flags;
6053051f2630SLeon Romanovsky
6054e126ba97SEli Cohen qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
6055e126ba97SEli Cohen IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
6056e126ba97SEli Cohen
6057e126ba97SEli Cohen out:
6058e126ba97SEli Cohen mutex_unlock(&qp->mutex);
6059e126ba97SEli Cohen return err;
6060e126ba97SEli Cohen }
6061e126ba97SEli Cohen
6062e126ba97SEli Cohen struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
6063e126ba97SEli Cohen struct ib_udata *udata)
6064e126ba97SEli Cohen {
6065e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(ibdev);
6066e126ba97SEli Cohen struct mlx5_ib_xrcd *xrcd;
6067e126ba97SEli Cohen int err;
6068e126ba97SEli Cohen
6069938fe83cSSaeed Mahameed if (!MLX5_CAP_GEN(dev->mdev, xrc))
6070e126ba97SEli Cohen return ERR_PTR(-ENOSYS);
6071e126ba97SEli Cohen
6072e126ba97SEli Cohen xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
6073e126ba97SEli Cohen if (!xrcd)
6074e126ba97SEli Cohen return ERR_PTR(-ENOMEM);
6075e126ba97SEli Cohen
60765aa3771dSYishai Hadas err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
6077e126ba97SEli Cohen if (err) {
6078e126ba97SEli Cohen kfree(xrcd);
6079e126ba97SEli Cohen return ERR_PTR(err);
6080e126ba97SEli Cohen }
6081e126ba97SEli Cohen
6082e126ba97SEli Cohen return &xrcd->ibxrcd;
6083e126ba97SEli Cohen }
6084e126ba97SEli Cohen
6085c4367a26SShamir Rabinovitch int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
6086e126ba97SEli Cohen {
6087e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
6088e126ba97SEli Cohen u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
6089e126ba97SEli Cohen int err;
6090e126ba97SEli Cohen
60915aa3771dSYishai Hadas err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
6092b081808aSLeon Romanovsky if (err)
6093e126ba97SEli Cohen mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
6094e126ba97SEli Cohen
6095e126ba97SEli Cohen kfree(xrcd);
6096e126ba97SEli Cohen return 0;
6097e126ba97SEli Cohen }
609879b20a6cSYishai Hadas
6099350d0e4cSYishai Hadas static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
6100350d0e4cSYishai Hadas {
6101350d0e4cSYishai Hadas struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
6102350d0e4cSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
6103350d0e4cSYishai Hadas struct ib_event event;
6104350d0e4cSYishai Hadas
6105350d0e4cSYishai Hadas if (rwq->ibwq.event_handler) {
6106350d0e4cSYishai Hadas event.device = rwq->ibwq.device;
6107350d0e4cSYishai Hadas event.element.wq = &rwq->ibwq;
6108350d0e4cSYishai Hadas switch (type) {
6109350d0e4cSYishai Hadas case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
6110350d0e4cSYishai Hadas event.event = IB_EVENT_WQ_FATAL;
6111350d0e4cSYishai Hadas break;
6112350d0e4cSYishai Hadas default:
6113350d0e4cSYishai Hadas mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
6114350d0e4cSYishai Hadas return;
6115350d0e4cSYishai Hadas }
6116350d0e4cSYishai Hadas
6117350d0e4cSYishai Hadas rwq->ibwq.event_handler(&event,
rwq->ibwq.wq_context); 6118350d0e4cSYishai Hadas } 6119350d0e4cSYishai Hadas } 6120350d0e4cSYishai Hadas 612103404e8aSMaor Gottlieb static int set_delay_drop(struct mlx5_ib_dev *dev) 612203404e8aSMaor Gottlieb { 612303404e8aSMaor Gottlieb int err = 0; 612403404e8aSMaor Gottlieb 612503404e8aSMaor Gottlieb mutex_lock(&dev->delay_drop.lock); 612603404e8aSMaor Gottlieb if (dev->delay_drop.activate) 612703404e8aSMaor Gottlieb goto out; 612803404e8aSMaor Gottlieb 6129333fbaa0SLeon Romanovsky err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout); 613003404e8aSMaor Gottlieb if (err) 613103404e8aSMaor Gottlieb goto out; 613203404e8aSMaor Gottlieb 613303404e8aSMaor Gottlieb dev->delay_drop.activate = true; 613403404e8aSMaor Gottlieb out: 613503404e8aSMaor Gottlieb mutex_unlock(&dev->delay_drop.lock); 6136fe248c3aSMaor Gottlieb 6137fe248c3aSMaor Gottlieb if (!err) 6138fe248c3aSMaor Gottlieb atomic_inc(&dev->delay_drop.rqs_cnt); 613903404e8aSMaor Gottlieb return err; 614003404e8aSMaor Gottlieb } 614103404e8aSMaor Gottlieb 614279b20a6cSYishai Hadas static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, 614379b20a6cSYishai Hadas struct ib_wq_init_attr *init_attr) 614479b20a6cSYishai Hadas { 614579b20a6cSYishai Hadas struct mlx5_ib_dev *dev; 61464be6da1eSNoa Osherovich int has_net_offloads; 614779b20a6cSYishai Hadas __be64 *rq_pas0; 614879b20a6cSYishai Hadas void *in; 614979b20a6cSYishai Hadas void *rqc; 615079b20a6cSYishai Hadas void *wq; 615179b20a6cSYishai Hadas int inlen; 615279b20a6cSYishai Hadas int err; 615379b20a6cSYishai Hadas 615479b20a6cSYishai Hadas dev = to_mdev(pd->device); 615579b20a6cSYishai Hadas 615679b20a6cSYishai Hadas inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; 61571b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 615879b20a6cSYishai Hadas if (!in) 615979b20a6cSYishai Hadas return -ENOMEM; 616079b20a6cSYishai Hadas 616134d57585SYishai Hadas MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid); 616279b20a6cSYishai Hadas rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 616379b20a6cSYishai Hadas MLX5_SET(rqc, rqc, mem_rq_type, 616479b20a6cSYishai Hadas MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); 616579b20a6cSYishai Hadas MLX5_SET(rqc, rqc, user_index, rwq->user_index); 616679b20a6cSYishai Hadas MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); 616779b20a6cSYishai Hadas MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 616879b20a6cSYishai Hadas MLX5_SET(rqc, rqc, flush_in_error_en, 1); 616979b20a6cSYishai Hadas wq = MLX5_ADDR_OF(rqc, rqc, wq); 6170ccc87087SNoa Osherovich MLX5_SET(wq, wq, wq_type, 6171ccc87087SNoa Osherovich rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ? 
6172ccc87087SNoa Osherovich MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
6173b1383aa6SNoa Osherovich if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
6174b1383aa6SNoa Osherovich if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
6175b1383aa6SNoa Osherovich mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
6176b1383aa6SNoa Osherovich err = -EOPNOTSUPP;
6177b1383aa6SNoa Osherovich goto out;
6178b1383aa6SNoa Osherovich } else {
617979b20a6cSYishai Hadas MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
6180b1383aa6SNoa Osherovich }
6181b1383aa6SNoa Osherovich }
618279b20a6cSYishai Hadas MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
6183ccc87087SNoa Osherovich if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
6184c16339b6SMark Zhang /*
6185c16339b6SMark Zhang * In firmware, the number of strides in each WQE is
6186c16339b6SMark Zhang * "512 * 2^single_wqe_log_num_of_strides".
6187c16339b6SMark Zhang * Values 3 to 8 are accepted as 10 to 15; values 9 to 18 are
6188c16339b6SMark Zhang * accepted as 0 to 9.
6189c16339b6SMark Zhang */
6190c16339b6SMark Zhang static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
6191c16339b6SMark Zhang 2, 3, 4, 5, 6, 7, 8, 9 };
6192ccc87087SNoa Osherovich MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
6193ccc87087SNoa Osherovich MLX5_SET(wq, wq, log_wqe_stride_size,
6194ccc87087SNoa Osherovich rwq->single_stride_log_num_of_bytes -
6195ccc87087SNoa Osherovich MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
6196c16339b6SMark Zhang MLX5_SET(wq, wq, log_wqe_num_of_strides,
6197c16339b6SMark Zhang fw_map[rwq->log_num_strides -
6198c16339b6SMark Zhang MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
6199ccc87087SNoa Osherovich }
620079b20a6cSYishai Hadas MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
620179b20a6cSYishai Hadas MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
620279b20a6cSYishai Hadas MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
620379b20a6cSYishai Hadas MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
620479b20a6cSYishai Hadas MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
620579b20a6cSYishai Hadas MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
62064be6da1eSNoa Osherovich has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
6207b1f74a84SNoa Osherovich if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
62084be6da1eSNoa Osherovich if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
6209b1f74a84SNoa Osherovich mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
6210b1f74a84SNoa Osherovich err = -EOPNOTSUPP;
6211b1f74a84SNoa Osherovich goto out;
6212b1f74a84SNoa Osherovich }
6213b1f74a84SNoa Osherovich } else {
6214b1f74a84SNoa Osherovich MLX5_SET(rqc, rqc, vsd, 1);
6215b1f74a84SNoa Osherovich }
62164be6da1eSNoa Osherovich if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
62174be6da1eSNoa Osherovich if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
62184be6da1eSNoa Osherovich mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
62194be6da1eSNoa Osherovich err = -EOPNOTSUPP;
62204be6da1eSNoa Osherovich goto out;
62214be6da1eSNoa Osherovich }
62224be6da1eSNoa Osherovich MLX5_SET(rqc, rqc, scatter_fcs, 1);
62234be6da1eSNoa Osherovich }
622403404e8aSMaor Gottlieb if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
622503404e8aSMaor Gottlieb if (!(dev->ib_dev.attrs.raw_packet_caps &
622603404e8aSMaor Gottlieb IB_RAW_PACKET_CAP_DELAY_DROP)) {
622703404e8aSMaor Gottlieb mlx5_ib_dbg(dev, "Delay drop is not supported\n");
622803404e8aSMaor Gottlieb err = -EOPNOTSUPP;
622903404e8aSMaor Gottlieb goto out; 623003404e8aSMaor Gottlieb } 623103404e8aSMaor Gottlieb MLX5_SET(rqc, rqc, delay_drop_en, 1); 623203404e8aSMaor Gottlieb } 623379b20a6cSYishai Hadas rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 623479b20a6cSYishai Hadas mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0); 6235333fbaa0SLeon Romanovsky err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp); 623603404e8aSMaor Gottlieb if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) { 623703404e8aSMaor Gottlieb err = set_delay_drop(dev); 623803404e8aSMaor Gottlieb if (err) { 623903404e8aSMaor Gottlieb mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n", 624003404e8aSMaor Gottlieb err); 6241333fbaa0SLeon Romanovsky mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); 624203404e8aSMaor Gottlieb } else { 624303404e8aSMaor Gottlieb rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP; 624403404e8aSMaor Gottlieb } 624503404e8aSMaor Gottlieb } 6246b1f74a84SNoa Osherovich out: 624779b20a6cSYishai Hadas kvfree(in); 624879b20a6cSYishai Hadas return err; 624979b20a6cSYishai Hadas } 625079b20a6cSYishai Hadas 625179b20a6cSYishai Hadas static int set_user_rq_size(struct mlx5_ib_dev *dev, 625279b20a6cSYishai Hadas struct ib_wq_init_attr *wq_init_attr, 625379b20a6cSYishai Hadas struct mlx5_ib_create_wq *ucmd, 625479b20a6cSYishai Hadas struct mlx5_ib_rwq *rwq) 625579b20a6cSYishai Hadas { 625679b20a6cSYishai Hadas /* Sanity check RQ size before proceeding */ 625779b20a6cSYishai Hadas if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz))) 625879b20a6cSYishai Hadas return -EINVAL; 625979b20a6cSYishai Hadas 626079b20a6cSYishai Hadas if (!ucmd->rq_wqe_count) 626179b20a6cSYishai Hadas return -EINVAL; 626279b20a6cSYishai Hadas 626379b20a6cSYishai Hadas rwq->wqe_count = ucmd->rq_wqe_count; 626479b20a6cSYishai Hadas rwq->wqe_shift = ucmd->rq_wqe_shift; 62650dfe4522SLeon Romanovsky if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size)) 62660dfe4522SLeon Romanovsky return -EINVAL; 62670dfe4522SLeon Romanovsky 626879b20a6cSYishai Hadas rwq->log_rq_stride = rwq->wqe_shift; 626979b20a6cSYishai Hadas rwq->log_rq_size = ilog2(rwq->wqe_count); 627079b20a6cSYishai Hadas return 0; 627179b20a6cSYishai Hadas } 627279b20a6cSYishai Hadas 6273c16339b6SMark Zhang static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides) 6274c16339b6SMark Zhang { 6275c16339b6SMark Zhang if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) || 6276c16339b6SMark Zhang (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) 6277c16339b6SMark Zhang return false; 6278c16339b6SMark Zhang 6279c16339b6SMark Zhang if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) && 6280c16339b6SMark Zhang (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) 6281c16339b6SMark Zhang return false; 6282c16339b6SMark Zhang 6283c16339b6SMark Zhang return true; 6284c16339b6SMark Zhang } 6285c16339b6SMark Zhang 628679b20a6cSYishai Hadas static int prepare_user_rq(struct ib_pd *pd, 628779b20a6cSYishai Hadas struct ib_wq_init_attr *init_attr, 628879b20a6cSYishai Hadas struct ib_udata *udata, 628979b20a6cSYishai Hadas struct mlx5_ib_rwq *rwq) 629079b20a6cSYishai Hadas { 629179b20a6cSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(pd->device); 629279b20a6cSYishai Hadas struct mlx5_ib_create_wq ucmd = {}; 629379b20a6cSYishai Hadas int err; 629479b20a6cSYishai Hadas size_t required_cmd_sz; 629579b20a6cSYishai Hadas 6296ccc87087SNoa Osherovich required_cmd_sz = offsetof(typeof(ucmd), 
single_stride_log_num_of_bytes) 6297ccc87087SNoa Osherovich + sizeof(ucmd.single_stride_log_num_of_bytes); 629879b20a6cSYishai Hadas if (udata->inlen < required_cmd_sz) { 629979b20a6cSYishai Hadas mlx5_ib_dbg(dev, "invalid inlen\n"); 630079b20a6cSYishai Hadas return -EINVAL; 630179b20a6cSYishai Hadas } 630279b20a6cSYishai Hadas 630379b20a6cSYishai Hadas if (udata->inlen > sizeof(ucmd) && 630479b20a6cSYishai Hadas !ib_is_udata_cleared(udata, sizeof(ucmd), 630579b20a6cSYishai Hadas udata->inlen - sizeof(ucmd))) { 630679b20a6cSYishai Hadas mlx5_ib_dbg(dev, "inlen is not supported\n"); 630779b20a6cSYishai Hadas return -EOPNOTSUPP; 630879b20a6cSYishai Hadas } 630979b20a6cSYishai Hadas 631079b20a6cSYishai Hadas if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { 631179b20a6cSYishai Hadas mlx5_ib_dbg(dev, "copy failed\n"); 631279b20a6cSYishai Hadas return -EFAULT; 631379b20a6cSYishai Hadas } 631479b20a6cSYishai Hadas 6315ccc87087SNoa Osherovich if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) { 631679b20a6cSYishai Hadas mlx5_ib_dbg(dev, "invalid comp mask\n"); 631779b20a6cSYishai Hadas return -EOPNOTSUPP; 6318ccc87087SNoa Osherovich } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) { 6319ccc87087SNoa Osherovich if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) { 6320ccc87087SNoa Osherovich mlx5_ib_dbg(dev, "Striding RQ is not supported\n"); 632179b20a6cSYishai Hadas return -EOPNOTSUPP; 632279b20a6cSYishai Hadas } 6323ccc87087SNoa Osherovich if ((ucmd.single_stride_log_num_of_bytes < 6324ccc87087SNoa Osherovich MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) || 6325ccc87087SNoa Osherovich (ucmd.single_stride_log_num_of_bytes > 6326ccc87087SNoa Osherovich MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) { 6327ccc87087SNoa Osherovich mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n", 6328ccc87087SNoa Osherovich ucmd.single_stride_log_num_of_bytes, 6329ccc87087SNoa Osherovich MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES, 6330ccc87087SNoa Osherovich MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES); 6331ccc87087SNoa Osherovich return -EINVAL; 6332ccc87087SNoa Osherovich } 6333c16339b6SMark Zhang if (!log_of_strides_valid(dev, 6334c16339b6SMark Zhang ucmd.single_wqe_log_num_of_strides)) { 6335c16339b6SMark Zhang mlx5_ib_dbg( 6336c16339b6SMark Zhang dev, 6337c16339b6SMark Zhang "Invalid log num strides (%u. Range is %u - %u)\n", 6338ccc87087SNoa Osherovich ucmd.single_wqe_log_num_of_strides, 6339c16339b6SMark Zhang MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ? 
6340c16339b6SMark Zhang MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES : 6341ccc87087SNoa Osherovich MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES, 6342ccc87087SNoa Osherovich MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES); 6343ccc87087SNoa Osherovich return -EINVAL; 6344ccc87087SNoa Osherovich } 6345ccc87087SNoa Osherovich rwq->single_stride_log_num_of_bytes = 6346ccc87087SNoa Osherovich ucmd.single_stride_log_num_of_bytes; 6347ccc87087SNoa Osherovich rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides; 6348ccc87087SNoa Osherovich rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en; 6349ccc87087SNoa Osherovich rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ; 6350ccc87087SNoa Osherovich } 635179b20a6cSYishai Hadas 635279b20a6cSYishai Hadas err = set_user_rq_size(dev, init_attr, &ucmd, rwq); 635379b20a6cSYishai Hadas if (err) { 635479b20a6cSYishai Hadas mlx5_ib_dbg(dev, "err %d\n", err); 635579b20a6cSYishai Hadas return err; 635679b20a6cSYishai Hadas } 635779b20a6cSYishai Hadas 6358b0ea0fa5SJason Gunthorpe err = create_user_rq(dev, pd, udata, rwq, &ucmd); 635979b20a6cSYishai Hadas if (err) { 636079b20a6cSYishai Hadas mlx5_ib_dbg(dev, "err %d\n", err); 636179b20a6cSYishai Hadas return err; 636279b20a6cSYishai Hadas } 636379b20a6cSYishai Hadas 636479b20a6cSYishai Hadas rwq->user_index = ucmd.user_index; 636579b20a6cSYishai Hadas return 0; 636679b20a6cSYishai Hadas } 636779b20a6cSYishai Hadas 636879b20a6cSYishai Hadas struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, 636979b20a6cSYishai Hadas struct ib_wq_init_attr *init_attr, 637079b20a6cSYishai Hadas struct ib_udata *udata) 637179b20a6cSYishai Hadas { 637279b20a6cSYishai Hadas struct mlx5_ib_dev *dev; 637379b20a6cSYishai Hadas struct mlx5_ib_rwq *rwq; 637479b20a6cSYishai Hadas struct mlx5_ib_create_wq_resp resp = {}; 637579b20a6cSYishai Hadas size_t min_resp_len; 637679b20a6cSYishai Hadas int err; 637779b20a6cSYishai Hadas 637879b20a6cSYishai Hadas if (!udata) 637979b20a6cSYishai Hadas return ERR_PTR(-ENOSYS); 638079b20a6cSYishai Hadas 638179b20a6cSYishai Hadas min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 638279b20a6cSYishai Hadas if (udata->outlen && udata->outlen < min_resp_len) 638379b20a6cSYishai Hadas return ERR_PTR(-EINVAL); 638479b20a6cSYishai Hadas 6385ba80013fSMaor Gottlieb if (!capable(CAP_SYS_RAWIO) && 6386ba80013fSMaor Gottlieb init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) 6387ba80013fSMaor Gottlieb return ERR_PTR(-EPERM); 6388ba80013fSMaor Gottlieb 638979b20a6cSYishai Hadas dev = to_mdev(pd->device); 639079b20a6cSYishai Hadas switch (init_attr->wq_type) { 639179b20a6cSYishai Hadas case IB_WQT_RQ: 639279b20a6cSYishai Hadas rwq = kzalloc(sizeof(*rwq), GFP_KERNEL); 639379b20a6cSYishai Hadas if (!rwq) 639479b20a6cSYishai Hadas return ERR_PTR(-ENOMEM); 639579b20a6cSYishai Hadas err = prepare_user_rq(pd, init_attr, udata, rwq); 639679b20a6cSYishai Hadas if (err) 639779b20a6cSYishai Hadas goto err; 639879b20a6cSYishai Hadas err = create_rq(rwq, pd, init_attr); 639979b20a6cSYishai Hadas if (err) 640079b20a6cSYishai Hadas goto err_user_rq; 640179b20a6cSYishai Hadas break; 640279b20a6cSYishai Hadas default: 640379b20a6cSYishai Hadas mlx5_ib_dbg(dev, "unsupported wq type %d\n", 640479b20a6cSYishai Hadas init_attr->wq_type); 640579b20a6cSYishai Hadas return ERR_PTR(-EINVAL); 640679b20a6cSYishai Hadas } 640779b20a6cSYishai Hadas 6408350d0e4cSYishai Hadas rwq->ibwq.wq_num = rwq->core_qp.qpn; 640979b20a6cSYishai Hadas rwq->ibwq.state = IB_WQS_RESET; 641079b20a6cSYishai Hadas if (udata->outlen) { 641179b20a6cSYishai Hadas 
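/*
 * Only bytes up to and including resp.response_length are copied back
 * below; this extensible-response convention lets a newer kernel append
 * fields to struct mlx5_ib_create_wq_resp without confusing older
 * userspace, which reads back only the length it knows about.
 */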
resp.response_length = offsetof(typeof(resp), response_length) + 641279b20a6cSYishai Hadas sizeof(resp.response_length); 641379b20a6cSYishai Hadas err = ib_copy_to_udata(udata, &resp, resp.response_length); 641479b20a6cSYishai Hadas if (err) 641579b20a6cSYishai Hadas goto err_copy; 641679b20a6cSYishai Hadas } 641779b20a6cSYishai Hadas 6418350d0e4cSYishai Hadas rwq->core_qp.event = mlx5_ib_wq_event; 6419350d0e4cSYishai Hadas rwq->ibwq.event_handler = init_attr->event_handler; 642079b20a6cSYishai Hadas return &rwq->ibwq; 642179b20a6cSYishai Hadas 642279b20a6cSYishai Hadas err_copy: 6423333fbaa0SLeon Romanovsky mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); 642479b20a6cSYishai Hadas err_user_rq: 6425bdeacabdSShamir Rabinovitch destroy_user_rq(dev, pd, rwq, udata); 642679b20a6cSYishai Hadas err: 642779b20a6cSYishai Hadas kfree(rwq); 642879b20a6cSYishai Hadas return ERR_PTR(err); 642979b20a6cSYishai Hadas } 643079b20a6cSYishai Hadas 6431a49b1dc7SLeon Romanovsky void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata) 643279b20a6cSYishai Hadas { 643379b20a6cSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(wq->device); 643479b20a6cSYishai Hadas struct mlx5_ib_rwq *rwq = to_mrwq(wq); 643579b20a6cSYishai Hadas 6436333fbaa0SLeon Romanovsky mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp); 6437bdeacabdSShamir Rabinovitch destroy_user_rq(dev, wq->pd, rwq, udata); 643879b20a6cSYishai Hadas kfree(rwq); 643979b20a6cSYishai Hadas } 644079b20a6cSYishai Hadas 6441c5f90929SYishai Hadas struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, 6442c5f90929SYishai Hadas struct ib_rwq_ind_table_init_attr *init_attr, 6443c5f90929SYishai Hadas struct ib_udata *udata) 6444c5f90929SYishai Hadas { 6445c5f90929SYishai Hadas struct mlx5_ib_dev *dev = to_mdev(device); 6446c5f90929SYishai Hadas struct mlx5_ib_rwq_ind_table *rwq_ind_tbl; 6447c5f90929SYishai Hadas int sz = 1 << init_attr->log_ind_tbl_size; 6448c5f90929SYishai Hadas struct mlx5_ib_create_rwq_ind_tbl_resp resp = {}; 6449c5f90929SYishai Hadas size_t min_resp_len; 6450c5f90929SYishai Hadas int inlen; 6451c5f90929SYishai Hadas int err; 6452c5f90929SYishai Hadas int i; 6453c5f90929SYishai Hadas u32 *in; 6454c5f90929SYishai Hadas void *rqtc; 6455c5f90929SYishai Hadas 6456c5f90929SYishai Hadas if (udata->inlen > 0 && 6457c5f90929SYishai Hadas !ib_is_udata_cleared(udata, 0, 6458c5f90929SYishai Hadas udata->inlen)) 6459c5f90929SYishai Hadas return ERR_PTR(-EOPNOTSUPP); 6460c5f90929SYishai Hadas 6461efd7f400SMaor Gottlieb if (init_attr->log_ind_tbl_size > 6462efd7f400SMaor Gottlieb MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { 6463efd7f400SMaor Gottlieb mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n", 6464efd7f400SMaor Gottlieb init_attr->log_ind_tbl_size, 6465efd7f400SMaor Gottlieb MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); 6466efd7f400SMaor Gottlieb return ERR_PTR(-EINVAL); 6467efd7f400SMaor Gottlieb } 6468efd7f400SMaor Gottlieb 6469c5f90929SYishai Hadas min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 6470c5f90929SYishai Hadas if (udata->outlen && udata->outlen < min_resp_len) 6471c5f90929SYishai Hadas return ERR_PTR(-EINVAL); 6472c5f90929SYishai Hadas 6473c5f90929SYishai Hadas rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL); 6474c5f90929SYishai Hadas if (!rwq_ind_tbl) 6475c5f90929SYishai Hadas return ERR_PTR(-ENOMEM); 6476c5f90929SYishai Hadas 6477c5f90929SYishai Hadas inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 64781b9a07eeSLeon Romanovsky in = 
kvzalloc(inlen, GFP_KERNEL); 6479c5f90929SYishai Hadas if (!in) { 6480c5f90929SYishai Hadas err = -ENOMEM; 6481c5f90929SYishai Hadas goto err; 6482c5f90929SYishai Hadas } 6483c5f90929SYishai Hadas 6484c5f90929SYishai Hadas rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 6485c5f90929SYishai Hadas 6486c5f90929SYishai Hadas MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 6487c5f90929SYishai Hadas MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 6488c5f90929SYishai Hadas 6489c5f90929SYishai Hadas for (i = 0; i < sz; i++) 6490c5f90929SYishai Hadas MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num); 6491c5f90929SYishai Hadas 64925deba86eSYishai Hadas rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid; 64935deba86eSYishai Hadas MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid); 64945deba86eSYishai Hadas 6495c5f90929SYishai Hadas err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn); 6496c5f90929SYishai Hadas kvfree(in); 6497c5f90929SYishai Hadas 6498c5f90929SYishai Hadas if (err) 6499c5f90929SYishai Hadas goto err; 6500c5f90929SYishai Hadas 6501c5f90929SYishai Hadas rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn; 6502c5f90929SYishai Hadas if (udata->outlen) { 6503c5f90929SYishai Hadas resp.response_length = offsetof(typeof(resp), response_length) + 6504c5f90929SYishai Hadas sizeof(resp.response_length); 6505c5f90929SYishai Hadas err = ib_copy_to_udata(udata, &resp, resp.response_length); 6506c5f90929SYishai Hadas if (err) 6507c5f90929SYishai Hadas goto err_copy; 6508c5f90929SYishai Hadas } 6509c5f90929SYishai Hadas 6510c5f90929SYishai Hadas return &rwq_ind_tbl->ib_rwq_ind_tbl; 6511c5f90929SYishai Hadas 6512c5f90929SYishai Hadas err_copy: 65135deba86eSYishai Hadas mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid); 6514c5f90929SYishai Hadas err: 6515c5f90929SYishai Hadas kfree(rwq_ind_tbl); 6516c5f90929SYishai Hadas return ERR_PTR(err); 6517c5f90929SYishai Hadas } 6518c5f90929SYishai Hadas 6519c5f90929SYishai Hadas int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl) 6520c5f90929SYishai Hadas { 6521c5f90929SYishai Hadas struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl); 6522c5f90929SYishai Hadas struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device); 6523c5f90929SYishai Hadas 65245deba86eSYishai Hadas mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid); 6525c5f90929SYishai Hadas 6526c5f90929SYishai Hadas kfree(rwq_ind_tbl); 6527c5f90929SYishai Hadas return 0; 6528c5f90929SYishai Hadas } 6529c5f90929SYishai Hadas 653079b20a6cSYishai Hadas int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 653179b20a6cSYishai Hadas u32 wq_attr_mask, struct ib_udata *udata) 653279b20a6cSYishai Hadas { 653379b20a6cSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(wq->device); 653479b20a6cSYishai Hadas struct mlx5_ib_rwq *rwq = to_mrwq(wq); 653579b20a6cSYishai Hadas struct mlx5_ib_modify_wq ucmd = {}; 653679b20a6cSYishai Hadas size_t required_cmd_sz; 653779b20a6cSYishai Hadas int curr_wq_state; 653879b20a6cSYishai Hadas int wq_state; 653979b20a6cSYishai Hadas int inlen; 654079b20a6cSYishai Hadas int err; 654179b20a6cSYishai Hadas void *rqc; 654279b20a6cSYishai Hadas void *in; 654379b20a6cSYishai Hadas 654479b20a6cSYishai Hadas required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved); 654579b20a6cSYishai Hadas if (udata->inlen < required_cmd_sz) 654679b20a6cSYishai Hadas return -EINVAL; 654779b20a6cSYishai Hadas 654879b20a6cSYishai Hadas if (udata->inlen > 
sizeof(ucmd) &&
654979b20a6cSYishai Hadas !ib_is_udata_cleared(udata, sizeof(ucmd),
655079b20a6cSYishai Hadas udata->inlen - sizeof(ucmd)))
655179b20a6cSYishai Hadas return -EOPNOTSUPP;
655279b20a6cSYishai Hadas
655379b20a6cSYishai Hadas if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
655479b20a6cSYishai Hadas return -EFAULT;
655579b20a6cSYishai Hadas
655679b20a6cSYishai Hadas if (ucmd.comp_mask || ucmd.reserved)
655779b20a6cSYishai Hadas return -EOPNOTSUPP;
655879b20a6cSYishai Hadas
655979b20a6cSYishai Hadas inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
65601b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL);
656179b20a6cSYishai Hadas if (!in)
656279b20a6cSYishai Hadas return -ENOMEM;
656379b20a6cSYishai Hadas
656479b20a6cSYishai Hadas rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
656579b20a6cSYishai Hadas
656679b20a6cSYishai Hadas curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
656779b20a6cSYishai Hadas wq_attr->curr_wq_state : wq->state;
656879b20a6cSYishai Hadas wq_state = (wq_attr_mask & IB_WQ_STATE) ?
656979b20a6cSYishai Hadas wq_attr->wq_state : curr_wq_state;
657079b20a6cSYishai Hadas if (curr_wq_state == IB_WQS_ERR)
657179b20a6cSYishai Hadas curr_wq_state = MLX5_RQC_STATE_ERR;
657279b20a6cSYishai Hadas if (wq_state == IB_WQS_ERR)
657379b20a6cSYishai Hadas wq_state = MLX5_RQC_STATE_ERR;
657479b20a6cSYishai Hadas MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
657534d57585SYishai Hadas MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
657679b20a6cSYishai Hadas MLX5_SET(rqc, rqc, state, wq_state);
657779b20a6cSYishai Hadas
6578b1f74a84SNoa Osherovich if (wq_attr_mask & IB_WQ_FLAGS) {
6579b1f74a84SNoa Osherovich if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
6580b1f74a84SNoa Osherovich if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
6581b1f74a84SNoa Osherovich MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
6582b1f74a84SNoa Osherovich mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
6584b1f74a84SNoa Osherovich err = -EOPNOTSUPP;
6585b1f74a84SNoa Osherovich goto out;
6586b1f74a84SNoa Osherovich }
6587b1f74a84SNoa Osherovich MLX5_SET64(modify_rq_in, in, modify_bitmask,
6588b1f74a84SNoa Osherovich MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
6589b1f74a84SNoa Osherovich MLX5_SET(rqc, rqc, vsd,
6590b1f74a84SNoa Osherovich (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ?
0 : 1);
6591b1f74a84SNoa Osherovich }
6592b1383aa6SNoa Osherovich
6593b1383aa6SNoa Osherovich if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
6594b1383aa6SNoa Osherovich mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
6595b1383aa6SNoa Osherovich err = -EOPNOTSUPP;
6596b1383aa6SNoa Osherovich goto out;
6597b1383aa6SNoa Osherovich }
6598b1f74a84SNoa Osherovich }
6599b1f74a84SNoa Osherovich
660023a6964eSMajd Dibbiny if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
66013e1f000fSParav Pandit u16 set_id;
66023e1f000fSParav Pandit
66033e1f000fSParav Pandit set_id = mlx5_ib_get_counters_id(dev, 0);
660423a6964eSMajd Dibbiny if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
660523a6964eSMajd Dibbiny MLX5_SET64(modify_rq_in, in, modify_bitmask,
660623a6964eSMajd Dibbiny MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
66073e1f000fSParav Pandit MLX5_SET(rqc, rqc, counter_set_id, set_id);
660823a6964eSMajd Dibbiny } else
66095a738b5dSJason Gunthorpe dev_info_once(
66105a738b5dSJason Gunthorpe &dev->ib_dev.dev,
66115a738b5dSJason Gunthorpe "Receive WQ counters are not supported on current FW\n");
661223a6964eSMajd Dibbiny }
661323a6964eSMajd Dibbiny
6614e0b4b472SLeon Romanovsky err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
661579b20a6cSYishai Hadas if (!err)
661679b20a6cSYishai Hadas rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
661779b20a6cSYishai Hadas
6618b1f74a84SNoa Osherovich out:
6619b1f74a84SNoa Osherovich kvfree(in);
662079b20a6cSYishai Hadas return err;
662179b20a6cSYishai Hadas }
6622d0e84c0aSYishai Hadas
6623d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe {
6624d0e84c0aSYishai Hadas struct ib_cqe cqe;
6625d0e84c0aSYishai Hadas struct completion done;
6626d0e84c0aSYishai Hadas };
6627d0e84c0aSYishai Hadas
6628d0e84c0aSYishai Hadas static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
6629d0e84c0aSYishai Hadas {
6630d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
6631d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe,
6632d0e84c0aSYishai Hadas cqe);
6633d0e84c0aSYishai Hadas
6634d0e84c0aSYishai Hadas complete(&cqe->done);
6635d0e84c0aSYishai Hadas }
6636d0e84c0aSYishai Hadas
6637d0e84c0aSYishai Hadas /* This function returns only once the drained WR has completed */
6638d0e84c0aSYishai Hadas static void handle_drain_completion(struct ib_cq *cq,
6639d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe *sdrain,
6640d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev)
6641d0e84c0aSYishai Hadas {
6642d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev;
6643d0e84c0aSYishai Hadas
6644d0e84c0aSYishai Hadas if (cq->poll_ctx == IB_POLL_DIRECT) {
6645d0e84c0aSYishai Hadas while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
6646d0e84c0aSYishai Hadas ib_process_cq_direct(cq, -1);
6647d0e84c0aSYishai Hadas return;
6648d0e84c0aSYishai Hadas }
6649d0e84c0aSYishai Hadas
6650d0e84c0aSYishai Hadas if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6651d0e84c0aSYishai Hadas struct mlx5_ib_cq *mcq = to_mcq(cq);
6652d0e84c0aSYishai Hadas bool triggered = false;
6653d0e84c0aSYishai Hadas unsigned long flags;
6654d0e84c0aSYishai Hadas
6655d0e84c0aSYishai Hadas spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
6656d0e84c0aSYishai Hadas /* Make sure that the CQ handler won't run if it hasn't run yet */
6657d0e84c0aSYishai Hadas if (!mcq->mcq.reset_notify_added)
6658d0e84c0aSYishai Hadas mcq->mcq.reset_notify_added = 1;
6659d0e84c0aSYishai Hadas else
6660d0e84c0aSYishai Hadas triggered = true;
6661d0e84c0aSYishai Hadas spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
6662d0e84c0aSYishai Hadas
6663d0e84c0aSYishai Hadas if (triggered) {
6664d0e84c0aSYishai Hadas /* Wait for any scheduled/running task to be ended */
6665d0e84c0aSYishai Hadas switch (cq->poll_ctx) {
6666d0e84c0aSYishai Hadas case IB_POLL_SOFTIRQ:
6667d0e84c0aSYishai Hadas irq_poll_disable(&cq->iop);
6668d0e84c0aSYishai Hadas irq_poll_enable(&cq->iop);
6669d0e84c0aSYishai Hadas break;
6670d0e84c0aSYishai Hadas case IB_POLL_WORKQUEUE:
6671d0e84c0aSYishai Hadas cancel_work_sync(&cq->work);
6672d0e84c0aSYishai Hadas break;
6673d0e84c0aSYishai Hadas default:
6674d0e84c0aSYishai Hadas WARN_ON_ONCE(1);
6675d0e84c0aSYishai Hadas }
6676d0e84c0aSYishai Hadas }
6677d0e84c0aSYishai Hadas
6678d0e84c0aSYishai Hadas /* Run the CQ handler - this makes sure that the drain WR will
6679d0e84c0aSYishai Hadas * be processed if it wasn't processed yet.
6680d0e84c0aSYishai Hadas */
66814e0e2ea1SYishai Hadas mcq->mcq.comp(&mcq->mcq, NULL);
6682d0e84c0aSYishai Hadas }
6683d0e84c0aSYishai Hadas
6684d0e84c0aSYishai Hadas wait_for_completion(&sdrain->done);
6685d0e84c0aSYishai Hadas }
6686d0e84c0aSYishai Hadas
6687d0e84c0aSYishai Hadas void mlx5_ib_drain_sq(struct ib_qp *qp)
6688d0e84c0aSYishai Hadas {
6689d0e84c0aSYishai Hadas struct ib_cq *cq = qp->send_cq;
6690d0e84c0aSYishai Hadas struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
6691d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe sdrain;
6692d34ac5cdSBart Van Assche const struct ib_send_wr *bad_swr;
6693d0e84c0aSYishai Hadas struct ib_rdma_wr swr = {
6694d0e84c0aSYishai Hadas .wr = {
6695d0e84c0aSYishai Hadas .next = NULL,
6696d0e84c0aSYishai Hadas { .wr_cqe = &sdrain.cqe, },
6697d0e84c0aSYishai Hadas .opcode = IB_WR_RDMA_WRITE,
6698d0e84c0aSYishai Hadas },
6699d0e84c0aSYishai Hadas };
6700d0e84c0aSYishai Hadas int ret;
6701d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(qp->device);
6702d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev;
6703d0e84c0aSYishai Hadas
6704d0e84c0aSYishai Hadas ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
6705d0e84c0aSYishai Hadas if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6706d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
6707d0e84c0aSYishai Hadas return;
6708d0e84c0aSYishai Hadas }
6709d0e84c0aSYishai Hadas
6710d0e84c0aSYishai Hadas sdrain.cqe.done = mlx5_ib_drain_qp_done;
6711d0e84c0aSYishai Hadas init_completion(&sdrain.done);
6712d0e84c0aSYishai Hadas
6713d0e84c0aSYishai Hadas ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
6714d0e84c0aSYishai Hadas if (ret) {
6715d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
6716d0e84c0aSYishai Hadas return;
6717d0e84c0aSYishai Hadas }
6718d0e84c0aSYishai Hadas
6719d0e84c0aSYishai Hadas handle_drain_completion(cq, &sdrain, dev);
6720d0e84c0aSYishai Hadas }
6721d0e84c0aSYishai Hadas
6722d0e84c0aSYishai Hadas void mlx5_ib_drain_rq(struct ib_qp *qp)
6723d0e84c0aSYishai Hadas {
6724d0e84c0aSYishai Hadas struct ib_cq *cq = qp->recv_cq;
6725d0e84c0aSYishai Hadas struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
6726d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe rdrain;
6727d34ac5cdSBart Van Assche struct ib_recv_wr rwr = {};
6728d34ac5cdSBart Van Assche const struct ib_recv_wr *bad_rwr;
6729d0e84c0aSYishai Hadas int ret;
6730d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(qp->device);
6731d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev;
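/*
 * Same recipe as mlx5_ib_drain_sq() above: move the QP to the error
 * state, post a single marker WR carrying our completion callback, and
 * wait for that WR to be flushed back. Since WRs complete in order once
 * the QP is in error, the marker's completion guarantees that every
 * previously posted receive WR has completed as well. A typical caller
 * drains before tearing the QP down, e.g. (sketch):
 *
 *	mlx5_ib_drain_rq(qp);
 *	ib_destroy_qp(qp);
 */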
6732d0e84c0aSYishai Hadas
6733d0e84c0aSYishai Hadas ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
6734d0e84c0aSYishai Hadas if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6735d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
6736d0e84c0aSYishai Hadas return;
6737d0e84c0aSYishai Hadas }
6738d0e84c0aSYishai Hadas
6739d0e84c0aSYishai Hadas rwr.wr_cqe = &rdrain.cqe;
6740d0e84c0aSYishai Hadas rdrain.cqe.done = mlx5_ib_drain_qp_done;
6741d0e84c0aSYishai Hadas init_completion(&rdrain.done);
6742d0e84c0aSYishai Hadas
6743d0e84c0aSYishai Hadas ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
6744d0e84c0aSYishai Hadas if (ret) {
6745d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
6746d0e84c0aSYishai Hadas return;
6747d0e84c0aSYishai Hadas }
6748d0e84c0aSYishai Hadas
6749d0e84c0aSYishai Hadas handle_drain_completion(cq, &rdrain, dev);
6750d0e84c0aSYishai Hadas }
6751d14133ddSMark Zhang
6752d14133ddSMark Zhang /**
6753d14133ddSMark Zhang * mlx5_ib_qp_set_counter - Bind a QP to a counter. If @counter is NULL
6754d14133ddSMark Zhang * then bind the QP to the default counter.
6755d14133ddSMark Zhang */
6756d14133ddSMark Zhang int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
6757d14133ddSMark Zhang {
675810189e8eSMark Zhang struct mlx5_ib_dev *dev = to_mdev(qp->device);
6759d14133ddSMark Zhang struct mlx5_ib_qp *mqp = to_mqp(qp);
6760d14133ddSMark Zhang int err = 0;
6761d14133ddSMark Zhang
6762d14133ddSMark Zhang mutex_lock(&mqp->mutex);
6763d14133ddSMark Zhang if (mqp->state == IB_QPS_RESET) {
6764d14133ddSMark Zhang qp->counter = counter;
6765d14133ddSMark Zhang goto out;
6766d14133ddSMark Zhang }
6767d14133ddSMark Zhang
676810189e8eSMark Zhang if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
676910189e8eSMark Zhang err = -EOPNOTSUPP;
677010189e8eSMark Zhang goto out;
677110189e8eSMark Zhang }
677210189e8eSMark Zhang
6773d14133ddSMark Zhang if (mqp->state == IB_QPS_RTS) {
6774d14133ddSMark Zhang err = __mlx5_ib_qp_set_counter(qp, counter);
6775d14133ddSMark Zhang if (!err)
6776d14133ddSMark Zhang qp->counter = counter;
6777d14133ddSMark Zhang
6778d14133ddSMark Zhang goto out;
6779d14133ddSMark Zhang }
6780d14133ddSMark Zhang
6781d14133ddSMark Zhang mqp->counter_pending = 1;
6782d14133ddSMark Zhang qp->counter = counter;
6783d14133ddSMark Zhang
6784d14133ddSMark Zhang out:
6785d14133ddSMark Zhang mutex_unlock(&mqp->mutex);
6786d14133ddSMark Zhang return err;
6787d14133ddSMark Zhang }
6788
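/*
 * Note that the binding above is deliberately lazy: a QP in RESET only
 * records the counter, a QP in RTS is rebound in firmware immediately via
 * __mlx5_ib_qp_set_counter(), and any other state sets counter_pending so
 * that the bind can be applied once the QP is later modified to RTS. A
 * sketch of the expected usage from the rdma counter core (error handling
 * elided):
 *
 *	To bind:	err = mlx5_ib_qp_set_counter(qp, counter);
 *	To unbind:	err = mlx5_ib_qp_set_counter(qp, NULL);
 *
 * Passing NULL falls back to the device's default counter.
 */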