/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31e126ba97SEli Cohen */ 32e126ba97SEli Cohen 33b6459415SJakub Kicinski #include <linux/etherdevice.h> 34e126ba97SEli Cohen #include <rdma/ib_umem.h> 352811ba51SAchiad Shochat #include <rdma/ib_cache.h> 36cfb5e088SHaggai Abramovsky #include <rdma/ib_user_verbs.h> 37d14133ddSMark Zhang #include <rdma/rdma_counter.h> 38c2e53b2cSYishai Hadas #include <linux/mlx5/fs.h> 39e126ba97SEli Cohen #include "mlx5_ib.h" 40b96c9ddeSMark Bloch #include "ib_rep.h" 4164825827SLeon Romanovsky #include "counters.h" 42443c1cf9SYishai Hadas #include "cmd.h" 438a8a5d37SAharon Landau #include "umr.h" 44333fbaa0SLeon Romanovsky #include "qp.h" 45029e88fdSLeon Romanovsky #include "wr.h" 46e126ba97SEli Cohen 47e126ba97SEli Cohen enum { 48e126ba97SEli Cohen MLX5_IB_ACK_REQ_FREQ = 8, 49e126ba97SEli Cohen }; 50e126ba97SEli Cohen 51e126ba97SEli Cohen enum { 52e126ba97SEli Cohen MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83, 53e126ba97SEli Cohen MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, 54e126ba97SEli Cohen MLX5_IB_LINK_TYPE_IB = 0, 55e126ba97SEli Cohen MLX5_IB_LINK_TYPE_ETH = 1 56e126ba97SEli Cohen }; 57e126ba97SEli Cohen 58eb49ab0cSAlex Vesker enum raw_qp_set_mask_map { 59eb49ab0cSAlex Vesker MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0, 607d29f349SBodong Wang MLX5_RAW_QP_RATE_LIMIT = 1UL << 1, 61eb49ab0cSAlex Vesker }; 62eb49ab0cSAlex Vesker 630680efa2SAlex Vesker struct mlx5_modify_raw_qp_param { 640680efa2SAlex Vesker u16 operation; 65eb49ab0cSAlex Vesker 66eb49ab0cSAlex Vesker u32 set_mask; /* raw_qp_set_mask_map */ 6761147f39SBodong Wang 6861147f39SBodong Wang struct mlx5_rate_limit rl; 6961147f39SBodong Wang 70eb49ab0cSAlex Vesker u8 rq_q_ctr_id; 711fb7f897SMark Bloch u32 port; 720680efa2SAlex Vesker }; 730680efa2SAlex Vesker 74312b8f79SMark Zhang struct mlx5_ib_qp_event_work { 75312b8f79SMark Zhang struct work_struct work; 76312b8f79SMark Zhang struct mlx5_core_qp *qp; 77312b8f79SMark Zhang int type; 78312b8f79SMark Zhang }; 79312b8f79SMark Zhang 80312b8f79SMark Zhang static struct workqueue_struct 
*mlx5_ib_qp_event_wq; 81312b8f79SMark Zhang 8289ea94a7SMaor Gottlieb static void get_cqs(enum ib_qp_type qp_type, 8389ea94a7SMaor Gottlieb struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 8489ea94a7SMaor Gottlieb struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq); 8589ea94a7SMaor Gottlieb 86e126ba97SEli Cohen static int is_qp0(enum ib_qp_type qp_type) 87e126ba97SEli Cohen { 88e126ba97SEli Cohen return qp_type == IB_QPT_SMI; 89e126ba97SEli Cohen } 90e126ba97SEli Cohen 91e126ba97SEli Cohen static int is_sqp(enum ib_qp_type qp_type) 92e126ba97SEli Cohen { 93e126ba97SEli Cohen return is_qp0(qp_type) || is_qp1(qp_type); 94e126ba97SEli Cohen } 95e126ba97SEli Cohen 96c1395a2aSHaggai Eran /** 97fbeb4075SMoni Shoua * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of) from user WQ 98fbeb4075SMoni Shoua * to kernel buffer 99c1395a2aSHaggai Eran * 100fbeb4075SMoni Shoua * @umem: User space memory where the WQ is 101fbeb4075SMoni Shoua * @buffer: buffer to copy to 102fbeb4075SMoni Shoua * @buflen: buffer length 103fbeb4075SMoni Shoua * @wqe_index: index of WQE to copy from 104fbeb4075SMoni Shoua * @wq_offset: offset to start of WQ 105fbeb4075SMoni Shoua * @wq_wqe_cnt: number of WQEs in WQ 106fbeb4075SMoni Shoua * @wq_wqe_shift: log2 of WQE size 107fbeb4075SMoni Shoua * @bcnt: number of bytes to copy 108fbeb4075SMoni Shoua * @bytes_copied: number of bytes to copy (return value) 109c1395a2aSHaggai Eran * 110fbeb4075SMoni Shoua * Copies from start of WQE bcnt or less bytes. 111fbeb4075SMoni Shoua * Does not gurantee to copy the entire WQE. 112c1395a2aSHaggai Eran * 113fbeb4075SMoni Shoua * Return: zero on success, or an error code. 
114c1395a2aSHaggai Eran */ 115da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer, 116da9ee9d8SMoni Shoua size_t buflen, int wqe_index, 117da9ee9d8SMoni Shoua int wq_offset, int wq_wqe_cnt, 118da9ee9d8SMoni Shoua int wq_wqe_shift, int bcnt, 119fbeb4075SMoni Shoua size_t *bytes_copied) 120c1395a2aSHaggai Eran { 121fbeb4075SMoni Shoua size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift); 122fbeb4075SMoni Shoua size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift); 123fbeb4075SMoni Shoua size_t copy_length; 124c1395a2aSHaggai Eran int ret; 125c1395a2aSHaggai Eran 126fbeb4075SMoni Shoua /* don't copy more than requested, more than buffer length or 127fbeb4075SMoni Shoua * beyond WQ end 128fbeb4075SMoni Shoua */ 129fbeb4075SMoni Shoua copy_length = min_t(u32, buflen, wq_end - offset); 130fbeb4075SMoni Shoua copy_length = min_t(u32, copy_length, bcnt); 131c1395a2aSHaggai Eran 132fbeb4075SMoni Shoua ret = ib_umem_copy_from(buffer, umem, offset, copy_length); 133c1395a2aSHaggai Eran if (ret) 134c1395a2aSHaggai Eran return ret; 135c1395a2aSHaggai Eran 136fbeb4075SMoni Shoua if (!ret && bytes_copied) 137fbeb4075SMoni Shoua *bytes_copied = copy_length; 138c1395a2aSHaggai Eran 139fbeb4075SMoni Shoua return 0; 140fbeb4075SMoni Shoua } 141fbeb4075SMoni Shoua 142da9ee9d8SMoni Shoua static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, 143da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 144da9ee9d8SMoni Shoua { 145da9ee9d8SMoni Shoua struct mlx5_wqe_ctrl_seg *ctrl; 146da9ee9d8SMoni Shoua size_t bytes_copied = 0; 147da9ee9d8SMoni Shoua size_t wqe_length; 148da9ee9d8SMoni Shoua void *p; 149da9ee9d8SMoni Shoua int ds; 150da9ee9d8SMoni Shoua 151da9ee9d8SMoni Shoua wqe_index = wqe_index & qp->sq.fbc.sz_m1; 152da9ee9d8SMoni Shoua 153da9ee9d8SMoni Shoua /* read the control segment first */ 154da9ee9d8SMoni Shoua p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index); 155da9ee9d8SMoni Shoua ctrl = 
p; 156da9ee9d8SMoni Shoua ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; 157da9ee9d8SMoni Shoua wqe_length = ds * MLX5_WQE_DS_UNITS; 158da9ee9d8SMoni Shoua 159da9ee9d8SMoni Shoua /* read rest of WQE if it spreads over more than one stride */ 160da9ee9d8SMoni Shoua while (bytes_copied < wqe_length) { 161da9ee9d8SMoni Shoua size_t copy_length = 162da9ee9d8SMoni Shoua min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB); 163da9ee9d8SMoni Shoua 164da9ee9d8SMoni Shoua if (!copy_length) 165da9ee9d8SMoni Shoua break; 166da9ee9d8SMoni Shoua 167da9ee9d8SMoni Shoua memcpy(buffer + bytes_copied, p, copy_length); 168da9ee9d8SMoni Shoua bytes_copied += copy_length; 169da9ee9d8SMoni Shoua 170da9ee9d8SMoni Shoua wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1; 171da9ee9d8SMoni Shoua p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index); 172da9ee9d8SMoni Shoua } 173da9ee9d8SMoni Shoua *bc = bytes_copied; 174da9ee9d8SMoni Shoua return 0; 175da9ee9d8SMoni Shoua } 176da9ee9d8SMoni Shoua 177da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, 178da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 179fbeb4075SMoni Shoua { 180fbeb4075SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 181fbeb4075SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 182fbeb4075SMoni Shoua struct mlx5_ib_wq *wq = &qp->sq; 183fbeb4075SMoni Shoua struct mlx5_wqe_ctrl_seg *ctrl; 184fbeb4075SMoni Shoua size_t bytes_copied; 185fbeb4075SMoni Shoua size_t bytes_copied2; 186fbeb4075SMoni Shoua size_t wqe_length; 187fbeb4075SMoni Shoua int ret; 188fbeb4075SMoni Shoua int ds; 189fbeb4075SMoni Shoua 190fbeb4075SMoni Shoua /* at first read as much as possible */ 191da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 192da9ee9d8SMoni Shoua wq->offset, wq->wqe_cnt, 193da9ee9d8SMoni Shoua wq->wqe_shift, buflen, 194fbeb4075SMoni Shoua &bytes_copied); 195fbeb4075SMoni Shoua if (ret) 196fbeb4075SMoni Shoua return ret; 
197fbeb4075SMoni Shoua 198fbeb4075SMoni Shoua /* we need at least control segment size to proceed */ 199fbeb4075SMoni Shoua if (bytes_copied < sizeof(*ctrl)) 200fbeb4075SMoni Shoua return -EINVAL; 201fbeb4075SMoni Shoua 202fbeb4075SMoni Shoua ctrl = buffer; 203fbeb4075SMoni Shoua ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; 204c1395a2aSHaggai Eran wqe_length = ds * MLX5_WQE_DS_UNITS; 205fbeb4075SMoni Shoua 206fbeb4075SMoni Shoua /* if we copied enough then we are done */ 207fbeb4075SMoni Shoua if (bytes_copied >= wqe_length) { 208fbeb4075SMoni Shoua *bc = bytes_copied; 209fbeb4075SMoni Shoua return 0; 210c1395a2aSHaggai Eran } 211c1395a2aSHaggai Eran 212fbeb4075SMoni Shoua /* otherwise this a wrapped around wqe 213fbeb4075SMoni Shoua * so read the remaining bytes starting 214fbeb4075SMoni Shoua * from wqe_index 0 215fbeb4075SMoni Shoua */ 216da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied, 217da9ee9d8SMoni Shoua buflen - bytes_copied, 0, wq->offset, 218da9ee9d8SMoni Shoua wq->wqe_cnt, wq->wqe_shift, 219fbeb4075SMoni Shoua wqe_length - bytes_copied, 220fbeb4075SMoni Shoua &bytes_copied2); 221c1395a2aSHaggai Eran 222c1395a2aSHaggai Eran if (ret) 223c1395a2aSHaggai Eran return ret; 224fbeb4075SMoni Shoua *bc = bytes_copied + bytes_copied2; 225fbeb4075SMoni Shoua return 0; 226fbeb4075SMoni Shoua } 227c1395a2aSHaggai Eran 228da9ee9d8SMoni Shoua int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, 229da9ee9d8SMoni Shoua size_t buflen, size_t *bc) 230da9ee9d8SMoni Shoua { 231da9ee9d8SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 232da9ee9d8SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 233da9ee9d8SMoni Shoua 234da9ee9d8SMoni Shoua if (buflen < sizeof(struct mlx5_wqe_ctrl_seg)) 235da9ee9d8SMoni Shoua return -EINVAL; 236da9ee9d8SMoni Shoua 237da9ee9d8SMoni Shoua if (!umem) 238da9ee9d8SMoni Shoua return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer, 239da9ee9d8SMoni Shoua buflen, 
bc); 240da9ee9d8SMoni Shoua 241da9ee9d8SMoni Shoua return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc); 242da9ee9d8SMoni Shoua } 243da9ee9d8SMoni Shoua 244da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, 245da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 246fbeb4075SMoni Shoua { 247fbeb4075SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 248fbeb4075SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 249fbeb4075SMoni Shoua struct mlx5_ib_wq *wq = &qp->rq; 250fbeb4075SMoni Shoua size_t bytes_copied; 251fbeb4075SMoni Shoua int ret; 252fbeb4075SMoni Shoua 253da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 254da9ee9d8SMoni Shoua wq->offset, wq->wqe_cnt, 255da9ee9d8SMoni Shoua wq->wqe_shift, buflen, 256fbeb4075SMoni Shoua &bytes_copied); 257fbeb4075SMoni Shoua 258fbeb4075SMoni Shoua if (ret) 259fbeb4075SMoni Shoua return ret; 260fbeb4075SMoni Shoua *bc = bytes_copied; 261fbeb4075SMoni Shoua return 0; 262fbeb4075SMoni Shoua } 263fbeb4075SMoni Shoua 264da9ee9d8SMoni Shoua int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, 265da9ee9d8SMoni Shoua size_t buflen, size_t *bc) 266da9ee9d8SMoni Shoua { 267da9ee9d8SMoni Shoua struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 268da9ee9d8SMoni Shoua struct ib_umem *umem = base->ubuffer.umem; 269da9ee9d8SMoni Shoua struct mlx5_ib_wq *wq = &qp->rq; 270da9ee9d8SMoni Shoua size_t wqe_size = 1 << wq->wqe_shift; 271da9ee9d8SMoni Shoua 272da9ee9d8SMoni Shoua if (buflen < wqe_size) 273da9ee9d8SMoni Shoua return -EINVAL; 274da9ee9d8SMoni Shoua 275da9ee9d8SMoni Shoua if (!umem) 276da9ee9d8SMoni Shoua return -EOPNOTSUPP; 277da9ee9d8SMoni Shoua 278da9ee9d8SMoni Shoua return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc); 279da9ee9d8SMoni Shoua } 280da9ee9d8SMoni Shoua 281da9ee9d8SMoni Shoua static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, 
282da9ee9d8SMoni Shoua void *buffer, size_t buflen, size_t *bc) 283fbeb4075SMoni Shoua { 284fbeb4075SMoni Shoua struct ib_umem *umem = srq->umem; 285fbeb4075SMoni Shoua size_t bytes_copied; 286fbeb4075SMoni Shoua int ret; 287fbeb4075SMoni Shoua 288da9ee9d8SMoni Shoua ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0, 289da9ee9d8SMoni Shoua srq->msrq.max, srq->msrq.wqe_shift, 290da9ee9d8SMoni Shoua buflen, &bytes_copied); 291fbeb4075SMoni Shoua 292fbeb4075SMoni Shoua if (ret) 293fbeb4075SMoni Shoua return ret; 294fbeb4075SMoni Shoua *bc = bytes_copied; 295fbeb4075SMoni Shoua return 0; 296c1395a2aSHaggai Eran } 297c1395a2aSHaggai Eran 298da9ee9d8SMoni Shoua int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer, 299da9ee9d8SMoni Shoua size_t buflen, size_t *bc) 300da9ee9d8SMoni Shoua { 301da9ee9d8SMoni Shoua struct ib_umem *umem = srq->umem; 302da9ee9d8SMoni Shoua size_t wqe_size = 1 << srq->msrq.wqe_shift; 303da9ee9d8SMoni Shoua 304da9ee9d8SMoni Shoua if (buflen < wqe_size) 305da9ee9d8SMoni Shoua return -EINVAL; 306da9ee9d8SMoni Shoua 307da9ee9d8SMoni Shoua if (!umem) 308da9ee9d8SMoni Shoua return -EOPNOTSUPP; 309da9ee9d8SMoni Shoua 310da9ee9d8SMoni Shoua return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc); 311da9ee9d8SMoni Shoua } 312da9ee9d8SMoni Shoua 313*8067fd8bSPatrisious Haddad static void mlx5_ib_qp_err_syndrome(struct ib_qp *ibqp) 314*8067fd8bSPatrisious Haddad { 315*8067fd8bSPatrisious Haddad struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 316*8067fd8bSPatrisious Haddad int outlen = MLX5_ST_SZ_BYTES(query_qp_out); 317*8067fd8bSPatrisious Haddad struct mlx5_ib_qp *qp = to_mqp(ibqp); 318*8067fd8bSPatrisious Haddad void *pas_ext_union, *err_syn; 319*8067fd8bSPatrisious Haddad u32 *outb; 320*8067fd8bSPatrisious Haddad int err; 321*8067fd8bSPatrisious Haddad 322*8067fd8bSPatrisious Haddad if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) || 323*8067fd8bSPatrisious Haddad !MLX5_CAP_GEN(dev->mdev, 
qp_error_syndrome)) 324*8067fd8bSPatrisious Haddad return; 325*8067fd8bSPatrisious Haddad 326*8067fd8bSPatrisious Haddad outb = kzalloc(outlen, GFP_KERNEL); 327*8067fd8bSPatrisious Haddad if (!outb) 328*8067fd8bSPatrisious Haddad return; 329*8067fd8bSPatrisious Haddad 330*8067fd8bSPatrisious Haddad err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen, 331*8067fd8bSPatrisious Haddad true); 332*8067fd8bSPatrisious Haddad if (err) 333*8067fd8bSPatrisious Haddad goto out; 334*8067fd8bSPatrisious Haddad 335*8067fd8bSPatrisious Haddad pas_ext_union = 336*8067fd8bSPatrisious Haddad MLX5_ADDR_OF(query_qp_out, outb, qp_pas_or_qpc_ext_and_pas); 337*8067fd8bSPatrisious Haddad err_syn = MLX5_ADDR_OF(qpc_extension_and_pas_list_in, pas_ext_union, 338*8067fd8bSPatrisious Haddad qpc_data_extension.error_syndrome); 339*8067fd8bSPatrisious Haddad 340*8067fd8bSPatrisious Haddad pr_err("%s/%d: QP %d error: %s (0x%x 0x%x 0x%x)\n", 341*8067fd8bSPatrisious Haddad ibqp->device->name, ibqp->port, ibqp->qp_num, 342*8067fd8bSPatrisious Haddad ib_wc_status_msg( 343*8067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, syndrome)), 344*8067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, vendor_error_syndrome), 345*8067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, hw_syndrome_type), 346*8067fd8bSPatrisious Haddad MLX5_GET(cqe_error_syndrome, err_syn, hw_error_syndrome)); 347*8067fd8bSPatrisious Haddad out: 348*8067fd8bSPatrisious Haddad kfree(outb); 349*8067fd8bSPatrisious Haddad } 350*8067fd8bSPatrisious Haddad 351312b8f79SMark Zhang static void mlx5_ib_handle_qp_event(struct work_struct *_work) 352e126ba97SEli Cohen { 353312b8f79SMark Zhang struct mlx5_ib_qp_event_work *qpe_work = 354312b8f79SMark Zhang container_of(_work, struct mlx5_ib_qp_event_work, work); 355312b8f79SMark Zhang struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; 356312b8f79SMark Zhang struct ib_event event = {}; 357e126ba97SEli Cohen 358e126ba97SEli Cohen event.device = 
ibqp->device; 359e126ba97SEli Cohen event.element.qp = ibqp; 360312b8f79SMark Zhang switch (qpe_work->type) { 361e126ba97SEli Cohen case MLX5_EVENT_TYPE_PATH_MIG: 362e126ba97SEli Cohen event.event = IB_EVENT_PATH_MIG; 363e126ba97SEli Cohen break; 364e126ba97SEli Cohen case MLX5_EVENT_TYPE_COMM_EST: 365e126ba97SEli Cohen event.event = IB_EVENT_COMM_EST; 366e126ba97SEli Cohen break; 367e126ba97SEli Cohen case MLX5_EVENT_TYPE_SQ_DRAINED: 368e126ba97SEli Cohen event.event = IB_EVENT_SQ_DRAINED; 369e126ba97SEli Cohen break; 370e126ba97SEli Cohen case MLX5_EVENT_TYPE_SRQ_LAST_WQE: 371e126ba97SEli Cohen event.event = IB_EVENT_QP_LAST_WQE_REACHED; 372e126ba97SEli Cohen break; 373e126ba97SEli Cohen case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: 374e126ba97SEli Cohen event.event = IB_EVENT_QP_FATAL; 375e126ba97SEli Cohen break; 376e126ba97SEli Cohen case MLX5_EVENT_TYPE_PATH_MIG_FAILED: 377e126ba97SEli Cohen event.event = IB_EVENT_PATH_MIG_ERR; 378e126ba97SEli Cohen break; 379e126ba97SEli Cohen case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: 380e126ba97SEli Cohen event.event = IB_EVENT_QP_REQ_ERR; 381e126ba97SEli Cohen break; 382e126ba97SEli Cohen case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: 383e126ba97SEli Cohen event.event = IB_EVENT_QP_ACCESS_ERR; 384e126ba97SEli Cohen break; 385e126ba97SEli Cohen default: 386312b8f79SMark Zhang pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", 387312b8f79SMark Zhang qpe_work->type, qpe_work->qp->qpn); 388312b8f79SMark Zhang goto out; 389e126ba97SEli Cohen } 390e126ba97SEli Cohen 391*8067fd8bSPatrisious Haddad if ((event.event == IB_EVENT_QP_FATAL) || 392*8067fd8bSPatrisious Haddad (event.event == IB_EVENT_QP_ACCESS_ERR)) 393*8067fd8bSPatrisious Haddad mlx5_ib_qp_err_syndrome(ibqp); 394*8067fd8bSPatrisious Haddad 395e126ba97SEli Cohen ibqp->event_handler(&event, ibqp->qp_context); 396312b8f79SMark Zhang 397312b8f79SMark Zhang out: 398312b8f79SMark Zhang mlx5_core_res_put(&qpe_work->qp->common); 399312b8f79SMark Zhang kfree(qpe_work); 400e126ba97SEli 
Cohen } 401312b8f79SMark Zhang 402312b8f79SMark Zhang static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) 403312b8f79SMark Zhang { 404312b8f79SMark Zhang struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; 405312b8f79SMark Zhang struct mlx5_ib_qp_event_work *qpe_work; 406312b8f79SMark Zhang 407312b8f79SMark Zhang if (type == MLX5_EVENT_TYPE_PATH_MIG) { 408312b8f79SMark Zhang /* This event is only valid for trans_qps */ 409312b8f79SMark Zhang to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port; 410312b8f79SMark Zhang } 411312b8f79SMark Zhang 412312b8f79SMark Zhang if (!ibqp->event_handler) 413312b8f79SMark Zhang goto out_no_handler; 414312b8f79SMark Zhang 415312b8f79SMark Zhang qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC); 416312b8f79SMark Zhang if (!qpe_work) 417312b8f79SMark Zhang goto out_no_handler; 418312b8f79SMark Zhang 419312b8f79SMark Zhang qpe_work->qp = qp; 420312b8f79SMark Zhang qpe_work->type = type; 421312b8f79SMark Zhang INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event); 422312b8f79SMark Zhang queue_work(mlx5_ib_qp_event_wq, &qpe_work->work); 423312b8f79SMark Zhang return; 424312b8f79SMark Zhang 425312b8f79SMark Zhang out_no_handler: 426312b8f79SMark Zhang mlx5_core_res_put(&qp->common); 427e126ba97SEli Cohen } 428e126ba97SEli Cohen 429e126ba97SEli Cohen static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, 430e126ba97SEli Cohen int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) 431e126ba97SEli Cohen { 432e126ba97SEli Cohen int wqe_size; 433e126ba97SEli Cohen int wq_size; 434e126ba97SEli Cohen 435e126ba97SEli Cohen /* Sanity check RQ size before proceeding */ 436938fe83cSSaeed Mahameed if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) 437e126ba97SEli Cohen return -EINVAL; 438e126ba97SEli Cohen 439e126ba97SEli Cohen if (!has_rq) { 440e126ba97SEli Cohen qp->rq.max_gs = 0; 441e126ba97SEli Cohen qp->rq.wqe_cnt = 0; 442e126ba97SEli Cohen qp->rq.wqe_shift = 0; 4430540d814SNoa Osherovich 
cap->max_recv_wr = 0; 4440540d814SNoa Osherovich cap->max_recv_sge = 0; 445e126ba97SEli Cohen } else { 446c95e6d53SLeon Romanovsky int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE); 447c95e6d53SLeon Romanovsky 448e126ba97SEli Cohen if (ucmd) { 449e126ba97SEli Cohen qp->rq.wqe_cnt = ucmd->rq_wqe_count; 450002bf228SLeon Romanovsky if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) 451002bf228SLeon Romanovsky return -EINVAL; 452e126ba97SEli Cohen qp->rq.wqe_shift = ucmd->rq_wqe_shift; 453c95e6d53SLeon Romanovsky if ((1 << qp->rq.wqe_shift) / 454c95e6d53SLeon Romanovsky sizeof(struct mlx5_wqe_data_seg) < 455c95e6d53SLeon Romanovsky wq_sig) 456002bf228SLeon Romanovsky return -EINVAL; 457c95e6d53SLeon Romanovsky qp->rq.max_gs = 458c95e6d53SLeon Romanovsky (1 << qp->rq.wqe_shift) / 459c95e6d53SLeon Romanovsky sizeof(struct mlx5_wqe_data_seg) - 460c95e6d53SLeon Romanovsky wq_sig; 461e126ba97SEli Cohen qp->rq.max_post = qp->rq.wqe_cnt; 462e126ba97SEli Cohen } else { 463c95e6d53SLeon Romanovsky wqe_size = 464c95e6d53SLeon Romanovsky wq_sig ? 
sizeof(struct mlx5_wqe_signature_seg) : 465c95e6d53SLeon Romanovsky 0; 466e126ba97SEli Cohen wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); 467e126ba97SEli Cohen wqe_size = roundup_pow_of_two(wqe_size); 468e126ba97SEli Cohen wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; 469e126ba97SEli Cohen wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); 470e126ba97SEli Cohen qp->rq.wqe_cnt = wq_size / wqe_size; 471938fe83cSSaeed Mahameed if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { 472e126ba97SEli Cohen mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", 473e126ba97SEli Cohen wqe_size, 474938fe83cSSaeed Mahameed MLX5_CAP_GEN(dev->mdev, 475938fe83cSSaeed Mahameed max_wqe_sz_rq)); 476e126ba97SEli Cohen return -EINVAL; 477e126ba97SEli Cohen } 478e126ba97SEli Cohen qp->rq.wqe_shift = ilog2(wqe_size); 479c95e6d53SLeon Romanovsky qp->rq.max_gs = 480c95e6d53SLeon Romanovsky (1 << qp->rq.wqe_shift) / 481c95e6d53SLeon Romanovsky sizeof(struct mlx5_wqe_data_seg) - 482c95e6d53SLeon Romanovsky wq_sig; 483e126ba97SEli Cohen qp->rq.max_post = qp->rq.wqe_cnt; 484e126ba97SEli Cohen } 485e126ba97SEli Cohen } 486e126ba97SEli Cohen 487e126ba97SEli Cohen return 0; 488e126ba97SEli Cohen } 489e126ba97SEli Cohen 490f0313965SErez Shitrit static int sq_overhead(struct ib_qp_init_attr *attr) 491e126ba97SEli Cohen { 492618af384SAndi Shyti int size = 0; 493e126ba97SEli Cohen 494f0313965SErez Shitrit switch (attr->qp_type) { 495e126ba97SEli Cohen case IB_QPT_XRC_INI: 496b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_xrc_seg); 497df561f66SGustavo A. R. 
Silva fallthrough; 498e126ba97SEli Cohen case IB_QPT_RC: 499e126ba97SEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 50075c1657eSLeon Romanovsky max(sizeof(struct mlx5_wqe_atomic_seg) + 50175c1657eSLeon Romanovsky sizeof(struct mlx5_wqe_raddr_seg), 50275c1657eSLeon Romanovsky sizeof(struct mlx5_wqe_umr_ctrl_seg) + 503064e5262SIdan Burstein sizeof(struct mlx5_mkey_seg) + 504064e5262SIdan Burstein MLX5_IB_SQ_UMR_INLINE_THRESHOLD / 505064e5262SIdan Burstein MLX5_IB_UMR_OCTOWORD); 506e126ba97SEli Cohen break; 507e126ba97SEli Cohen 508b125a54bSEli Cohen case IB_QPT_XRC_TGT: 509b125a54bSEli Cohen return 0; 510b125a54bSEli Cohen 511e126ba97SEli Cohen case IB_QPT_UC: 512b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 51375c1657eSLeon Romanovsky max(sizeof(struct mlx5_wqe_raddr_seg), 5149e65dc37SEli Cohen sizeof(struct mlx5_wqe_umr_ctrl_seg) + 51575c1657eSLeon Romanovsky sizeof(struct mlx5_mkey_seg)); 516e126ba97SEli Cohen break; 517e126ba97SEli Cohen 518e126ba97SEli Cohen case IB_QPT_UD: 519f0313965SErez Shitrit if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) 520f0313965SErez Shitrit size += sizeof(struct mlx5_wqe_eth_pad) + 521f0313965SErez Shitrit sizeof(struct mlx5_wqe_eth_seg); 522df561f66SGustavo A. R. 
Silva fallthrough; 523e126ba97SEli Cohen case IB_QPT_SMI: 524d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: 525b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 526e126ba97SEli Cohen sizeof(struct mlx5_wqe_datagram_seg); 527e126ba97SEli Cohen break; 528e126ba97SEli Cohen 529e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: 530b125a54bSEli Cohen size += sizeof(struct mlx5_wqe_ctrl_seg) + 531e126ba97SEli Cohen sizeof(struct mlx5_wqe_umr_ctrl_seg) + 532e126ba97SEli Cohen sizeof(struct mlx5_mkey_seg); 533e126ba97SEli Cohen break; 534e126ba97SEli Cohen 535e126ba97SEli Cohen default: 536e126ba97SEli Cohen return -EINVAL; 537e126ba97SEli Cohen } 538e126ba97SEli Cohen 539e126ba97SEli Cohen return size; 540e126ba97SEli Cohen } 541e126ba97SEli Cohen 542e126ba97SEli Cohen static int calc_send_wqe(struct ib_qp_init_attr *attr) 543e126ba97SEli Cohen { 544e126ba97SEli Cohen int inl_size = 0; 545e126ba97SEli Cohen int size; 546e126ba97SEli Cohen 547f0313965SErez Shitrit size = sq_overhead(attr); 548e126ba97SEli Cohen if (size < 0) 549e126ba97SEli Cohen return size; 550e126ba97SEli Cohen 551e126ba97SEli Cohen if (attr->cap.max_inline_data) { 552e126ba97SEli Cohen inl_size = size + sizeof(struct mlx5_wqe_inline_seg) + 553e126ba97SEli Cohen attr->cap.max_inline_data; 554e126ba97SEli Cohen } 555e126ba97SEli Cohen 556e126ba97SEli Cohen size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); 557c0a6cbb9SIsrael Rukshin if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN && 558e1e66cc2SSagi Grimberg ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE) 559e1e66cc2SSagi Grimberg return MLX5_SIG_WQE_SIZE; 560e1e66cc2SSagi Grimberg else 561e126ba97SEli Cohen return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); 562e126ba97SEli Cohen } 563e126ba97SEli Cohen 564288c01b7SEli Cohen static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size) 565288c01b7SEli Cohen { 566288c01b7SEli Cohen int max_sge; 567288c01b7SEli Cohen 568288c01b7SEli Cohen if 
(attr->qp_type == IB_QPT_RC) 569288c01b7SEli Cohen max_sge = (min_t(int, wqe_size, 512) - 570288c01b7SEli Cohen sizeof(struct mlx5_wqe_ctrl_seg) - 571288c01b7SEli Cohen sizeof(struct mlx5_wqe_raddr_seg)) / 572288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg); 573288c01b7SEli Cohen else if (attr->qp_type == IB_QPT_XRC_INI) 574288c01b7SEli Cohen max_sge = (min_t(int, wqe_size, 512) - 575288c01b7SEli Cohen sizeof(struct mlx5_wqe_ctrl_seg) - 576288c01b7SEli Cohen sizeof(struct mlx5_wqe_xrc_seg) - 577288c01b7SEli Cohen sizeof(struct mlx5_wqe_raddr_seg)) / 578288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg); 579288c01b7SEli Cohen else 580288c01b7SEli Cohen max_sge = (wqe_size - sq_overhead(attr)) / 581288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg); 582288c01b7SEli Cohen 583288c01b7SEli Cohen return min_t(int, max_sge, wqe_size - sq_overhead(attr) / 584288c01b7SEli Cohen sizeof(struct mlx5_wqe_data_seg)); 585288c01b7SEli Cohen } 586288c01b7SEli Cohen 587e126ba97SEli Cohen static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 588e126ba97SEli Cohen struct mlx5_ib_qp *qp) 589e126ba97SEli Cohen { 590e126ba97SEli Cohen int wqe_size; 591e126ba97SEli Cohen int wq_size; 592e126ba97SEli Cohen 593e126ba97SEli Cohen if (!attr->cap.max_send_wr) 594e126ba97SEli Cohen return 0; 595e126ba97SEli Cohen 596e126ba97SEli Cohen wqe_size = calc_send_wqe(attr); 597e126ba97SEli Cohen mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size); 598e126ba97SEli Cohen if (wqe_size < 0) 599e126ba97SEli Cohen return wqe_size; 600e126ba97SEli Cohen 601938fe83cSSaeed Mahameed if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 602b125a54bSEli Cohen mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", 603938fe83cSSaeed Mahameed wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 604e126ba97SEli Cohen return -EINVAL; 605e126ba97SEli Cohen } 606e126ba97SEli Cohen 607f0313965SErez Shitrit qp->max_inline_data = wqe_size - sq_overhead(attr) - 608e126ba97SEli Cohen sizeof(struct 
mlx5_wqe_inline_seg); 609e126ba97SEli Cohen attr->cap.max_inline_data = qp->max_inline_data; 610e126ba97SEli Cohen 611e126ba97SEli Cohen wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 612e126ba97SEli Cohen qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 613938fe83cSSaeed Mahameed if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 6141974ab9dSBart Van Assche mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n", 6151974ab9dSBart Van Assche attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB, 616938fe83cSSaeed Mahameed qp->sq.wqe_cnt, 617938fe83cSSaeed Mahameed 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 618b125a54bSEli Cohen return -ENOMEM; 619b125a54bSEli Cohen } 620e126ba97SEli Cohen qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 621288c01b7SEli Cohen qp->sq.max_gs = get_send_sge(attr, wqe_size); 622288c01b7SEli Cohen if (qp->sq.max_gs < attr->cap.max_send_sge) 623288c01b7SEli Cohen return -ENOMEM; 624288c01b7SEli Cohen 625288c01b7SEli Cohen attr->cap.max_send_sge = qp->sq.max_gs; 626b125a54bSEli Cohen qp->sq.max_post = wq_size / wqe_size; 627b125a54bSEli Cohen attr->cap.max_send_wr = qp->sq.max_post; 628e126ba97SEli Cohen 629e126ba97SEli Cohen return wq_size; 630e126ba97SEli Cohen } 631e126ba97SEli Cohen 632e126ba97SEli Cohen static int set_user_buf_size(struct mlx5_ib_dev *dev, 633e126ba97SEli Cohen struct mlx5_ib_qp *qp, 63419098df2Smajd@mellanox.com struct mlx5_ib_create_qp *ucmd, 6350fb2ed66Smajd@mellanox.com struct mlx5_ib_qp_base *base, 6360fb2ed66Smajd@mellanox.com struct ib_qp_init_attr *attr) 637e126ba97SEli Cohen { 638e126ba97SEli Cohen int desc_sz = 1 << qp->sq.wqe_shift; 639e126ba97SEli Cohen 640938fe83cSSaeed Mahameed if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 641e126ba97SEli Cohen mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", 642938fe83cSSaeed Mahameed desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 643e126ba97SEli Cohen return -EINVAL; 644e126ba97SEli Cohen } 645e126ba97SEli 
Cohen 646af8b38edSGal Pressman if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) { 647af8b38edSGal Pressman mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n", 648af8b38edSGal Pressman ucmd->sq_wqe_count); 649e126ba97SEli Cohen return -EINVAL; 650e126ba97SEli Cohen } 651e126ba97SEli Cohen 652e126ba97SEli Cohen qp->sq.wqe_cnt = ucmd->sq_wqe_count; 653e126ba97SEli Cohen 654938fe83cSSaeed Mahameed if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 655e126ba97SEli Cohen mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", 656938fe83cSSaeed Mahameed qp->sq.wqe_cnt, 657938fe83cSSaeed Mahameed 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 658e126ba97SEli Cohen return -EINVAL; 659e126ba97SEli Cohen } 660e126ba97SEli Cohen 661c2e53b2cSYishai Hadas if (attr->qp_type == IB_QPT_RAW_PACKET || 6622be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 6630fb2ed66Smajd@mellanox.com base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift; 6640fb2ed66Smajd@mellanox.com qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6; 6650fb2ed66Smajd@mellanox.com } else { 66619098df2Smajd@mellanox.com base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + 667e126ba97SEli Cohen (qp->sq.wqe_cnt << 6); 6680fb2ed66Smajd@mellanox.com } 669e126ba97SEli Cohen 670e126ba97SEli Cohen return 0; 671e126ba97SEli Cohen } 672e126ba97SEli Cohen 673e126ba97SEli Cohen static int qp_has_rq(struct ib_qp_init_attr *attr) 674e126ba97SEli Cohen { 675e126ba97SEli Cohen if (attr->qp_type == IB_QPT_XRC_INI || 676e126ba97SEli Cohen attr->qp_type == IB_QPT_XRC_TGT || attr->srq || 677e126ba97SEli Cohen attr->qp_type == MLX5_IB_QPT_REG_UMR || 678e126ba97SEli Cohen !attr->cap.max_recv_wr) 679e126ba97SEli Cohen return 0; 680e126ba97SEli Cohen 681e126ba97SEli Cohen return 1; 682e126ba97SEli Cohen } 683e126ba97SEli Cohen 6840b80c14fSEli Cohen enum { 6850b80c14fSEli Cohen /* this is the first blue flame register in the array of bfregs assigned 6860b80c14fSEli Cohen * 
to a processes. Since we do not use it for blue flame but rather 6870b80c14fSEli Cohen * regular 64 bit doorbells, we do not need a lock for maintaiing 6880b80c14fSEli Cohen * "odd/even" order 6890b80c14fSEli Cohen */ 6900b80c14fSEli Cohen NUM_NON_BLUE_FLAME_BFREGS = 1, 6910b80c14fSEli Cohen }; 6920b80c14fSEli Cohen 693b037c29aSEli Cohen static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi) 694b037c29aSEli Cohen { 69584aa6c39SLeon Romanovsky return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * 69684aa6c39SLeon Romanovsky bfregi->num_static_sys_pages * MLX5_NON_FP_BFREGS_PER_UAR; 697b037c29aSEli Cohen } 698b037c29aSEli Cohen 699b037c29aSEli Cohen static int num_med_bfreg(struct mlx5_ib_dev *dev, 700b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 701c1be5232SEli Cohen { 702c1be5232SEli Cohen int n; 703c1be5232SEli Cohen 704b037c29aSEli Cohen n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs - 705b037c29aSEli Cohen NUM_NON_BLUE_FLAME_BFREGS; 706c1be5232SEli Cohen 707c1be5232SEli Cohen return n >= 0 ? n : 0; 708c1be5232SEli Cohen } 709c1be5232SEli Cohen 71018b0362eSYishai Hadas static int first_med_bfreg(struct mlx5_ib_dev *dev, 71118b0362eSYishai Hadas struct mlx5_bfreg_info *bfregi) 71218b0362eSYishai Hadas { 71318b0362eSYishai Hadas return num_med_bfreg(dev, bfregi) ? 
1 : -ENOMEM; 71418b0362eSYishai Hadas } 71518b0362eSYishai Hadas 716b037c29aSEli Cohen static int first_hi_bfreg(struct mlx5_ib_dev *dev, 717b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 718c1be5232SEli Cohen { 719c1be5232SEli Cohen int med; 720c1be5232SEli Cohen 721b037c29aSEli Cohen med = num_med_bfreg(dev, bfregi); 722b037c29aSEli Cohen return ++med; 723c1be5232SEli Cohen } 724c1be5232SEli Cohen 725b037c29aSEli Cohen static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev, 726b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 727e126ba97SEli Cohen { 728e126ba97SEli Cohen int i; 729e126ba97SEli Cohen 730b037c29aSEli Cohen for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) { 731b037c29aSEli Cohen if (!bfregi->count[i]) { 7322f5ff264SEli Cohen bfregi->count[i]++; 733e126ba97SEli Cohen return i; 734e126ba97SEli Cohen } 735e126ba97SEli Cohen } 736e126ba97SEli Cohen 737e126ba97SEli Cohen return -ENOMEM; 738e126ba97SEli Cohen } 739e126ba97SEli Cohen 740b037c29aSEli Cohen static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, 741b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi) 742e126ba97SEli Cohen { 74318b0362eSYishai Hadas int minidx = first_med_bfreg(dev, bfregi); 744e126ba97SEli Cohen int i; 745e126ba97SEli Cohen 74618b0362eSYishai Hadas if (minidx < 0) 74718b0362eSYishai Hadas return minidx; 74818b0362eSYishai Hadas 74918b0362eSYishai Hadas for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) { 7502f5ff264SEli Cohen if (bfregi->count[i] < bfregi->count[minidx]) 751e126ba97SEli Cohen minidx = i; 7520b80c14fSEli Cohen if (!bfregi->count[minidx]) 7530b80c14fSEli Cohen break; 754e126ba97SEli Cohen } 755e126ba97SEli Cohen 7562f5ff264SEli Cohen bfregi->count[minidx]++; 757e126ba97SEli Cohen return minidx; 758e126ba97SEli Cohen } 759e126ba97SEli Cohen 760b037c29aSEli Cohen static int alloc_bfreg(struct mlx5_ib_dev *dev, 761ffaf58deSLeon Romanovsky struct mlx5_bfreg_info *bfregi) 762e126ba97SEli Cohen { 763ffaf58deSLeon Romanovsky int 
bfregn = -ENOMEM; 764e126ba97SEli Cohen 7650a2fd01cSYishai Hadas if (bfregi->lib_uar_dyn) 7660a2fd01cSYishai Hadas return -EINVAL; 7670a2fd01cSYishai Hadas 7682f5ff264SEli Cohen mutex_lock(&bfregi->lock); 769ffaf58deSLeon Romanovsky if (bfregi->ver >= 2) { 770ffaf58deSLeon Romanovsky bfregn = alloc_high_class_bfreg(dev, bfregi); 771ffaf58deSLeon Romanovsky if (bfregn < 0) 772ffaf58deSLeon Romanovsky bfregn = alloc_med_class_bfreg(dev, bfregi); 773ffaf58deSLeon Romanovsky } 774ffaf58deSLeon Romanovsky 775ffaf58deSLeon Romanovsky if (bfregn < 0) { 7760b80c14fSEli Cohen BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1); 7772f5ff264SEli Cohen bfregn = 0; 7782f5ff264SEli Cohen bfregi->count[bfregn]++; 779e126ba97SEli Cohen } 7802f5ff264SEli Cohen mutex_unlock(&bfregi->lock); 781e126ba97SEli Cohen 7822f5ff264SEli Cohen return bfregn; 783e126ba97SEli Cohen } 784e126ba97SEli Cohen 7854ed131d0SYishai Hadas void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn) 786e126ba97SEli Cohen { 7872f5ff264SEli Cohen mutex_lock(&bfregi->lock); 788b037c29aSEli Cohen bfregi->count[bfregn]--; 7892f5ff264SEli Cohen mutex_unlock(&bfregi->lock); 790e126ba97SEli Cohen } 791e126ba97SEli Cohen 792e126ba97SEli Cohen static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) 793e126ba97SEli Cohen { 794e126ba97SEli Cohen switch (state) { 795e126ba97SEli Cohen case IB_QPS_RESET: return MLX5_QP_STATE_RST; 796e126ba97SEli Cohen case IB_QPS_INIT: return MLX5_QP_STATE_INIT; 797e126ba97SEli Cohen case IB_QPS_RTR: return MLX5_QP_STATE_RTR; 798e126ba97SEli Cohen case IB_QPS_RTS: return MLX5_QP_STATE_RTS; 799e126ba97SEli Cohen case IB_QPS_SQD: return MLX5_QP_STATE_SQD; 800e126ba97SEli Cohen case IB_QPS_SQE: return MLX5_QP_STATE_SQER; 801e126ba97SEli Cohen case IB_QPS_ERR: return MLX5_QP_STATE_ERR; 802e126ba97SEli Cohen default: return -1; 803e126ba97SEli Cohen } 804e126ba97SEli Cohen } 805e126ba97SEli Cohen 806e126ba97SEli Cohen static int to_mlx5_st(enum ib_qp_type 
type) 807e126ba97SEli Cohen { 808e126ba97SEli Cohen switch (type) { 809e126ba97SEli Cohen case IB_QPT_RC: return MLX5_QP_ST_RC; 810e126ba97SEli Cohen case IB_QPT_UC: return MLX5_QP_ST_UC; 811e126ba97SEli Cohen case IB_QPT_UD: return MLX5_QP_ST_UD; 812e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR; 813e126ba97SEli Cohen case IB_QPT_XRC_INI: 814e126ba97SEli Cohen case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; 815e126ba97SEli Cohen case IB_QPT_SMI: return MLX5_QP_ST_QP0; 816d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1; 817c32a4f29SMoni Shoua case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI; 8183ae7e66aSLeon Romanovsky case IB_QPT_RAW_PACKET: return MLX5_QP_ST_RAW_ETHERTYPE; 819e126ba97SEli Cohen default: return -EINVAL; 820e126ba97SEli Cohen } 821e126ba97SEli Cohen } 822e126ba97SEli Cohen 82389ea94a7SMaor Gottlieb static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, 82489ea94a7SMaor Gottlieb struct mlx5_ib_cq *recv_cq); 82589ea94a7SMaor Gottlieb static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, 82689ea94a7SMaor Gottlieb struct mlx5_ib_cq *recv_cq); 82789ea94a7SMaor Gottlieb 8287c043e90SYishai Hadas int bfregn_to_uar_index(struct mlx5_ib_dev *dev, 82905f58cebSLeon Romanovsky struct mlx5_bfreg_info *bfregi, u32 bfregn, 8301ee47ab3SYishai Hadas bool dyn_bfreg) 831e126ba97SEli Cohen { 83205f58cebSLeon Romanovsky unsigned int bfregs_per_sys_page; 83305f58cebSLeon Romanovsky u32 index_of_sys_page; 83405f58cebSLeon Romanovsky u32 offset; 835b037c29aSEli Cohen 8360a2fd01cSYishai Hadas if (bfregi->lib_uar_dyn) 8370a2fd01cSYishai Hadas return -EINVAL; 8380a2fd01cSYishai Hadas 839b037c29aSEli Cohen bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * 840b037c29aSEli Cohen MLX5_NON_FP_BFREGS_PER_UAR; 841b037c29aSEli Cohen index_of_sys_page = bfregn / bfregs_per_sys_page; 842b037c29aSEli Cohen 84305f58cebSLeon Romanovsky if (dyn_bfreg) { 84405f58cebSLeon Romanovsky index_of_sys_page += 
bfregi->num_static_sys_pages; 84505f58cebSLeon Romanovsky 8467c043e90SYishai Hadas if (index_of_sys_page >= bfregi->num_sys_pages) 8477c043e90SYishai Hadas return -EINVAL; 8487c043e90SYishai Hadas 8491ee47ab3SYishai Hadas if (bfregn > bfregi->num_dyn_bfregs || 8501ee47ab3SYishai Hadas bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) { 8511ee47ab3SYishai Hadas mlx5_ib_dbg(dev, "Invalid dynamic uar index\n"); 8521ee47ab3SYishai Hadas return -EINVAL; 8531ee47ab3SYishai Hadas } 8541ee47ab3SYishai Hadas } 855b037c29aSEli Cohen 8561ee47ab3SYishai Hadas offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR; 857b037c29aSEli Cohen return bfregi->sys_pages[index_of_sys_page] + offset; 858e126ba97SEli Cohen } 859e126ba97SEli Cohen 860fe248c3aSMaor Gottlieb static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, 861bdeacabdSShamir Rabinovitch struct mlx5_ib_rwq *rwq, struct ib_udata *udata) 86279b20a6cSYishai Hadas { 863bdeacabdSShamir Rabinovitch struct mlx5_ib_ucontext *context = 864bdeacabdSShamir Rabinovitch rdma_udata_to_drv_context( 865bdeacabdSShamir Rabinovitch udata, 866bdeacabdSShamir Rabinovitch struct mlx5_ib_ucontext, 867bdeacabdSShamir Rabinovitch ibucontext); 86879b20a6cSYishai Hadas 869fe248c3aSMaor Gottlieb if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP) 870fe248c3aSMaor Gottlieb atomic_dec(&dev->delay_drop.rqs_cnt); 871fe248c3aSMaor Gottlieb 87279b20a6cSYishai Hadas mlx5_ib_db_unmap_user(context, &rwq->db); 87379b20a6cSYishai Hadas ib_umem_release(rwq->umem); 87479b20a6cSYishai Hadas } 87579b20a6cSYishai Hadas 87679b20a6cSYishai Hadas static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, 877b0ea0fa5SJason Gunthorpe struct ib_udata *udata, struct mlx5_ib_rwq *rwq, 87879b20a6cSYishai Hadas struct mlx5_ib_create_wq *ucmd) 87979b20a6cSYishai Hadas { 88089944450SShamir Rabinovitch struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 88189944450SShamir Rabinovitch udata, struct 
mlx5_ib_ucontext, ibucontext); 882ad480ea5SJason Gunthorpe unsigned long page_size = 0; 88379b20a6cSYishai Hadas u32 offset = 0; 88479b20a6cSYishai Hadas int err; 88579b20a6cSYishai Hadas 88679b20a6cSYishai Hadas if (!ucmd->buf_addr) 88779b20a6cSYishai Hadas return -EINVAL; 88879b20a6cSYishai Hadas 889c320e527SMoni Shoua rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0); 89079b20a6cSYishai Hadas if (IS_ERR(rwq->umem)) { 89179b20a6cSYishai Hadas mlx5_ib_dbg(dev, "umem_get failed\n"); 89279b20a6cSYishai Hadas err = PTR_ERR(rwq->umem); 89379b20a6cSYishai Hadas return err; 89479b20a6cSYishai Hadas } 89579b20a6cSYishai Hadas 896ad480ea5SJason Gunthorpe page_size = mlx5_umem_find_best_quantized_pgoff( 897ad480ea5SJason Gunthorpe rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT, 898ad480ea5SJason Gunthorpe page_offset, 64, &rwq->rq_page_offset); 899ad480ea5SJason Gunthorpe if (!page_size) { 90079b20a6cSYishai Hadas mlx5_ib_warn(dev, "bad offset\n"); 901ad480ea5SJason Gunthorpe err = -EINVAL; 90279b20a6cSYishai Hadas goto err_umem; 90379b20a6cSYishai Hadas } 90479b20a6cSYishai Hadas 905ad480ea5SJason Gunthorpe rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size); 906ad480ea5SJason Gunthorpe rwq->page_shift = order_base_2(page_size); 907ad480ea5SJason Gunthorpe rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT; 90879b20a6cSYishai Hadas rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE); 90979b20a6cSYishai Hadas 910f8fb3110SJason Gunthorpe mlx5_ib_dbg( 911f8fb3110SJason Gunthorpe dev, 912ad480ea5SJason Gunthorpe "addr 0x%llx, size %zd, npages %zu, page_size %ld, ncont %d, offset %d\n", 91379b20a6cSYishai Hadas (unsigned long long)ucmd->buf_addr, rwq->buf_size, 914ad480ea5SJason Gunthorpe ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas, 915f8fb3110SJason Gunthorpe offset); 91679b20a6cSYishai Hadas 9170bedd3d0SLang Cheng err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db); 91879b20a6cSYishai Hadas if (err) 
{ 91979b20a6cSYishai Hadas mlx5_ib_dbg(dev, "map failed\n"); 92079b20a6cSYishai Hadas goto err_umem; 92179b20a6cSYishai Hadas } 92279b20a6cSYishai Hadas 92379b20a6cSYishai Hadas return 0; 92479b20a6cSYishai Hadas 92579b20a6cSYishai Hadas err_umem: 92679b20a6cSYishai Hadas ib_umem_release(rwq->umem); 92779b20a6cSYishai Hadas return err; 92879b20a6cSYishai Hadas } 92979b20a6cSYishai Hadas 930b037c29aSEli Cohen static int adjust_bfregn(struct mlx5_ib_dev *dev, 931b037c29aSEli Cohen struct mlx5_bfreg_info *bfregi, int bfregn) 932b037c29aSEli Cohen { 933b037c29aSEli Cohen return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR + 934b037c29aSEli Cohen bfregn % MLX5_NON_FP_BFREGS_PER_UAR; 935b037c29aSEli Cohen } 936b037c29aSEli Cohen 93798fc1126SLeon Romanovsky static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 938e126ba97SEli Cohen struct mlx5_ib_qp *qp, struct ib_udata *udata, 93976883a6cSLeon Romanovsky struct ib_qp_init_attr *attr, u32 **in, 94019098df2Smajd@mellanox.com struct mlx5_ib_create_qp_resp *resp, int *inlen, 94176883a6cSLeon Romanovsky struct mlx5_ib_qp_base *base, 94276883a6cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd) 943e126ba97SEli Cohen { 944e126ba97SEli Cohen struct mlx5_ib_ucontext *context; 94519098df2Smajd@mellanox.com struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer; 946a59b7b05SJason Gunthorpe unsigned int page_offset_quantized = 0; 947a59b7b05SJason Gunthorpe unsigned long page_size = 0; 9481ee47ab3SYishai Hadas int uar_index = 0; 9492f5ff264SEli Cohen int bfregn; 9509e9c47d0SEli Cohen int ncont = 0; 95109a7d9ecSSaeed Mahameed __be64 *pas; 95209a7d9ecSSaeed Mahameed void *qpc; 953e126ba97SEli Cohen int err; 9545aa3771dSYishai Hadas u16 uid; 955ac42a5eeSYishai Hadas u32 uar_flags; 956e126ba97SEli Cohen 95789944450SShamir Rabinovitch context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext, 95889944450SShamir Rabinovitch ibucontext); 95976883a6cSLeon Romanovsky uar_flags = qp->flags_en & 96076883a6cSLeon 
Romanovsky (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX); 961ac42a5eeSYishai Hadas switch (uar_flags) { 962ac42a5eeSYishai Hadas case MLX5_QP_FLAG_UAR_PAGE_INDEX: 96376883a6cSLeon Romanovsky uar_index = ucmd->bfreg_index; 964ac42a5eeSYishai Hadas bfregn = MLX5_IB_INVALID_BFREG; 965ac42a5eeSYishai Hadas break; 966ac42a5eeSYishai Hadas case MLX5_QP_FLAG_BFREG_INDEX: 9671ee47ab3SYishai Hadas uar_index = bfregn_to_uar_index(dev, &context->bfregi, 96876883a6cSLeon Romanovsky ucmd->bfreg_index, true); 9691ee47ab3SYishai Hadas if (uar_index < 0) 9701ee47ab3SYishai Hadas return uar_index; 9711ee47ab3SYishai Hadas bfregn = MLX5_IB_INVALID_BFREG; 972ac42a5eeSYishai Hadas break; 973ac42a5eeSYishai Hadas case 0: 9742be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 975ac42a5eeSYishai Hadas return -EINVAL; 976ffaf58deSLeon Romanovsky bfregn = alloc_bfreg(dev, &context->bfregi); 977ffaf58deSLeon Romanovsky if (bfregn < 0) 9782f5ff264SEli Cohen return bfregn; 979ac42a5eeSYishai Hadas break; 980ac42a5eeSYishai Hadas default: 981ac42a5eeSYishai Hadas return -EINVAL; 982e126ba97SEli Cohen } 983e126ba97SEli Cohen 9842f5ff264SEli Cohen mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index); 9851ee47ab3SYishai Hadas if (bfregn != MLX5_IB_INVALID_BFREG) 9861ee47ab3SYishai Hadas uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn, 9871ee47ab3SYishai Hadas false); 988e126ba97SEli Cohen 98948fea837SHaggai Eran qp->rq.offset = 0; 99048fea837SHaggai Eran qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 99148fea837SHaggai Eran qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 99248fea837SHaggai Eran 99376883a6cSLeon Romanovsky err = set_user_buf_size(dev, qp, ucmd, base, attr); 994e126ba97SEli Cohen if (err) 9952f5ff264SEli Cohen goto err_bfreg; 996e126ba97SEli Cohen 99776883a6cSLeon Romanovsky if (ucmd->buf_addr && ubuffer->buf_size) { 99876883a6cSLeon Romanovsky ubuffer->buf_addr = ucmd->buf_addr; 999a59b7b05SJason Gunthorpe ubuffer->umem = 
ib_umem_get(&dev->ib_dev, ubuffer->buf_addr, 1000a59b7b05SJason Gunthorpe ubuffer->buf_size, 0); 1001a59b7b05SJason Gunthorpe if (IS_ERR(ubuffer->umem)) { 1002a59b7b05SJason Gunthorpe err = PTR_ERR(ubuffer->umem); 10032f5ff264SEli Cohen goto err_bfreg; 1004a59b7b05SJason Gunthorpe } 1005a59b7b05SJason Gunthorpe page_size = mlx5_umem_find_best_quantized_pgoff( 1006a59b7b05SJason Gunthorpe ubuffer->umem, qpc, log_page_size, 1007a59b7b05SJason Gunthorpe MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64, 1008a59b7b05SJason Gunthorpe &page_offset_quantized); 1009a59b7b05SJason Gunthorpe if (!page_size) { 1010a59b7b05SJason Gunthorpe err = -EINVAL; 1011a59b7b05SJason Gunthorpe goto err_umem; 1012a59b7b05SJason Gunthorpe } 1013a59b7b05SJason Gunthorpe ncont = ib_umem_num_dma_blocks(ubuffer->umem, page_size); 10149e9c47d0SEli Cohen } else { 101519098df2Smajd@mellanox.com ubuffer->umem = NULL; 10169e9c47d0SEli Cohen } 1017e126ba97SEli Cohen 101809a7d9ecSSaeed Mahameed *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 101909a7d9ecSSaeed Mahameed MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; 10201b9a07eeSLeon Romanovsky *in = kvzalloc(*inlen, GFP_KERNEL); 1021e126ba97SEli Cohen if (!*in) { 1022e126ba97SEli Cohen err = -ENOMEM; 1023e126ba97SEli Cohen goto err_umem; 1024e126ba97SEli Cohen } 1025e126ba97SEli Cohen 102604bcc1c2SLeon Romanovsky uid = (attr->qp_type != IB_QPT_XRC_INI) ? 
to_mpd(pd)->uid : 0; 10275aa3771dSYishai Hadas MLX5_SET(create_qp_in, *in, uid, uid); 102809a7d9ecSSaeed Mahameed qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 1029a59b7b05SJason Gunthorpe pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas); 1030a59b7b05SJason Gunthorpe if (ubuffer->umem) { 1031a59b7b05SJason Gunthorpe mlx5_ib_populate_pas(ubuffer->umem, page_size, pas, 0); 1032a59b7b05SJason Gunthorpe MLX5_SET(qpc, qpc, log_page_size, 1033a59b7b05SJason Gunthorpe order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT); 1034a59b7b05SJason Gunthorpe MLX5_SET(qpc, qpc, page_offset, page_offset_quantized); 1035a59b7b05SJason Gunthorpe } 103609a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, uar_page, uar_index); 10371ee47ab3SYishai Hadas if (bfregn != MLX5_IB_INVALID_BFREG) 1038b037c29aSEli Cohen resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn); 10391ee47ab3SYishai Hadas else 10401ee47ab3SYishai Hadas resp->bfreg_index = MLX5_IB_INVALID_BFREG; 10412f5ff264SEli Cohen qp->bfregn = bfregn; 1042e126ba97SEli Cohen 10430bedd3d0SLang Cheng err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db); 1044e126ba97SEli Cohen if (err) { 1045e126ba97SEli Cohen mlx5_ib_dbg(dev, "map failed\n"); 1046e126ba97SEli Cohen goto err_free; 1047e126ba97SEli Cohen } 1048e126ba97SEli Cohen 1049e126ba97SEli Cohen return 0; 1050e126ba97SEli Cohen 1051e126ba97SEli Cohen err_free: 1052479163f4SAl Viro kvfree(*in); 1053e126ba97SEli Cohen 1054e126ba97SEli Cohen err_umem: 105519098df2Smajd@mellanox.com ib_umem_release(ubuffer->umem); 1056e126ba97SEli Cohen 10572f5ff264SEli Cohen err_bfreg: 10581ee47ab3SYishai Hadas if (bfregn != MLX5_IB_INVALID_BFREG) 10594ed131d0SYishai Hadas mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn); 1060e126ba97SEli Cohen return err; 1061e126ba97SEli Cohen } 1062e126ba97SEli Cohen 1063747c519cSLeon Romanovsky static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1064747c519cSLeon Romanovsky struct mlx5_ib_qp_base *base, struct ib_udata *udata) 
1065e126ba97SEli Cohen { 1066747c519cSLeon Romanovsky struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( 1067747c519cSLeon Romanovsky udata, struct mlx5_ib_ucontext, ibucontext); 1068e126ba97SEli Cohen 1069747c519cSLeon Romanovsky if (udata) { 1070747c519cSLeon Romanovsky /* User QP */ 1071e126ba97SEli Cohen mlx5_ib_db_unmap_user(context, &qp->db); 107219098df2Smajd@mellanox.com ib_umem_release(base->ubuffer.umem); 10731ee47ab3SYishai Hadas 10741ee47ab3SYishai Hadas /* 10751ee47ab3SYishai Hadas * Free only the BFREGs which are handled by the kernel. 10761ee47ab3SYishai Hadas * BFREGs of UARs allocated dynamically are handled by user. 10771ee47ab3SYishai Hadas */ 10781ee47ab3SYishai Hadas if (qp->bfregn != MLX5_IB_INVALID_BFREG) 10794ed131d0SYishai Hadas mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn); 1080747c519cSLeon Romanovsky return; 1081747c519cSLeon Romanovsky } 1082747c519cSLeon Romanovsky 1083747c519cSLeon Romanovsky /* Kernel QP */ 1084747c519cSLeon Romanovsky kvfree(qp->sq.wqe_head); 1085747c519cSLeon Romanovsky kvfree(qp->sq.w_list); 1086747c519cSLeon Romanovsky kvfree(qp->sq.wrid); 1087747c519cSLeon Romanovsky kvfree(qp->sq.wr_data); 1088747c519cSLeon Romanovsky kvfree(qp->rq.wrid); 1089747c519cSLeon Romanovsky if (qp->db.db) 1090747c519cSLeon Romanovsky mlx5_db_free(dev->mdev, &qp->db); 1091747c519cSLeon Romanovsky if (qp->buf.frags) 1092747c519cSLeon Romanovsky mlx5_frag_buf_free(dev->mdev, &qp->buf); 1093e126ba97SEli Cohen } 1094e126ba97SEli Cohen 109598fc1126SLeon Romanovsky static int _create_kernel_qp(struct mlx5_ib_dev *dev, 1096e126ba97SEli Cohen struct ib_qp_init_attr *init_attr, 109798fc1126SLeon Romanovsky struct mlx5_ib_qp *qp, u32 **in, int *inlen, 109819098df2Smajd@mellanox.com struct mlx5_ib_qp_base *base) 1099e126ba97SEli Cohen { 1100e126ba97SEli Cohen int uar_index; 110109a7d9ecSSaeed Mahameed void *qpc; 1102e126ba97SEli Cohen int err; 1103e126ba97SEli Cohen 1104e126ba97SEli Cohen if (init_attr->qp_type == 
MLX5_IB_QPT_REG_UMR) 11055fe9dec0SEli Cohen qp->bf.bfreg = &dev->fp_bfreg; 11062978975cSLeon Romanovsky else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST) 110711f552e2SMichael Guralnik qp->bf.bfreg = &dev->wc_bfreg; 11085fe9dec0SEli Cohen else 11095fe9dec0SEli Cohen qp->bf.bfreg = &dev->bfreg; 1110e126ba97SEli Cohen 1111d8030b0dSEli Cohen /* We need to divide by two since each register is comprised of 1112d8030b0dSEli Cohen * two buffers of identical size, namely odd and even 1113d8030b0dSEli Cohen */ 1114d8030b0dSEli Cohen qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2; 11155fe9dec0SEli Cohen uar_index = qp->bf.bfreg->index; 1116e126ba97SEli Cohen 1117e126ba97SEli Cohen err = calc_sq_size(dev, init_attr, qp); 1118e126ba97SEli Cohen if (err < 0) { 1119e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 11205fe9dec0SEli Cohen return err; 1121e126ba97SEli Cohen } 1122e126ba97SEli Cohen 1123e126ba97SEli Cohen qp->rq.offset = 0; 1124e126ba97SEli Cohen qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 112519098df2Smajd@mellanox.com base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); 1126e126ba97SEli Cohen 112734f4c955SGuy Levi err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size, 112834f4c955SGuy Levi &qp->buf, dev->mdev->priv.numa_node); 1129e126ba97SEli Cohen if (err) { 1130e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 11315fe9dec0SEli Cohen return err; 1132e126ba97SEli Cohen } 1133e126ba97SEli Cohen 113434f4c955SGuy Levi if (qp->rq.wqe_cnt) 113534f4c955SGuy Levi mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift, 113634f4c955SGuy Levi ilog2(qp->rq.wqe_cnt), &qp->rq.fbc); 113734f4c955SGuy Levi 113834f4c955SGuy Levi if (qp->sq.wqe_cnt) { 113934f4c955SGuy Levi int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) / 114034f4c955SGuy Levi MLX5_SEND_WQE_BB; 114134f4c955SGuy Levi mlx5_init_fbc_offset(qp->buf.frags + 114234f4c955SGuy Levi (qp->sq.offset / PAGE_SIZE), 114334f4c955SGuy Levi ilog2(MLX5_SEND_WQE_BB), 
114434f4c955SGuy Levi ilog2(qp->sq.wqe_cnt), 114534f4c955SGuy Levi sq_strides_offset, &qp->sq.fbc); 114634f4c955SGuy Levi 114734f4c955SGuy Levi qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); 114834f4c955SGuy Levi } 114934f4c955SGuy Levi 115009a7d9ecSSaeed Mahameed *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 115109a7d9ecSSaeed Mahameed MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; 11521b9a07eeSLeon Romanovsky *in = kvzalloc(*inlen, GFP_KERNEL); 1153e126ba97SEli Cohen if (!*in) { 1154e126ba97SEli Cohen err = -ENOMEM; 1155e126ba97SEli Cohen goto err_buf; 1156e126ba97SEli Cohen } 115709a7d9ecSSaeed Mahameed 115809a7d9ecSSaeed Mahameed qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 115909a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, uar_page, uar_index); 11608256c69bSMaor Gottlieb MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); 116109a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); 116209a7d9ecSSaeed Mahameed 1163e126ba97SEli Cohen /* Set "fast registration enabled" for all kernel QPs */ 116409a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, fre, 1); 116509a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, rlky, 1); 1166e126ba97SEli Cohen 11672978975cSLeon Romanovsky if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) 116809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, deth_sqpn, 1); 1169b11a4f9cSHaggai Eran 117034f4c955SGuy Levi mlx5_fill_page_frag_array(&qp->buf, 117134f4c955SGuy Levi (__be64 *)MLX5_ADDR_OF(create_qp_in, 117234f4c955SGuy Levi *in, pas)); 1173e126ba97SEli Cohen 11749603b61dSJack Morgenstein err = mlx5_db_alloc(dev->mdev, &qp->db); 1175e126ba97SEli Cohen if (err) { 1176e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 1177e126ba97SEli Cohen goto err_free; 1178e126ba97SEli Cohen } 1179e126ba97SEli Cohen 1180b5883008SLi Dongyang qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, 1181b5883008SLi Dongyang sizeof(*qp->sq.wrid), GFP_KERNEL); 1182b5883008SLi Dongyang qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt, 
1183b5883008SLi Dongyang sizeof(*qp->sq.wr_data), GFP_KERNEL); 1184b5883008SLi Dongyang qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, 1185b5883008SLi Dongyang sizeof(*qp->rq.wrid), GFP_KERNEL); 1186b5883008SLi Dongyang qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt, 1187b5883008SLi Dongyang sizeof(*qp->sq.w_list), GFP_KERNEL); 1188b5883008SLi Dongyang qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt, 1189b5883008SLi Dongyang sizeof(*qp->sq.wqe_head), GFP_KERNEL); 1190e126ba97SEli Cohen 1191e126ba97SEli Cohen if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || 1192e126ba97SEli Cohen !qp->sq.w_list || !qp->sq.wqe_head) { 1193e126ba97SEli Cohen err = -ENOMEM; 1194e126ba97SEli Cohen goto err_wrid; 1195e126ba97SEli Cohen } 1196e126ba97SEli Cohen 1197e126ba97SEli Cohen return 0; 1198e126ba97SEli Cohen 1199e126ba97SEli Cohen err_wrid: 1200b5883008SLi Dongyang kvfree(qp->sq.wqe_head); 1201b5883008SLi Dongyang kvfree(qp->sq.w_list); 1202b5883008SLi Dongyang kvfree(qp->sq.wrid); 1203b5883008SLi Dongyang kvfree(qp->sq.wr_data); 1204b5883008SLi Dongyang kvfree(qp->rq.wrid); 1205f4044dacSEli Cohen mlx5_db_free(dev->mdev, &qp->db); 1206e126ba97SEli Cohen 1207e126ba97SEli Cohen err_free: 1208479163f4SAl Viro kvfree(*in); 1209e126ba97SEli Cohen 1210e126ba97SEli Cohen err_buf: 121134f4c955SGuy Levi mlx5_frag_buf_free(dev->mdev, &qp->buf); 1212e126ba97SEli Cohen return err; 1213e126ba97SEli Cohen } 1214e126ba97SEli Cohen 121509a7d9ecSSaeed Mahameed static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) 1216e126ba97SEli Cohen { 12177aede1a2SLeon Romanovsky if (attr->srq || (qp->type == IB_QPT_XRC_TGT) || 12187aede1a2SLeon Romanovsky (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI)) 121909a7d9ecSSaeed Mahameed return MLX5_SRQ_RQ; 1220e126ba97SEli Cohen else if (!qp->has_rq) 122109a7d9ecSSaeed Mahameed return MLX5_ZERO_LEN_RQ; 12227aede1a2SLeon Romanovsky 122309a7d9ecSSaeed Mahameed return MLX5_NON_ZERO_RQ; 1224e126ba97SEli Cohen } 1225e126ba97SEli 
Cohen 12260fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 1227c2e53b2cSYishai Hadas struct mlx5_ib_qp *qp, 12281cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u32 tdn, 12291cd6dbd3SYishai Hadas struct ib_pd *pd) 12300fb2ed66Smajd@mellanox.com { 1231e0b4b472SLeon Romanovsky u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 12320fb2ed66Smajd@mellanox.com void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 12330fb2ed66Smajd@mellanox.com 12341cd6dbd3SYishai Hadas MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid); 12350fb2ed66Smajd@mellanox.com MLX5_SET(tisc, tisc, transport_domain, tdn); 12362be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 1237c2e53b2cSYishai Hadas MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); 1238c2e53b2cSYishai Hadas 1239e0b4b472SLeon Romanovsky return mlx5_core_create_tis(dev->mdev, in, &sq->tisn); 12400fb2ed66Smajd@mellanox.com } 12410fb2ed66Smajd@mellanox.com 12420fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 12431cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, struct ib_pd *pd) 12440fb2ed66Smajd@mellanox.com { 12451cd6dbd3SYishai Hadas mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid); 12460fb2ed66Smajd@mellanox.com } 12470fb2ed66Smajd@mellanox.com 1248d5ed8ac3SMark Bloch static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq) 1249b96c9ddeSMark Bloch { 1250b96c9ddeSMark Bloch if (sq->flow_rule) 1251b96c9ddeSMark Bloch mlx5_del_flow_rules(sq->flow_rule); 1252d5ed8ac3SMark Bloch sq->flow_rule = NULL; 1253b96c9ddeSMark Bloch } 1254b96c9ddeSMark Bloch 12559a1ac95aSAharon Landau static bool fr_supported(int ts_cap) 12562fe8d4b8SAharon Landau { 12579a1ac95aSAharon Landau return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING || 12589a1ac95aSAharon Landau ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; 12599a1ac95aSAharon Landau } 12602fe8d4b8SAharon Landau 12619a1ac95aSAharon Landau static int get_ts_format(struct mlx5_ib_dev 
*dev, struct mlx5_ib_cq *cq, 126233652951SAharon Landau bool fr_sup, bool rt_sup) 12639a1ac95aSAharon Landau { 126433652951SAharon Landau if (cq->private_flags & MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS) { 126533652951SAharon Landau if (!rt_sup) { 126633652951SAharon Landau mlx5_ib_dbg(dev, 126733652951SAharon Landau "Real time TS format is not supported\n"); 12682fe8d4b8SAharon Landau return -EOPNOTSUPP; 12692fe8d4b8SAharon Landau } 127033652951SAharon Landau return MLX5_TIMESTAMP_FORMAT_REAL_TIME; 12712fe8d4b8SAharon Landau } 12729a1ac95aSAharon Landau if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) { 12739a1ac95aSAharon Landau if (!fr_sup) { 12749a1ac95aSAharon Landau mlx5_ib_dbg(dev, 12759a1ac95aSAharon Landau "Free running TS format is not supported\n"); 12762fe8d4b8SAharon Landau return -EOPNOTSUPP; 12772fe8d4b8SAharon Landau } 12789a1ac95aSAharon Landau return MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; 12792fe8d4b8SAharon Landau } 12809a1ac95aSAharon Landau return fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : 12819a1ac95aSAharon Landau MLX5_TIMESTAMP_FORMAT_DEFAULT; 12829a1ac95aSAharon Landau } 12839a1ac95aSAharon Landau 12849a1ac95aSAharon Landau static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq) 12859a1ac95aSAharon Landau { 12869a1ac95aSAharon Landau u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format); 12879a1ac95aSAharon Landau 128833652951SAharon Landau return get_ts_format(dev, recv_cq, fr_supported(ts_cap), 128933652951SAharon Landau rt_supported(ts_cap)); 12902fe8d4b8SAharon Landau } 12912fe8d4b8SAharon Landau 12922fe8d4b8SAharon Landau static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) 12932fe8d4b8SAharon Landau { 12949a1ac95aSAharon Landau u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format); 12952fe8d4b8SAharon Landau 129633652951SAharon Landau return get_ts_format(dev, send_cq, fr_supported(ts_cap), 129733652951SAharon Landau rt_supported(ts_cap)); 12982fe8d4b8SAharon Landau } 
12992fe8d4b8SAharon Landau 13002fe8d4b8SAharon Landau static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, 13012fe8d4b8SAharon Landau struct mlx5_ib_cq *recv_cq) 13022fe8d4b8SAharon Landau { 13039a1ac95aSAharon Landau u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format); 13049a1ac95aSAharon Landau bool fr_sup = fr_supported(ts_cap); 130533652951SAharon Landau bool rt_sup = rt_supported(ts_cap); 13069a1ac95aSAharon Landau u8 default_ts = fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : 13079a1ac95aSAharon Landau MLX5_TIMESTAMP_FORMAT_DEFAULT; 13089a1ac95aSAharon Landau int send_ts_format = 130933652951SAharon Landau send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) : 13109a1ac95aSAharon Landau default_ts; 13119a1ac95aSAharon Landau int recv_ts_format = 131233652951SAharon Landau recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) : 13139a1ac95aSAharon Landau default_ts; 13142fe8d4b8SAharon Landau 13159a1ac95aSAharon Landau if (send_ts_format < 0 || recv_ts_format < 0) 13162fe8d4b8SAharon Landau return -EOPNOTSUPP; 13172fe8d4b8SAharon Landau 131833652951SAharon Landau if (send_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT && 131933652951SAharon Landau recv_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT && 132033652951SAharon Landau send_ts_format != recv_ts_format) { 132133652951SAharon Landau mlx5_ib_dbg( 132233652951SAharon Landau dev, 132333652951SAharon Landau "The send ts_format does not match the receive ts_format\n"); 13242fe8d4b8SAharon Landau return -EOPNOTSUPP; 13252fe8d4b8SAharon Landau } 132633652951SAharon Landau 13279a1ac95aSAharon Landau return send_ts_format == default_ts ? 
recv_ts_format : send_ts_format; 13282fe8d4b8SAharon Landau } 13292fe8d4b8SAharon Landau 13300fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, 1331b0ea0fa5SJason Gunthorpe struct ib_udata *udata, 13320fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq, void *qpin, 13332fe8d4b8SAharon Landau struct ib_pd *pd, struct mlx5_ib_cq *cq) 13340fb2ed66Smajd@mellanox.com { 13350fb2ed66Smajd@mellanox.com struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer; 13360fb2ed66Smajd@mellanox.com __be64 *pas; 13370fb2ed66Smajd@mellanox.com void *in; 13380fb2ed66Smajd@mellanox.com void *sqc; 13390fb2ed66Smajd@mellanox.com void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 13400fb2ed66Smajd@mellanox.com void *wq; 13410fb2ed66Smajd@mellanox.com int inlen; 13420fb2ed66Smajd@mellanox.com int err; 1343ad480ea5SJason Gunthorpe unsigned int page_offset_quantized; 1344ad480ea5SJason Gunthorpe unsigned long page_size; 13452fe8d4b8SAharon Landau int ts_format; 13462fe8d4b8SAharon Landau 13472fe8d4b8SAharon Landau ts_format = get_sq_ts_format(dev, cq); 13482fe8d4b8SAharon Landau if (ts_format < 0) 13492fe8d4b8SAharon Landau return ts_format; 13500fb2ed66Smajd@mellanox.com 1351ad480ea5SJason Gunthorpe sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr, 1352ad480ea5SJason Gunthorpe ubuffer->buf_size, 0); 1353ad480ea5SJason Gunthorpe if (IS_ERR(sq->ubuffer.umem)) 1354ad480ea5SJason Gunthorpe return PTR_ERR(sq->ubuffer.umem); 1355ad480ea5SJason Gunthorpe page_size = mlx5_umem_find_best_quantized_pgoff( 1356ad480ea5SJason Gunthorpe ubuffer->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT, 1357ad480ea5SJason Gunthorpe page_offset, 64, &page_offset_quantized); 1358ad480ea5SJason Gunthorpe if (!page_size) { 1359ad480ea5SJason Gunthorpe err = -EINVAL; 1360ad480ea5SJason Gunthorpe goto err_umem; 1361ad480ea5SJason Gunthorpe } 13620fb2ed66Smajd@mellanox.com 13637db0eea9SJason Gunthorpe inlen = MLX5_ST_SZ_BYTES(create_sq_in) + 1364ad480ea5SJason Gunthorpe sizeof(u64) * 
1365ad480ea5SJason Gunthorpe ib_umem_num_dma_blocks(sq->ubuffer.umem, page_size); 13661b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 13670fb2ed66Smajd@mellanox.com if (!in) { 13680fb2ed66Smajd@mellanox.com err = -ENOMEM; 13690fb2ed66Smajd@mellanox.com goto err_umem; 13700fb2ed66Smajd@mellanox.com } 13710fb2ed66Smajd@mellanox.com 1372c14003f0SYishai Hadas MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid); 13730fb2ed66Smajd@mellanox.com sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 13740fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, flush_in_error_en, 1); 1375795b609cSBodong Wang if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe)) 1376795b609cSBodong Wang MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1); 13770fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); 13782fe8d4b8SAharon Landau MLX5_SET(sqc, sqc, ts_format, ts_format); 13790fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index)); 13800fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd)); 13810fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, tis_lst_sz, 1); 13820fb2ed66Smajd@mellanox.com MLX5_SET(sqc, sqc, tis_num_0, sq->tisn); 138396dc3fc5SNoa Osherovich if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 138496dc3fc5SNoa Osherovich MLX5_CAP_ETH(dev->mdev, swp)) 138596dc3fc5SNoa Osherovich MLX5_SET(sqc, sqc, allow_swp, 1); 13860fb2ed66Smajd@mellanox.com 13870fb2ed66Smajd@mellanox.com wq = MLX5_ADDR_OF(sqc, sqc, wq); 13880fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 13890fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd)); 13900fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page)); 13910fb2ed66Smajd@mellanox.com MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr)); 13920fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 13930fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size)); 
1394ad480ea5SJason Gunthorpe MLX5_SET(wq, wq, log_wq_pg_sz, 1395ad480ea5SJason Gunthorpe order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT); 1396ad480ea5SJason Gunthorpe MLX5_SET(wq, wq, page_offset, page_offset_quantized); 13970fb2ed66Smajd@mellanox.com 13980fb2ed66Smajd@mellanox.com pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 1399ad480ea5SJason Gunthorpe mlx5_ib_populate_pas(sq->ubuffer.umem, page_size, pas, 0); 14000fb2ed66Smajd@mellanox.com 1401333fbaa0SLeon Romanovsky err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp); 14020fb2ed66Smajd@mellanox.com 14030fb2ed66Smajd@mellanox.com kvfree(in); 14040fb2ed66Smajd@mellanox.com 14050fb2ed66Smajd@mellanox.com if (err) 14060fb2ed66Smajd@mellanox.com goto err_umem; 14070fb2ed66Smajd@mellanox.com 14080fb2ed66Smajd@mellanox.com return 0; 14090fb2ed66Smajd@mellanox.com 14100fb2ed66Smajd@mellanox.com err_umem: 14110fb2ed66Smajd@mellanox.com ib_umem_release(sq->ubuffer.umem); 14120fb2ed66Smajd@mellanox.com sq->ubuffer.umem = NULL; 14130fb2ed66Smajd@mellanox.com 14140fb2ed66Smajd@mellanox.com return err; 14150fb2ed66Smajd@mellanox.com } 14160fb2ed66Smajd@mellanox.com 14170fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, 14180fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq) 14190fb2ed66Smajd@mellanox.com { 1420d5ed8ac3SMark Bloch destroy_flow_rule_vport_sq(sq); 1421333fbaa0SLeon Romanovsky mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp); 14220fb2ed66Smajd@mellanox.com ib_umem_release(sq->ubuffer.umem); 14230fb2ed66Smajd@mellanox.com } 14240fb2ed66Smajd@mellanox.com 14250fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 14262c292dbbSBoris Pismenny struct mlx5_ib_rq *rq, void *qpin, 14272fe8d4b8SAharon Landau struct ib_pd *pd, struct mlx5_ib_cq *cq) 14280fb2ed66Smajd@mellanox.com { 1429358e42eaSMajd Dibbiny struct mlx5_ib_qp *mqp = rq->base.container_mibqp; 14300fb2ed66Smajd@mellanox.com __be64 *pas; 14310fb2ed66Smajd@mellanox.com void *in; 
14320fb2ed66Smajd@mellanox.com void *rqc; 14330fb2ed66Smajd@mellanox.com void *wq; 14340fb2ed66Smajd@mellanox.com void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 14357579dcdfSJason Gunthorpe struct ib_umem *umem = rq->base.ubuffer.umem; 14367579dcdfSJason Gunthorpe unsigned int page_offset_quantized; 14377579dcdfSJason Gunthorpe unsigned long page_size = 0; 14382fe8d4b8SAharon Landau int ts_format; 14392c292dbbSBoris Pismenny size_t inlen; 14400fb2ed66Smajd@mellanox.com int err; 14412c292dbbSBoris Pismenny 14422fe8d4b8SAharon Landau ts_format = get_rq_ts_format(dev, cq); 14432fe8d4b8SAharon Landau if (ts_format < 0) 14442fe8d4b8SAharon Landau return ts_format; 14452fe8d4b8SAharon Landau 14467579dcdfSJason Gunthorpe page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz, 14477579dcdfSJason Gunthorpe MLX5_ADAPTER_PAGE_SHIFT, 14487579dcdfSJason Gunthorpe page_offset, 64, 14497579dcdfSJason Gunthorpe &page_offset_quantized); 14507579dcdfSJason Gunthorpe if (!page_size) 14512c292dbbSBoris Pismenny return -EINVAL; 14520fb2ed66Smajd@mellanox.com 14537579dcdfSJason Gunthorpe inlen = MLX5_ST_SZ_BYTES(create_rq_in) + 14547579dcdfSJason Gunthorpe sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size); 14551b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 14560fb2ed66Smajd@mellanox.com if (!in) 14570fb2ed66Smajd@mellanox.com return -ENOMEM; 14580fb2ed66Smajd@mellanox.com 145934d57585SYishai Hadas MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid); 14600fb2ed66Smajd@mellanox.com rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 1461e4cc4fa7SNoa Osherovich if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING)) 14620fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, vsd, 1); 14630fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); 14640fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 14652fe8d4b8SAharon Landau MLX5_SET(rqc, rqc, ts_format, ts_format); 14660fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, 
flush_in_error_en, 1); 14670fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index)); 14680fb2ed66Smajd@mellanox.com MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv)); 14690fb2ed66Smajd@mellanox.com 14702be08c30SLeon Romanovsky if (mqp->flags & IB_QP_CREATE_SCATTER_FCS) 1471358e42eaSMajd Dibbiny MLX5_SET(rqc, rqc, scatter_fcs, 1); 1472358e42eaSMajd Dibbiny 14730fb2ed66Smajd@mellanox.com wq = MLX5_ADDR_OF(rqc, rqc, wq); 14740fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 1475b1383aa6SNoa Osherovich if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING) 1476b1383aa6SNoa Osherovich MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 14777579dcdfSJason Gunthorpe MLX5_SET(wq, wq, page_offset, page_offset_quantized); 14780fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd)); 14790fb2ed66Smajd@mellanox.com MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr)); 14800fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4); 14817579dcdfSJason Gunthorpe MLX5_SET(wq, wq, log_wq_pg_sz, 14827579dcdfSJason Gunthorpe order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT); 14830fb2ed66Smajd@mellanox.com MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size)); 14840fb2ed66Smajd@mellanox.com 14850fb2ed66Smajd@mellanox.com pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 14867579dcdfSJason Gunthorpe mlx5_ib_populate_pas(umem, page_size, pas, 0); 14870fb2ed66Smajd@mellanox.com 1488333fbaa0SLeon Romanovsky err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp); 14890fb2ed66Smajd@mellanox.com 14900fb2ed66Smajd@mellanox.com kvfree(in); 14910fb2ed66Smajd@mellanox.com 14920fb2ed66Smajd@mellanox.com return err; 14930fb2ed66Smajd@mellanox.com } 14940fb2ed66Smajd@mellanox.com 14950fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 14960fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq) 14970fb2ed66Smajd@mellanox.com { 
1498333fbaa0SLeon Romanovsky mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp); 14990fb2ed66Smajd@mellanox.com } 15000fb2ed66Smajd@mellanox.com 15010042f9e4SMark Bloch static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, 15020042f9e4SMark Bloch struct mlx5_ib_rq *rq, 1503443c1cf9SYishai Hadas u32 qp_flags_en, 1504443c1cf9SYishai Hadas struct ib_pd *pd) 15050042f9e4SMark Bloch { 15060042f9e4SMark Bloch if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 15070042f9e4SMark Bloch MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) 15080042f9e4SMark Bloch mlx5_ib_disable_lb(dev, false, true); 1509443c1cf9SYishai Hadas mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid); 15100042f9e4SMark Bloch } 15110042f9e4SMark Bloch 15120fb2ed66Smajd@mellanox.com static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, 1513f95ef6cbSMaor Gottlieb struct mlx5_ib_rq *rq, u32 tdn, 1514e0b4b472SLeon Romanovsky u32 *qp_flags_en, struct ib_pd *pd, 1515e0b4b472SLeon Romanovsky u32 *out) 15160fb2ed66Smajd@mellanox.com { 1517175edba8SMark Bloch u8 lb_flag = 0; 15180fb2ed66Smajd@mellanox.com u32 *in; 15190fb2ed66Smajd@mellanox.com void *tirc; 15200fb2ed66Smajd@mellanox.com int inlen; 15210fb2ed66Smajd@mellanox.com int err; 15220fb2ed66Smajd@mellanox.com 15230fb2ed66Smajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(create_tir_in); 15241b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 15250fb2ed66Smajd@mellanox.com if (!in) 15260fb2ed66Smajd@mellanox.com return -ENOMEM; 15270fb2ed66Smajd@mellanox.com 1528443c1cf9SYishai Hadas MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid); 15290fb2ed66Smajd@mellanox.com tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 15300fb2ed66Smajd@mellanox.com MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); 15310fb2ed66Smajd@mellanox.com MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn); 15320fb2ed66Smajd@mellanox.com MLX5_SET(tirc, tirc, transport_domain, tdn); 1533175edba8SMark Bloch if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS) 1534f95ef6cbSMaor 
Gottlieb MLX5_SET(tirc, tirc, tunneled_offload_en, 1); 15350fb2ed66Smajd@mellanox.com 1536175edba8SMark Bloch if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) 1537175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 1538175edba8SMark Bloch 1539175edba8SMark Bloch if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) 1540175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; 1541175edba8SMark Bloch 15426a4d00beSMark Bloch if (dev->is_rep) { 1543175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 1544175edba8SMark Bloch *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC; 1545175edba8SMark Bloch } 1546175edba8SMark Bloch 1547175edba8SMark Bloch MLX5_SET(tirc, tirc, self_lb_block, lb_flag); 1548e0b4b472SLeon Romanovsky MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 1549e0b4b472SLeon Romanovsky err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); 15501f1d6abbSAriel Levkovich rq->tirn = MLX5_GET(create_tir_out, out, tirn); 15510042f9e4SMark Bloch if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { 15520042f9e4SMark Bloch err = mlx5_ib_enable_lb(dev, false, true); 15530042f9e4SMark Bloch 15540042f9e4SMark Bloch if (err) 1555443c1cf9SYishai Hadas destroy_raw_packet_qp_tir(dev, rq, 0, pd); 15560042f9e4SMark Bloch } 15570fb2ed66Smajd@mellanox.com kvfree(in); 15580fb2ed66Smajd@mellanox.com 15590fb2ed66Smajd@mellanox.com return err; 15600fb2ed66Smajd@mellanox.com } 15610fb2ed66Smajd@mellanox.com 15620fb2ed66Smajd@mellanox.com static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 15632fe8d4b8SAharon Landau u32 *in, size_t inlen, struct ib_pd *pd, 15647f72052cSYishai Hadas struct ib_udata *udata, 15652fe8d4b8SAharon Landau struct mlx5_ib_create_qp_resp *resp, 15662fe8d4b8SAharon Landau struct ib_qp_init_attr *init_attr) 15670fb2ed66Smajd@mellanox.com { 15680fb2ed66Smajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 15690fb2ed66Smajd@mellanox.com 
struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 15700fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 157189944450SShamir Rabinovitch struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context( 157289944450SShamir Rabinovitch udata, struct mlx5_ib_ucontext, ibucontext); 15730fb2ed66Smajd@mellanox.com int err; 15740fb2ed66Smajd@mellanox.com u32 tdn = mucontext->tdn; 15757f72052cSYishai Hadas u16 uid = to_mpd(pd)->uid; 15761f1d6abbSAriel Levkovich u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; 15770fb2ed66Smajd@mellanox.com 15780eacc574SAharon Landau if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt) 15790eacc574SAharon Landau return -EINVAL; 15800fb2ed66Smajd@mellanox.com if (qp->sq.wqe_cnt) { 15811cd6dbd3SYishai Hadas err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd); 15820fb2ed66Smajd@mellanox.com if (err) 15830fb2ed66Smajd@mellanox.com return err; 15840fb2ed66Smajd@mellanox.com 15852fe8d4b8SAharon Landau err = create_raw_packet_qp_sq(dev, udata, sq, in, pd, 15862fe8d4b8SAharon Landau to_mcq(init_attr->send_cq)); 15870fb2ed66Smajd@mellanox.com if (err) 15880fb2ed66Smajd@mellanox.com goto err_destroy_tis; 15890fb2ed66Smajd@mellanox.com 15907f72052cSYishai Hadas if (uid) { 15917f72052cSYishai Hadas resp->tisn = sq->tisn; 15927f72052cSYishai Hadas resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN; 15937f72052cSYishai Hadas resp->sqn = sq->base.mqp.qpn; 15947f72052cSYishai Hadas resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN; 15957f72052cSYishai Hadas } 15967f72052cSYishai Hadas 15970fb2ed66Smajd@mellanox.com sq->base.container_mibqp = qp; 15981d31e9c0SMajd Dibbiny sq->base.mqp.event = mlx5_ib_qp_event; 15990fb2ed66Smajd@mellanox.com } 16000fb2ed66Smajd@mellanox.com 16010fb2ed66Smajd@mellanox.com if (qp->rq.wqe_cnt) { 1602358e42eaSMajd Dibbiny rq->base.container_mibqp = qp; 1603358e42eaSMajd Dibbiny 16042be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING) 1605e4cc4fa7SNoa Osherovich rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; 
16062be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) 1607b1383aa6SNoa Osherovich rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; 16082fe8d4b8SAharon Landau err = create_raw_packet_qp_rq(dev, rq, in, pd, 16092fe8d4b8SAharon Landau to_mcq(init_attr->recv_cq)); 16100fb2ed66Smajd@mellanox.com if (err) 16110fb2ed66Smajd@mellanox.com goto err_destroy_sq; 16120fb2ed66Smajd@mellanox.com 1613e0b4b472SLeon Romanovsky err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd, 1614e0b4b472SLeon Romanovsky out); 16150fb2ed66Smajd@mellanox.com if (err) 16160fb2ed66Smajd@mellanox.com goto err_destroy_rq; 16177f72052cSYishai Hadas 16187f72052cSYishai Hadas if (uid) { 16197f72052cSYishai Hadas resp->rqn = rq->base.mqp.qpn; 16207f72052cSYishai Hadas resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN; 16217f72052cSYishai Hadas resp->tirn = rq->tirn; 16227f72052cSYishai Hadas resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN; 162354a38b66SAlex Vesker if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || 162454a38b66SAlex Vesker MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) { 16251f1d6abbSAriel Levkovich resp->tir_icm_addr = MLX5_GET( 16261f1d6abbSAriel Levkovich create_tir_out, out, icm_address_31_0); 16271f1d6abbSAriel Levkovich resp->tir_icm_addr |= 16281f1d6abbSAriel Levkovich (u64)MLX5_GET(create_tir_out, out, 16291f1d6abbSAriel Levkovich icm_address_39_32) 16301f1d6abbSAriel Levkovich << 32; 16311f1d6abbSAriel Levkovich resp->tir_icm_addr |= 16321f1d6abbSAriel Levkovich (u64)MLX5_GET(create_tir_out, out, 16331f1d6abbSAriel Levkovich icm_address_63_40) 16341f1d6abbSAriel Levkovich << 40; 16351f1d6abbSAriel Levkovich resp->comp_mask |= 16361f1d6abbSAriel Levkovich MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR; 16371f1d6abbSAriel Levkovich } 16387f72052cSYishai Hadas } 16390fb2ed66Smajd@mellanox.com } 16400fb2ed66Smajd@mellanox.com 16410fb2ed66Smajd@mellanox.com qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? 
sq->base.mqp.qpn : 16420fb2ed66Smajd@mellanox.com rq->base.mqp.qpn; 16430fb2ed66Smajd@mellanox.com return 0; 16440fb2ed66Smajd@mellanox.com 16450fb2ed66Smajd@mellanox.com err_destroy_rq: 16460fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_rq(dev, rq); 16470fb2ed66Smajd@mellanox.com err_destroy_sq: 16480fb2ed66Smajd@mellanox.com if (!qp->sq.wqe_cnt) 16490fb2ed66Smajd@mellanox.com return err; 16500fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_sq(dev, sq); 16510fb2ed66Smajd@mellanox.com err_destroy_tis: 16521cd6dbd3SYishai Hadas destroy_raw_packet_qp_tis(dev, sq, pd); 16530fb2ed66Smajd@mellanox.com 16540fb2ed66Smajd@mellanox.com return err; 16550fb2ed66Smajd@mellanox.com } 16560fb2ed66Smajd@mellanox.com 16570fb2ed66Smajd@mellanox.com static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev, 16580fb2ed66Smajd@mellanox.com struct mlx5_ib_qp *qp) 16590fb2ed66Smajd@mellanox.com { 16600fb2ed66Smajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 16610fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 16620fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 16630fb2ed66Smajd@mellanox.com 16640fb2ed66Smajd@mellanox.com if (qp->rq.wqe_cnt) { 1665443c1cf9SYishai Hadas destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd); 16660fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_rq(dev, rq); 16670fb2ed66Smajd@mellanox.com } 16680fb2ed66Smajd@mellanox.com 16690fb2ed66Smajd@mellanox.com if (qp->sq.wqe_cnt) { 16700fb2ed66Smajd@mellanox.com destroy_raw_packet_qp_sq(dev, sq); 16711cd6dbd3SYishai Hadas destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd); 16720fb2ed66Smajd@mellanox.com } 16730fb2ed66Smajd@mellanox.com } 16740fb2ed66Smajd@mellanox.com 16750fb2ed66Smajd@mellanox.com static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp, 16760fb2ed66Smajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp) 16770fb2ed66Smajd@mellanox.com { 16780fb2ed66Smajd@mellanox.com struct mlx5_ib_sq *sq = 
&raw_packet_qp->sq; 16790fb2ed66Smajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 16800fb2ed66Smajd@mellanox.com 16810fb2ed66Smajd@mellanox.com sq->sq = &qp->sq; 16820fb2ed66Smajd@mellanox.com rq->rq = &qp->rq; 16830fb2ed66Smajd@mellanox.com sq->doorbell = &qp->db; 16840fb2ed66Smajd@mellanox.com rq->doorbell = &qp->db; 16850fb2ed66Smajd@mellanox.com } 16860fb2ed66Smajd@mellanox.com 168728d61370SYishai Hadas static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 168828d61370SYishai Hadas { 16890042f9e4SMark Bloch if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 16900042f9e4SMark Bloch MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) 16910042f9e4SMark Bloch mlx5_ib_disable_lb(dev, false, true); 1692443c1cf9SYishai Hadas mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, 1693443c1cf9SYishai Hadas to_mpd(qp->ibqp.pd)->uid); 169428d61370SYishai Hadas } 169528d61370SYishai Hadas 1696f78d358cSLeon Romanovsky struct mlx5_create_qp_params { 1697f78d358cSLeon Romanovsky struct ib_udata *udata; 1698f78d358cSLeon Romanovsky size_t inlen; 16996f2cf76eSLeon Romanovsky size_t outlen; 1700e383085cSLeon Romanovsky size_t ucmd_size; 1701f78d358cSLeon Romanovsky void *ucmd; 1702f78d358cSLeon Romanovsky u8 is_rss_raw : 1; 1703f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr; 1704f78d358cSLeon Romanovsky u32 uidx; 170508d53976SLeon Romanovsky struct mlx5_ib_create_qp_resp resp; 1706f78d358cSLeon Romanovsky }; 1707f78d358cSLeon Romanovsky 1708f78d358cSLeon Romanovsky static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd, 1709f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 1710f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 171128d61370SYishai Hadas { 1712f78d358cSLeon Romanovsky struct ib_qp_init_attr *init_attr = params->attr; 1713f78d358cSLeon Romanovsky struct mlx5_ib_create_qp_rss *ucmd = params->ucmd; 1714f78d358cSLeon Romanovsky struct ib_udata *udata = params->udata; 171589944450SShamir Rabinovitch struct 
mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context( 171689944450SShamir Rabinovitch udata, struct mlx5_ib_ucontext, ibucontext); 171728d61370SYishai Hadas int inlen; 17181f1d6abbSAriel Levkovich int outlen; 171928d61370SYishai Hadas int err; 172028d61370SYishai Hadas u32 *in; 17211f1d6abbSAriel Levkovich u32 *out; 172228d61370SYishai Hadas void *tirc; 172328d61370SYishai Hadas void *hfso; 172428d61370SYishai Hadas u32 selected_fields = 0; 17252d93fc85SMatan Barak u32 outer_l4; 172628d61370SYishai Hadas u32 tdn = mucontext->tdn; 1727175edba8SMark Bloch u8 lb_flag = 0; 172828d61370SYishai Hadas 17295ce0592bSLeon Romanovsky if (ucmd->comp_mask) { 173028d61370SYishai Hadas mlx5_ib_dbg(dev, "invalid comp mask\n"); 173128d61370SYishai Hadas return -EOPNOTSUPP; 173228d61370SYishai Hadas } 173328d61370SYishai Hadas 17345ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER && 17355ce0592bSLeon Romanovsky !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) { 1736309fa347SMaor Gottlieb mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n"); 1737309fa347SMaor Gottlieb return -EOPNOTSUPP; 1738309fa347SMaor Gottlieb } 1739309fa347SMaor Gottlieb 174037518fa4SLeon Romanovsky if (dev->is_rep) 1741175edba8SMark Bloch qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC; 1742175edba8SMark Bloch 174337518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) 174437518fa4SLeon Romanovsky lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 174537518fa4SLeon Romanovsky 174637518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) 1747175edba8SMark Bloch lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; 1748175edba8SMark Bloch 174928d61370SYishai Hadas inlen = MLX5_ST_SZ_BYTES(create_tir_in); 17501f1d6abbSAriel Levkovich outlen = MLX5_ST_SZ_BYTES(create_tir_out); 17511f1d6abbSAriel Levkovich in = kvzalloc(inlen + outlen, GFP_KERNEL); 175228d61370SYishai Hadas if (!in) 175328d61370SYishai Hadas return -ENOMEM; 
175428d61370SYishai Hadas 17551f1d6abbSAriel Levkovich out = in + MLX5_ST_SZ_DW(create_tir_in); 1756443c1cf9SYishai Hadas MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid); 175728d61370SYishai Hadas tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 175828d61370SYishai Hadas MLX5_SET(tirc, tirc, disp_type, 175928d61370SYishai Hadas MLX5_TIRC_DISP_TYPE_INDIRECT); 176028d61370SYishai Hadas MLX5_SET(tirc, tirc, indirect_table, 176128d61370SYishai Hadas init_attr->rwq_ind_tbl->ind_tbl_num); 176228d61370SYishai Hadas MLX5_SET(tirc, tirc, transport_domain, tdn); 176328d61370SYishai Hadas 176428d61370SYishai Hadas hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1765f95ef6cbSMaor Gottlieb 17665ce0592bSLeon Romanovsky if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) 1767f95ef6cbSMaor Gottlieb MLX5_SET(tirc, tirc, tunneled_offload_en, 1); 1768f95ef6cbSMaor Gottlieb 1769175edba8SMark Bloch MLX5_SET(tirc, tirc, self_lb_block, lb_flag); 1770175edba8SMark Bloch 17715ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER) 1772309fa347SMaor Gottlieb hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner); 1773309fa347SMaor Gottlieb else 1774309fa347SMaor Gottlieb hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1775309fa347SMaor Gottlieb 17765ce0592bSLeon Romanovsky switch (ucmd->rx_hash_function) { 177728d61370SYishai Hadas case MLX5_RX_HASH_FUNC_TOEPLITZ: 177828d61370SYishai Hadas { 177928d61370SYishai Hadas void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 178028d61370SYishai Hadas size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); 178128d61370SYishai Hadas 17825ce0592bSLeon Romanovsky if (len != ucmd->rx_key_len) { 178328d61370SYishai Hadas err = -EINVAL; 178428d61370SYishai Hadas goto err; 178528d61370SYishai Hadas } 178628d61370SYishai Hadas 178728d61370SYishai Hadas MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); 17885ce0592bSLeon Romanovsky memcpy(rss_key, ucmd->rx_hash_key, len); 
178928d61370SYishai Hadas break; 179028d61370SYishai Hadas } 179128d61370SYishai Hadas default: 179228d61370SYishai Hadas err = -EOPNOTSUPP; 179328d61370SYishai Hadas goto err; 179428d61370SYishai Hadas } 179528d61370SYishai Hadas 17965ce0592bSLeon Romanovsky if (!ucmd->rx_hash_fields_mask) { 179728d61370SYishai Hadas /* special case when this TIR serves as steering entry without hashing */ 179828d61370SYishai Hadas if (!init_attr->rwq_ind_tbl->log_ind_tbl_size) 179928d61370SYishai Hadas goto create_tir; 180028d61370SYishai Hadas err = -EINVAL; 180128d61370SYishai Hadas goto err; 180228d61370SYishai Hadas } 180328d61370SYishai Hadas 18045ce0592bSLeon Romanovsky if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 18055ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) && 18065ce0592bSLeon Romanovsky ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 18075ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) { 180828d61370SYishai Hadas err = -EINVAL; 180928d61370SYishai Hadas goto err; 181028d61370SYishai Hadas } 181128d61370SYishai Hadas 181228d61370SYishai Hadas /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */ 18135ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 18145ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) 181528d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 181628d61370SYishai Hadas MLX5_L3_PROT_TYPE_IPV4); 18175ce0592bSLeon Romanovsky else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 18185ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 181928d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 182028d61370SYishai Hadas MLX5_L3_PROT_TYPE_IPV6); 182128d61370SYishai Hadas 18225ce0592bSLeon Romanovsky outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 18235ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & 
MLX5_RX_HASH_DST_PORT_TCP)) 18245ce0592bSLeon Romanovsky << 0 | 18255ce0592bSLeon Romanovsky ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 18265ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 18275ce0592bSLeon Romanovsky << 1 | 18285ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2; 18292d93fc85SMatan Barak 18302d93fc85SMatan Barak /* Check that only one l4 protocol is set */ 18312d93fc85SMatan Barak if (outer_l4 & (outer_l4 - 1)) { 183228d61370SYishai Hadas err = -EINVAL; 183328d61370SYishai Hadas goto err; 183428d61370SYishai Hadas } 183528d61370SYishai Hadas 183628d61370SYishai Hadas /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */ 18375ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 18385ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) 183928d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 184028d61370SYishai Hadas MLX5_L4_PROT_TYPE_TCP); 18415ce0592bSLeon Romanovsky else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 18425ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 184328d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 184428d61370SYishai Hadas MLX5_L4_PROT_TYPE_UDP); 184528d61370SYishai Hadas 18465ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 18475ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6)) 184828d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP; 184928d61370SYishai Hadas 18505ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) || 18515ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 185228d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP; 185328d61370SYishai Hadas 18545ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & 
MLX5_RX_HASH_SRC_PORT_TCP) || 18555ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP)) 185628d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT; 185728d61370SYishai Hadas 18585ce0592bSLeon Romanovsky if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) || 18595ce0592bSLeon Romanovsky (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 186028d61370SYishai Hadas selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT; 186128d61370SYishai Hadas 18625ce0592bSLeon Romanovsky if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) 18632d93fc85SMatan Barak selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI; 18642d93fc85SMatan Barak 186528d61370SYishai Hadas MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields); 186628d61370SYishai Hadas 186728d61370SYishai Hadas create_tir: 1868e0b4b472SLeon Romanovsky MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 1869e0b4b472SLeon Romanovsky err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); 187028d61370SYishai Hadas 18711f1d6abbSAriel Levkovich qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn); 18720042f9e4SMark Bloch if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { 18730042f9e4SMark Bloch err = mlx5_ib_enable_lb(dev, false, true); 18740042f9e4SMark Bloch 18750042f9e4SMark Bloch if (err) 1876443c1cf9SYishai Hadas mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, 1877443c1cf9SYishai Hadas to_mpd(pd)->uid); 18780042f9e4SMark Bloch } 18790042f9e4SMark Bloch 188028d61370SYishai Hadas if (err) 188128d61370SYishai Hadas goto err; 188228d61370SYishai Hadas 18837f72052cSYishai Hadas if (mucontext->devx_uid) { 188408d53976SLeon Romanovsky params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN; 188508d53976SLeon Romanovsky params->resp.tirn = qp->rss_qp.tirn; 188654a38b66SAlex Vesker if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || 188754a38b66SAlex Vesker MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) { 188808d53976SLeon Romanovsky 
params->resp.tir_icm_addr = 18891f1d6abbSAriel Levkovich MLX5_GET(create_tir_out, out, icm_address_31_0); 189008d53976SLeon Romanovsky params->resp.tir_icm_addr |= 189108d53976SLeon Romanovsky (u64)MLX5_GET(create_tir_out, out, 18921f1d6abbSAriel Levkovich icm_address_39_32) 18931f1d6abbSAriel Levkovich << 32; 189408d53976SLeon Romanovsky params->resp.tir_icm_addr |= 189508d53976SLeon Romanovsky (u64)MLX5_GET(create_tir_out, out, 18961f1d6abbSAriel Levkovich icm_address_63_40) 18971f1d6abbSAriel Levkovich << 40; 189808d53976SLeon Romanovsky params->resp.comp_mask |= 18991f1d6abbSAriel Levkovich MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR; 19001f1d6abbSAriel Levkovich } 19017f72052cSYishai Hadas } 19027f72052cSYishai Hadas 190328d61370SYishai Hadas kvfree(in); 190428d61370SYishai Hadas /* qpn is reserved for that QP */ 190528d61370SYishai Hadas qp->trans_qp.base.mqp.qpn = 0; 19062be08c30SLeon Romanovsky qp->is_rss = true; 190728d61370SYishai Hadas return 0; 190828d61370SYishai Hadas 190928d61370SYishai Hadas err: 191028d61370SYishai Hadas kvfree(in); 191128d61370SYishai Hadas return err; 191228d61370SYishai Hadas } 191328d61370SYishai Hadas 19145d6ff1baSYonatan Cohen static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, 191581530ab0SLeon Romanovsky struct mlx5_ib_qp *qp, 19165d6ff1baSYonatan Cohen struct ib_qp_init_attr *init_attr, 19175d6ff1baSYonatan Cohen void *qpc) 19185d6ff1baSYonatan Cohen { 19195d6ff1baSYonatan Cohen int scqe_sz; 19202ab367a7Szhengbin bool allow_scat_cqe = false; 19215d6ff1baSYonatan Cohen 192281530ab0SLeon Romanovsky allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; 19236f4bc0eaSYonatan Cohen 19246f4bc0eaSYonatan Cohen if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) 19255d6ff1baSYonatan Cohen return; 19265d6ff1baSYonatan Cohen 19275d6ff1baSYonatan Cohen scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq); 19285d6ff1baSYonatan Cohen if (scqe_sz == 128) { 19295d6ff1baSYonatan Cohen MLX5_SET(qpc, qpc, 
cs_req, MLX5_REQ_SCAT_DATA64_CQE); 19305d6ff1baSYonatan Cohen return; 19315d6ff1baSYonatan Cohen } 19325d6ff1baSYonatan Cohen 19335d6ff1baSYonatan Cohen if (init_attr->qp_type != MLX5_IB_QPT_DCI || 19345d6ff1baSYonatan Cohen MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe)) 19355d6ff1baSYonatan Cohen MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE); 19365d6ff1baSYonatan Cohen } 19375d6ff1baSYonatan Cohen 1938a60109dcSYonatan Cohen static int atomic_size_to_mode(int size_mask) 1939a60109dcSYonatan Cohen { 1940a60109dcSYonatan Cohen /* driver does not support atomic_size > 256B 1941a60109dcSYonatan Cohen * and does not know how to translate bigger sizes 1942a60109dcSYonatan Cohen */ 1943a60109dcSYonatan Cohen int supported_size_mask = size_mask & 0x1ff; 1944a60109dcSYonatan Cohen int log_max_size; 1945a60109dcSYonatan Cohen 1946a60109dcSYonatan Cohen if (!supported_size_mask) 1947a60109dcSYonatan Cohen return -EOPNOTSUPP; 1948a60109dcSYonatan Cohen 1949a60109dcSYonatan Cohen log_max_size = __fls(supported_size_mask); 1950a60109dcSYonatan Cohen 1951a60109dcSYonatan Cohen if (log_max_size > 3) 1952a60109dcSYonatan Cohen return log_max_size; 1953a60109dcSYonatan Cohen 1954a60109dcSYonatan Cohen return MLX5_ATOMIC_MODE_8B; 1955a60109dcSYonatan Cohen } 1956a60109dcSYonatan Cohen 1957a60109dcSYonatan Cohen static int get_atomic_mode(struct mlx5_ib_dev *dev, 1958a60109dcSYonatan Cohen enum ib_qp_type qp_type) 1959a60109dcSYonatan Cohen { 1960a60109dcSYonatan Cohen u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); 1961a60109dcSYonatan Cohen u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic); 1962a60109dcSYonatan Cohen int atomic_mode = -EOPNOTSUPP; 1963a60109dcSYonatan Cohen int atomic_size_mask; 1964a60109dcSYonatan Cohen 1965a60109dcSYonatan Cohen if (!atomic) 1966a60109dcSYonatan Cohen return -EOPNOTSUPP; 1967a60109dcSYonatan Cohen 1968a60109dcSYonatan Cohen if (qp_type == MLX5_IB_QPT_DCT) 1969a60109dcSYonatan Cohen atomic_size_mask = 
MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc); 1970a60109dcSYonatan Cohen else 1971a60109dcSYonatan Cohen atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); 1972a60109dcSYonatan Cohen 1973a60109dcSYonatan Cohen if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) || 1974a60109dcSYonatan Cohen (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD)) 1975a60109dcSYonatan Cohen atomic_mode = atomic_size_to_mode(atomic_size_mask); 1976a60109dcSYonatan Cohen 1977a60109dcSYonatan Cohen if (atomic_mode <= 0 && 1978a60109dcSYonatan Cohen (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP && 1979a60109dcSYonatan Cohen atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD)) 1980a60109dcSYonatan Cohen atomic_mode = MLX5_ATOMIC_MODE_IB_COMP; 1981a60109dcSYonatan Cohen 1982a60109dcSYonatan Cohen return atomic_mode; 1983a60109dcSYonatan Cohen } 1984a60109dcSYonatan Cohen 1985f78d358cSLeon Romanovsky static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1986f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 198704bcc1c2SLeon Romanovsky { 1988f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 1989f78d358cSLeon Romanovsky u32 uidx = params->uidx; 199004bcc1c2SLeon Romanovsky struct mlx5_ib_resources *devr = &dev->devr; 19913e09a427SLeon Romanovsky u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 199204bcc1c2SLeon Romanovsky int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 199304bcc1c2SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 199404bcc1c2SLeon Romanovsky struct mlx5_ib_qp_base *base; 199504bcc1c2SLeon Romanovsky unsigned long flags; 199604bcc1c2SLeon Romanovsky void *qpc; 199704bcc1c2SLeon Romanovsky u32 *in; 199804bcc1c2SLeon Romanovsky int err; 199904bcc1c2SLeon Romanovsky 200004bcc1c2SLeon Romanovsky if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) 200104bcc1c2SLeon Romanovsky qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 200204bcc1c2SLeon Romanovsky 200304bcc1c2SLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 
200404bcc1c2SLeon Romanovsky if (!in) 200504bcc1c2SLeon Romanovsky return -ENOMEM; 200604bcc1c2SLeon Romanovsky 200704bcc1c2SLeon Romanovsky qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 200804bcc1c2SLeon Romanovsky 200904bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC); 201004bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 201104bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn); 201204bcc1c2SLeon Romanovsky 201304bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 201404bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, block_lb_mc, 1); 201504bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 201604bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_master, 1); 201704bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_SEND) 201804bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_slave_send, 1); 201904bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_RECV) 202004bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cd_slave_receive, 1); 202104bcc1c2SLeon Romanovsky 20228256c69bSMaor Gottlieb MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); 202304bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ); 202404bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, no_sq, 1); 202504bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 202604bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); 202704bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 202804bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn); 202904bcc1c2SLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 203004bcc1c2SLeon Romanovsky 203104bcc1c2SLeon Romanovsky /* 0xffffff means we ask to work with cqe version 0 */ 203204bcc1c2SLeon Romanovsky if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 203304bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, user_index, uidx); 203404bcc1c2SLeon 
Romanovsky 203504bcc1c2SLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { 203604bcc1c2SLeon Romanovsky MLX5_SET(qpc, qpc, end_padding_mode, 203704bcc1c2SLeon Romanovsky MLX5_WQ_END_PAD_MODE_ALIGN); 203804bcc1c2SLeon Romanovsky /* Special case to clean flag */ 203904bcc1c2SLeon Romanovsky qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 204004bcc1c2SLeon Romanovsky } 204104bcc1c2SLeon Romanovsky 204204bcc1c2SLeon Romanovsky base = &qp->trans_qp.base; 20433e09a427SLeon Romanovsky err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 204404bcc1c2SLeon Romanovsky kvfree(in); 20456367da46SLeon Romanovsky if (err) 204604bcc1c2SLeon Romanovsky return err; 204704bcc1c2SLeon Romanovsky 204804bcc1c2SLeon Romanovsky base->container_mibqp = qp; 204904bcc1c2SLeon Romanovsky base->mqp.event = mlx5_ib_qp_event; 205092cd667cSLeon Romanovsky if (MLX5_CAP_GEN(mdev, ece_support)) 20513e09a427SLeon Romanovsky params->resp.ece_options = MLX5_GET(create_qp_out, out, ece); 205204bcc1c2SLeon Romanovsky 205304bcc1c2SLeon Romanovsky spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 205404bcc1c2SLeon Romanovsky list_add_tail(&qp->qps_list, &dev->qp_list); 205504bcc1c2SLeon Romanovsky spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 205604bcc1c2SLeon Romanovsky 2057968f0b6fSLeon Romanovsky qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn; 205804bcc1c2SLeon Romanovsky return 0; 205904bcc1c2SLeon Romanovsky } 206004bcc1c2SLeon Romanovsky 20612013b4d5SLior Nahmanson static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd, 20622013b4d5SLior Nahmanson struct mlx5_ib_qp *qp, 20632013b4d5SLior Nahmanson struct mlx5_create_qp_params *params) 20642013b4d5SLior Nahmanson { 20652013b4d5SLior Nahmanson struct ib_qp_init_attr *init_attr = params->attr; 20662013b4d5SLior Nahmanson struct mlx5_ib_create_qp *ucmd = params->ucmd; 20672013b4d5SLior Nahmanson u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 20682013b4d5SLior Nahmanson struct ib_udata *udata = 
params->udata; 20692013b4d5SLior Nahmanson u32 uidx = params->uidx; 20702013b4d5SLior Nahmanson struct mlx5_ib_resources *devr = &dev->devr; 20712013b4d5SLior Nahmanson int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 20722013b4d5SLior Nahmanson struct mlx5_core_dev *mdev = dev->mdev; 20732013b4d5SLior Nahmanson struct mlx5_ib_cq *send_cq; 20742013b4d5SLior Nahmanson struct mlx5_ib_cq *recv_cq; 20752013b4d5SLior Nahmanson unsigned long flags; 20762013b4d5SLior Nahmanson struct mlx5_ib_qp_base *base; 20772013b4d5SLior Nahmanson int ts_format; 20782013b4d5SLior Nahmanson int mlx5_st; 20792013b4d5SLior Nahmanson void *qpc; 20802013b4d5SLior Nahmanson u32 *in; 20812013b4d5SLior Nahmanson int err; 20822013b4d5SLior Nahmanson 20832013b4d5SLior Nahmanson spin_lock_init(&qp->sq.lock); 20842013b4d5SLior Nahmanson spin_lock_init(&qp->rq.lock); 20852013b4d5SLior Nahmanson 20862013b4d5SLior Nahmanson mlx5_st = to_mlx5_st(qp->type); 20872013b4d5SLior Nahmanson if (mlx5_st < 0) 20882013b4d5SLior Nahmanson return -EINVAL; 20892013b4d5SLior Nahmanson 20902013b4d5SLior Nahmanson if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 20912013b4d5SLior Nahmanson qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 20922013b4d5SLior Nahmanson 20932013b4d5SLior Nahmanson base = &qp->trans_qp.base; 20942013b4d5SLior Nahmanson 20952013b4d5SLior Nahmanson qp->has_rq = qp_has_rq(init_attr); 20962013b4d5SLior Nahmanson err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); 20972013b4d5SLior Nahmanson if (err) { 20982013b4d5SLior Nahmanson mlx5_ib_dbg(dev, "err %d\n", err); 20992013b4d5SLior Nahmanson return err; 21002013b4d5SLior Nahmanson } 21012013b4d5SLior Nahmanson 21022013b4d5SLior Nahmanson if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || 21032013b4d5SLior Nahmanson ucmd->rq_wqe_count != qp->rq.wqe_cnt) 21042013b4d5SLior Nahmanson return -EINVAL; 21052013b4d5SLior Nahmanson 21062013b4d5SLior Nahmanson if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) 21072013b4d5SLior Nahmanson 
return -EINVAL; 21082013b4d5SLior Nahmanson 21092013b4d5SLior Nahmanson ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq), 21102013b4d5SLior Nahmanson to_mcq(init_attr->recv_cq)); 21112013b4d5SLior Nahmanson 21122013b4d5SLior Nahmanson if (ts_format < 0) 21132013b4d5SLior Nahmanson return ts_format; 21142013b4d5SLior Nahmanson 21152013b4d5SLior Nahmanson err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, ¶ms->resp, 21162013b4d5SLior Nahmanson &inlen, base, ucmd); 21172013b4d5SLior Nahmanson if (err) 21182013b4d5SLior Nahmanson return err; 21192013b4d5SLior Nahmanson 21202013b4d5SLior Nahmanson if (MLX5_CAP_GEN(mdev, ece_support)) 21212013b4d5SLior Nahmanson MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); 21222013b4d5SLior Nahmanson qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 21232013b4d5SLior Nahmanson 21242013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, st, mlx5_st); 21252013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 21262013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn); 21272013b4d5SLior Nahmanson 21282013b4d5SLior Nahmanson if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) 21292013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, wq_signature, 1); 21302013b4d5SLior Nahmanson 21312013b4d5SLior Nahmanson if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 21322013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cd_master, 1); 21332013b4d5SLior Nahmanson if (qp->flags & IB_QP_CREATE_MANAGED_SEND) 21342013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cd_slave_send, 1); 21352013b4d5SLior Nahmanson if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) 21362013b4d5SLior Nahmanson configure_requester_scat_cqe(dev, qp, init_attr, qpc); 21372013b4d5SLior Nahmanson 21382013b4d5SLior Nahmanson if (qp->rq.wqe_cnt) { 21392013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 21402013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 21412013b4d5SLior Nahmanson } 21422013b4d5SLior Nahmanson 214311656f59SLior Nahmanson if 
(qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) { 214411656f59SLior Nahmanson MLX5_SET(qpc, qpc, log_num_dci_stream_channels, 214511656f59SLior Nahmanson ucmd->dci_streams.log_num_concurent); 214611656f59SLior Nahmanson MLX5_SET(qpc, qpc, log_num_dci_errored_streams, 214711656f59SLior Nahmanson ucmd->dci_streams.log_num_errored); 214811656f59SLior Nahmanson } 214911656f59SLior Nahmanson 21502013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, ts_format, ts_format); 21512013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); 21522013b4d5SLior Nahmanson 21532013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 21542013b4d5SLior Nahmanson 21552013b4d5SLior Nahmanson /* Set default resources */ 21562013b4d5SLior Nahmanson if (init_attr->srq) { 21572013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0); 21582013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 21592013b4d5SLior Nahmanson to_msrq(init_attr->srq)->msrq.srqn); 21602013b4d5SLior Nahmanson } else { 21612013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 21622013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 21632013b4d5SLior Nahmanson to_msrq(devr->s1)->msrq.srqn); 21642013b4d5SLior Nahmanson } 21652013b4d5SLior Nahmanson 21662013b4d5SLior Nahmanson if (init_attr->send_cq) 21672013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cqn_snd, 21682013b4d5SLior Nahmanson to_mcq(init_attr->send_cq)->mcq.cqn); 21692013b4d5SLior Nahmanson 21702013b4d5SLior Nahmanson if (init_attr->recv_cq) 21712013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, cqn_rcv, 21722013b4d5SLior Nahmanson to_mcq(init_attr->recv_cq)->mcq.cqn); 21732013b4d5SLior Nahmanson 21742013b4d5SLior Nahmanson MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 21752013b4d5SLior Nahmanson 21762013b4d5SLior Nahmanson /* 0xffffff means we ask to work with cqe version 0 */ 21772013b4d5SLior Nahmanson if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 21782013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, user_index, 
uidx); 21792013b4d5SLior Nahmanson 21802013b4d5SLior Nahmanson if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { 21812013b4d5SLior Nahmanson MLX5_SET(qpc, qpc, end_padding_mode, 21822013b4d5SLior Nahmanson MLX5_WQ_END_PAD_MODE_ALIGN); 21832013b4d5SLior Nahmanson /* Special case to clean flag */ 21842013b4d5SLior Nahmanson qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 21852013b4d5SLior Nahmanson } 21862013b4d5SLior Nahmanson 21872013b4d5SLior Nahmanson err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 21882013b4d5SLior Nahmanson 21892013b4d5SLior Nahmanson kvfree(in); 21902013b4d5SLior Nahmanson if (err) 21912013b4d5SLior Nahmanson goto err_create; 21922013b4d5SLior Nahmanson 21932013b4d5SLior Nahmanson base->container_mibqp = qp; 21942013b4d5SLior Nahmanson base->mqp.event = mlx5_ib_qp_event; 21952013b4d5SLior Nahmanson if (MLX5_CAP_GEN(mdev, ece_support)) 21962013b4d5SLior Nahmanson params->resp.ece_options = MLX5_GET(create_qp_out, out, ece); 21972013b4d5SLior Nahmanson 21982013b4d5SLior Nahmanson get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq, 21992013b4d5SLior Nahmanson &send_cq, &recv_cq); 22002013b4d5SLior Nahmanson spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 22012013b4d5SLior Nahmanson mlx5_ib_lock_cqs(send_cq, recv_cq); 22022013b4d5SLior Nahmanson /* Maintain device to QPs access, needed for further handling via reset 22032013b4d5SLior Nahmanson * flow 22042013b4d5SLior Nahmanson */ 22052013b4d5SLior Nahmanson list_add_tail(&qp->qps_list, &dev->qp_list); 22062013b4d5SLior Nahmanson /* Maintain CQ to QPs access, needed for further handling via reset flow 22072013b4d5SLior Nahmanson */ 22082013b4d5SLior Nahmanson if (send_cq) 22092013b4d5SLior Nahmanson list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 22102013b4d5SLior Nahmanson if (recv_cq) 22112013b4d5SLior Nahmanson list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 22122013b4d5SLior Nahmanson mlx5_ib_unlock_cqs(send_cq, recv_cq); 22132013b4d5SLior 
Nahmanson spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 22142013b4d5SLior Nahmanson 22152013b4d5SLior Nahmanson return 0; 22162013b4d5SLior Nahmanson 22172013b4d5SLior Nahmanson err_create: 22182013b4d5SLior Nahmanson destroy_qp(dev, qp, base, udata); 22192013b4d5SLior Nahmanson return err; 22202013b4d5SLior Nahmanson } 22212013b4d5SLior Nahmanson 222298fc1126SLeon Romanovsky static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 2223f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 2224f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 2225e126ba97SEli Cohen { 2226f78d358cSLeon Romanovsky struct ib_qp_init_attr *init_attr = params->attr; 2227f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 22283e09a427SLeon Romanovsky u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 2229f78d358cSLeon Romanovsky struct ib_udata *udata = params->udata; 2230f78d358cSLeon Romanovsky u32 uidx = params->uidx; 2231e126ba97SEli Cohen struct mlx5_ib_resources *devr = &dev->devr; 223209a7d9ecSSaeed Mahameed int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 2233938fe83cSSaeed Mahameed struct mlx5_core_dev *mdev = dev->mdev; 223489ea94a7SMaor Gottlieb struct mlx5_ib_cq *send_cq; 223589ea94a7SMaor Gottlieb struct mlx5_ib_cq *recv_cq; 223689ea94a7SMaor Gottlieb unsigned long flags; 223709a7d9ecSSaeed Mahameed struct mlx5_ib_qp_base *base; 22382fe8d4b8SAharon Landau int ts_format; 2239e7b169f3SNoa Osherovich int mlx5_st; 2240cfb5e088SHaggai Abramovsky void *qpc; 224109a7d9ecSSaeed Mahameed u32 *in; 224209a7d9ecSSaeed Mahameed int err; 2243e126ba97SEli Cohen 2244e126ba97SEli Cohen spin_lock_init(&qp->sq.lock); 2245e126ba97SEli Cohen spin_lock_init(&qp->rq.lock); 2246e126ba97SEli Cohen 22477aede1a2SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 2248e7b169f3SNoa Osherovich if (mlx5_st < 0) 2249e7b169f3SNoa Osherovich return -EINVAL; 2250e7b169f3SNoa Osherovich 2251e126ba97SEli Cohen if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 
2252e126ba97SEli Cohen qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 2253e126ba97SEli Cohen 22542978975cSLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 22552978975cSLeon Romanovsky qp->underlay_qpn = init_attr->source_qpn; 22562978975cSLeon Romanovsky 2257c2e53b2cSYishai Hadas base = (init_attr->qp_type == IB_QPT_RAW_PACKET || 22582be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) ? 2259c2e53b2cSYishai Hadas &qp->raw_packet_qp.rq.base : 2260c2e53b2cSYishai Hadas &qp->trans_qp.base; 2261c2e53b2cSYishai Hadas 2262e126ba97SEli Cohen qp->has_rq = qp_has_rq(init_attr); 22632dfac92dSLeon Romanovsky err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); 2264e126ba97SEli Cohen if (err) { 2265e126ba97SEli Cohen mlx5_ib_dbg(dev, "err %d\n", err); 2266e126ba97SEli Cohen return err; 2267e126ba97SEli Cohen } 2268e126ba97SEli Cohen 22692dfac92dSLeon Romanovsky if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || 227098fc1126SLeon Romanovsky ucmd->rq_wqe_count != qp->rq.wqe_cnt) 2271e126ba97SEli Cohen return -EINVAL; 2272e126ba97SEli Cohen 227398fc1126SLeon Romanovsky if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) 227498fc1126SLeon Romanovsky return -EINVAL; 227598fc1126SLeon Romanovsky 22762fe8d4b8SAharon Landau if (init_attr->qp_type != IB_QPT_RAW_PACKET) { 22772fe8d4b8SAharon Landau ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq), 22782fe8d4b8SAharon Landau to_mcq(init_attr->recv_cq)); 22792fe8d4b8SAharon Landau if (ts_format < 0) 22802fe8d4b8SAharon Landau return ts_format; 22812fe8d4b8SAharon Landau } 22822fe8d4b8SAharon Landau 228308d53976SLeon Romanovsky err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, ¶ms->resp, 228408d53976SLeon Romanovsky &inlen, base, ucmd); 2285e126ba97SEli Cohen if (err) 2286e126ba97SEli Cohen return err; 2287e126ba97SEli Cohen 2288e126ba97SEli Cohen if (is_sqp(init_attr->qp_type)) 2289e126ba97SEli Cohen qp->port = init_attr->port_num; 2290e126ba97SEli Cohen 2291e383085cSLeon Romanovsky 
if (MLX5_CAP_GEN(mdev, ece_support)) 2292e383085cSLeon Romanovsky MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); 229309a7d9ecSSaeed Mahameed qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 229409a7d9ecSSaeed Mahameed 2295e7b169f3SNoa Osherovich MLX5_SET(qpc, qpc, st, mlx5_st); 229609a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 229798fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn); 2298e126ba97SEli Cohen 2299c95e6d53SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) 230009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, wq_signature, 1); 2301e126ba97SEli Cohen 23022be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 230309a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, block_lb_mc, 1); 2304f360d88aSEli Cohen 23052be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) 230609a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_master, 1); 23072be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_SEND) 230809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_slave_send, 1); 23092be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_MANAGED_RECV) 231009a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cd_slave_receive, 1); 23112be08c30SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) 2312569c6651SDanit Goldberg MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1); 231390ecb37aSLeon Romanovsky if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && 231490ecb37aSLeon Romanovsky (init_attr->qp_type == IB_QPT_RC || 23158bde2c50SLeon Romanovsky init_attr->qp_type == IB_QPT_UC)) { 231652c81f47SColin Ian King int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq); 23178bde2c50SLeon Romanovsky 23188bde2c50SLeon Romanovsky MLX5_SET(qpc, qpc, cs_res, 23198bde2c50SLeon Romanovsky rcqe_sz == 128 ? 
MLX5_RES_SCAT_DATA64_CQE : 23208bde2c50SLeon Romanovsky MLX5_RES_SCAT_DATA32_CQE); 23218bde2c50SLeon Romanovsky } 232290ecb37aSLeon Romanovsky if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && 23237aede1a2SLeon Romanovsky (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC)) 232481530ab0SLeon Romanovsky configure_requester_scat_cqe(dev, qp, init_attr, qpc); 2325e126ba97SEli Cohen 2326e126ba97SEli Cohen if (qp->rq.wqe_cnt) { 232709a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 232809a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 2329e126ba97SEli Cohen } 2330e126ba97SEli Cohen 23312fe8d4b8SAharon Landau if (init_attr->qp_type != IB_QPT_RAW_PACKET) 23322fe8d4b8SAharon Landau MLX5_SET(qpc, qpc, ts_format, ts_format); 23332fe8d4b8SAharon Landau 233409a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); 2335e126ba97SEli Cohen 23363fd3307eSArtemy Kovalyov if (qp->sq.wqe_cnt) { 233709a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 23383fd3307eSArtemy Kovalyov } else { 233909a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, no_sq, 1); 23403fd3307eSArtemy Kovalyov if (init_attr->srq && 23413fd3307eSArtemy Kovalyov init_attr->srq->srq_type == IB_SRQT_TM) 23423fd3307eSArtemy Kovalyov MLX5_SET(qpc, qpc, offload_type, 23433fd3307eSArtemy Kovalyov MLX5_QPC_OFFLOAD_TYPE_RNDV); 23443fd3307eSArtemy Kovalyov } 2345e126ba97SEli Cohen 2346e126ba97SEli Cohen /* Set default resources */ 2347e126ba97SEli Cohen switch (init_attr->qp_type) { 2348e126ba97SEli Cohen case IB_QPT_XRC_INI: 234909a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 2350f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 235109a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 2352e126ba97SEli Cohen break; 2353e126ba97SEli Cohen default: 2354e126ba97SEli Cohen if (init_attr->srq) { 2355f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, 
xrcd, devr->xrcdn0); 235609a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); 2357e126ba97SEli Cohen } else { 2358f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 235909a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); 2360e126ba97SEli Cohen } 2361e126ba97SEli Cohen } 2362e126ba97SEli Cohen 2363e126ba97SEli Cohen if (init_attr->send_cq) 236409a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); 2365e126ba97SEli Cohen 2366e126ba97SEli Cohen if (init_attr->recv_cq) 236709a7d9ecSSaeed Mahameed MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); 2368e126ba97SEli Cohen 236909a7d9ecSSaeed Mahameed MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 2370e126ba97SEli Cohen 2371cfb5e088SHaggai Abramovsky /* 0xffffff means we ask to work with cqe version 0 */ 237209a7d9ecSSaeed Mahameed if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 2373cfb5e088SHaggai Abramovsky MLX5_SET(qpc, qpc, user_index, uidx); 237409a7d9ecSSaeed Mahameed 23752978975cSLeon Romanovsky if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING && 23762978975cSLeon Romanovsky init_attr->qp_type != IB_QPT_RAW_PACKET) { 2377b1383aa6SNoa Osherovich MLX5_SET(qpc, qpc, end_padding_mode, 2378b1383aa6SNoa Osherovich MLX5_WQ_END_PAD_MODE_ALIGN); 23792978975cSLeon Romanovsky /* Special case to clean flag */ 23802978975cSLeon Romanovsky qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; 2381b1383aa6SNoa Osherovich } 2382b1383aa6SNoa Osherovich 2383c2e53b2cSYishai Hadas if (init_attr->qp_type == IB_QPT_RAW_PACKET || 23842be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 23852dfac92dSLeon Romanovsky qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr; 23860fb2ed66Smajd@mellanox.com raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); 23877f72052cSYishai Hadas err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata, 23882fe8d4b8SAharon Landau ¶ms->resp, 
init_attr); 238904bcc1c2SLeon Romanovsky } else 23903e09a427SLeon Romanovsky err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 2391e126ba97SEli Cohen 2392479163f4SAl Viro kvfree(in); 239304bcc1c2SLeon Romanovsky if (err) 239404bcc1c2SLeon Romanovsky goto err_create; 2395e126ba97SEli Cohen 239619098df2Smajd@mellanox.com base->container_mibqp = qp; 239719098df2Smajd@mellanox.com base->mqp.event = mlx5_ib_qp_event; 239892cd667cSLeon Romanovsky if (MLX5_CAP_GEN(mdev, ece_support)) 23993e09a427SLeon Romanovsky params->resp.ece_options = MLX5_GET(create_qp_out, out, ece); 2400e126ba97SEli Cohen 24017aede1a2SLeon Romanovsky get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq, 240289ea94a7SMaor Gottlieb &send_cq, &recv_cq); 240389ea94a7SMaor Gottlieb spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 240489ea94a7SMaor Gottlieb mlx5_ib_lock_cqs(send_cq, recv_cq); 240589ea94a7SMaor Gottlieb /* Maintain device to QPs access, needed for further handling via reset 240689ea94a7SMaor Gottlieb * flow 240789ea94a7SMaor Gottlieb */ 240889ea94a7SMaor Gottlieb list_add_tail(&qp->qps_list, &dev->qp_list); 240989ea94a7SMaor Gottlieb /* Maintain CQ to QPs access, needed for further handling via reset flow 241089ea94a7SMaor Gottlieb */ 241189ea94a7SMaor Gottlieb if (send_cq) 241289ea94a7SMaor Gottlieb list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 241389ea94a7SMaor Gottlieb if (recv_cq) 241489ea94a7SMaor Gottlieb list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 241589ea94a7SMaor Gottlieb mlx5_ib_unlock_cqs(send_cq, recv_cq); 241689ea94a7SMaor Gottlieb spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 241789ea94a7SMaor Gottlieb 2418e126ba97SEli Cohen return 0; 2419e126ba97SEli Cohen 2420e126ba97SEli Cohen err_create: 2421747c519cSLeon Romanovsky destroy_qp(dev, qp, base, udata); 2422e126ba97SEli Cohen return err; 2423e126ba97SEli Cohen } 2424e126ba97SEli Cohen 242598fc1126SLeon Romanovsky static int create_kernel_qp(struct mlx5_ib_dev 
*dev, struct ib_pd *pd, 2426f78d358cSLeon Romanovsky struct mlx5_ib_qp *qp, 2427f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 242898fc1126SLeon Romanovsky { 2429f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 2430f78d358cSLeon Romanovsky u32 uidx = params->uidx; 243198fc1126SLeon Romanovsky struct mlx5_ib_resources *devr = &dev->devr; 24323e09a427SLeon Romanovsky u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; 243398fc1126SLeon Romanovsky int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 243498fc1126SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 243598fc1126SLeon Romanovsky struct mlx5_ib_cq *send_cq; 243698fc1126SLeon Romanovsky struct mlx5_ib_cq *recv_cq; 243798fc1126SLeon Romanovsky unsigned long flags; 243898fc1126SLeon Romanovsky struct mlx5_ib_qp_base *base; 243998fc1126SLeon Romanovsky int mlx5_st; 244098fc1126SLeon Romanovsky void *qpc; 244198fc1126SLeon Romanovsky u32 *in; 244298fc1126SLeon Romanovsky int err; 244398fc1126SLeon Romanovsky 244498fc1126SLeon Romanovsky spin_lock_init(&qp->sq.lock); 244598fc1126SLeon Romanovsky spin_lock_init(&qp->rq.lock); 244698fc1126SLeon Romanovsky 244798fc1126SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 244898fc1126SLeon Romanovsky if (mlx5_st < 0) 244998fc1126SLeon Romanovsky return -EINVAL; 245098fc1126SLeon Romanovsky 245198fc1126SLeon Romanovsky if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) 245298fc1126SLeon Romanovsky qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 245398fc1126SLeon Romanovsky 245498fc1126SLeon Romanovsky base = &qp->trans_qp.base; 245598fc1126SLeon Romanovsky 245698fc1126SLeon Romanovsky qp->has_rq = qp_has_rq(attr); 245798fc1126SLeon Romanovsky err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL); 245898fc1126SLeon Romanovsky if (err) { 245998fc1126SLeon Romanovsky mlx5_ib_dbg(dev, "err %d\n", err); 246098fc1126SLeon Romanovsky return err; 246198fc1126SLeon Romanovsky } 246298fc1126SLeon Romanovsky 246398fc1126SLeon Romanovsky err = _create_kernel_qp(dev, 
attr, qp, &in, &inlen, base); 246498fc1126SLeon Romanovsky if (err) 246598fc1126SLeon Romanovsky return err; 246698fc1126SLeon Romanovsky 246798fc1126SLeon Romanovsky if (is_sqp(attr->qp_type)) 246898fc1126SLeon Romanovsky qp->port = attr->port_num; 246998fc1126SLeon Romanovsky 247098fc1126SLeon Romanovsky qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 247198fc1126SLeon Romanovsky 247298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, st, mlx5_st); 247398fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 247498fc1126SLeon Romanovsky 247598fc1126SLeon Romanovsky if (attr->qp_type != MLX5_IB_QPT_REG_UMR) 247698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn); 247798fc1126SLeon Romanovsky else 247898fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, latency_sensitive, 1); 247998fc1126SLeon Romanovsky 248098fc1126SLeon Romanovsky 248198fc1126SLeon Romanovsky if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 248298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, block_lb_mc, 1); 248398fc1126SLeon Romanovsky 248498fc1126SLeon Romanovsky if (qp->rq.wqe_cnt) { 248598fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 248698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 248798fc1126SLeon Romanovsky } 248898fc1126SLeon Romanovsky 248998fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr)); 249098fc1126SLeon Romanovsky 249198fc1126SLeon Romanovsky if (qp->sq.wqe_cnt) 249298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 249398fc1126SLeon Romanovsky else 249498fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, no_sq, 1); 249598fc1126SLeon Romanovsky 249698fc1126SLeon Romanovsky if (attr->srq) { 2497f4375443SLeon Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0); 249898fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 249998fc1126SLeon Romanovsky to_msrq(attr->srq)->msrq.srqn); 250098fc1126SLeon Romanovsky } else { 2501f4375443SLeon 
Romanovsky MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1); 250298fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, 250398fc1126SLeon Romanovsky to_msrq(devr->s1)->msrq.srqn); 250498fc1126SLeon Romanovsky } 250598fc1126SLeon Romanovsky 250698fc1126SLeon Romanovsky if (attr->send_cq) 250798fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn); 250898fc1126SLeon Romanovsky 250998fc1126SLeon Romanovsky if (attr->recv_cq) 251098fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn); 251198fc1126SLeon Romanovsky 251298fc1126SLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 251398fc1126SLeon Romanovsky 251498fc1126SLeon Romanovsky /* 0xffffff means we ask to work with cqe version 0 */ 251598fc1126SLeon Romanovsky if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 251698fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, user_index, uidx); 251798fc1126SLeon Romanovsky 251898fc1126SLeon Romanovsky /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */ 251998fc1126SLeon Romanovsky if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) 252098fc1126SLeon Romanovsky MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); 252198fc1126SLeon Romanovsky 25223e09a427SLeon Romanovsky err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out); 252398fc1126SLeon Romanovsky kvfree(in); 252498fc1126SLeon Romanovsky if (err) 252598fc1126SLeon Romanovsky goto err_create; 252698fc1126SLeon Romanovsky 252798fc1126SLeon Romanovsky base->container_mibqp = qp; 252898fc1126SLeon Romanovsky base->mqp.event = mlx5_ib_qp_event; 252998fc1126SLeon Romanovsky 253098fc1126SLeon Romanovsky get_cqs(qp->type, attr->send_cq, attr->recv_cq, 253198fc1126SLeon Romanovsky &send_cq, &recv_cq); 253298fc1126SLeon Romanovsky spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 253398fc1126SLeon Romanovsky mlx5_ib_lock_cqs(send_cq, recv_cq); 253498fc1126SLeon Romanovsky /* Maintain device to QPs access, needed for further handling via reset 
253598fc1126SLeon Romanovsky * flow 253698fc1126SLeon Romanovsky */ 253798fc1126SLeon Romanovsky list_add_tail(&qp->qps_list, &dev->qp_list); 253898fc1126SLeon Romanovsky /* Maintain CQ to QPs access, needed for further handling via reset flow 253998fc1126SLeon Romanovsky */ 254098fc1126SLeon Romanovsky if (send_cq) 254198fc1126SLeon Romanovsky list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 254298fc1126SLeon Romanovsky if (recv_cq) 254398fc1126SLeon Romanovsky list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 254498fc1126SLeon Romanovsky mlx5_ib_unlock_cqs(send_cq, recv_cq); 254598fc1126SLeon Romanovsky spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 254698fc1126SLeon Romanovsky 254798fc1126SLeon Romanovsky return 0; 254898fc1126SLeon Romanovsky 254998fc1126SLeon Romanovsky err_create: 2550747c519cSLeon Romanovsky destroy_qp(dev, qp, base, NULL); 255198fc1126SLeon Romanovsky return err; 255298fc1126SLeon Romanovsky } 255398fc1126SLeon Romanovsky 2554e126ba97SEli Cohen static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 2555e126ba97SEli Cohen __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 2556e126ba97SEli Cohen { 2557e126ba97SEli Cohen if (send_cq) { 2558e126ba97SEli Cohen if (recv_cq) { 2559e126ba97SEli Cohen if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 256089ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 2561e126ba97SEli Cohen spin_lock_nested(&recv_cq->lock, 2562e126ba97SEli Cohen SINGLE_DEPTH_NESTING); 2563e126ba97SEli Cohen } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 256489ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 2565e126ba97SEli Cohen __acquire(&recv_cq->lock); 2566e126ba97SEli Cohen } else { 256789ea94a7SMaor Gottlieb spin_lock(&recv_cq->lock); 2568e126ba97SEli Cohen spin_lock_nested(&send_cq->lock, 2569e126ba97SEli Cohen SINGLE_DEPTH_NESTING); 2570e126ba97SEli Cohen } 2571e126ba97SEli Cohen } else { 257289ea94a7SMaor Gottlieb spin_lock(&send_cq->lock); 25736a4f139aSEli 
Cohen __acquire(&recv_cq->lock); 2574e126ba97SEli Cohen } 2575e126ba97SEli Cohen } else if (recv_cq) { 257689ea94a7SMaor Gottlieb spin_lock(&recv_cq->lock); 25776a4f139aSEli Cohen __acquire(&send_cq->lock); 25786a4f139aSEli Cohen } else { 25796a4f139aSEli Cohen __acquire(&send_cq->lock); 25806a4f139aSEli Cohen __acquire(&recv_cq->lock); 2581e126ba97SEli Cohen } 2582e126ba97SEli Cohen } 2583e126ba97SEli Cohen 2584e126ba97SEli Cohen static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 2585e126ba97SEli Cohen __releases(&send_cq->lock) __releases(&recv_cq->lock) 2586e126ba97SEli Cohen { 2587e126ba97SEli Cohen if (send_cq) { 2588e126ba97SEli Cohen if (recv_cq) { 2589e126ba97SEli Cohen if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 2590e126ba97SEli Cohen spin_unlock(&recv_cq->lock); 259189ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2592e126ba97SEli Cohen } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 2593e126ba97SEli Cohen __release(&recv_cq->lock); 259489ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2595e126ba97SEli Cohen } else { 2596e126ba97SEli Cohen spin_unlock(&send_cq->lock); 259789ea94a7SMaor Gottlieb spin_unlock(&recv_cq->lock); 2598e126ba97SEli Cohen } 2599e126ba97SEli Cohen } else { 26006a4f139aSEli Cohen __release(&recv_cq->lock); 260189ea94a7SMaor Gottlieb spin_unlock(&send_cq->lock); 2602e126ba97SEli Cohen } 2603e126ba97SEli Cohen } else if (recv_cq) { 26046a4f139aSEli Cohen __release(&send_cq->lock); 260589ea94a7SMaor Gottlieb spin_unlock(&recv_cq->lock); 26066a4f139aSEli Cohen } else { 26076a4f139aSEli Cohen __release(&recv_cq->lock); 26086a4f139aSEli Cohen __release(&send_cq->lock); 2609e126ba97SEli Cohen } 2610e126ba97SEli Cohen } 2611e126ba97SEli Cohen 261289ea94a7SMaor Gottlieb static void get_cqs(enum ib_qp_type qp_type, 261389ea94a7SMaor Gottlieb struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 2614e126ba97SEli Cohen struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) 2615e126ba97SEli Cohen { 
261689ea94a7SMaor Gottlieb switch (qp_type) { 2617e126ba97SEli Cohen case IB_QPT_XRC_TGT: 2618e126ba97SEli Cohen *send_cq = NULL; 2619e126ba97SEli Cohen *recv_cq = NULL; 2620e126ba97SEli Cohen break; 2621e126ba97SEli Cohen case MLX5_IB_QPT_REG_UMR: 2622e126ba97SEli Cohen case IB_QPT_XRC_INI: 262389ea94a7SMaor Gottlieb *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 2624e126ba97SEli Cohen *recv_cq = NULL; 2625e126ba97SEli Cohen break; 2626e126ba97SEli Cohen 2627e126ba97SEli Cohen case IB_QPT_SMI: 2628d16e91daSHaggai Eran case MLX5_IB_QPT_HW_GSI: 2629e126ba97SEli Cohen case IB_QPT_RC: 2630e126ba97SEli Cohen case IB_QPT_UC: 2631e126ba97SEli Cohen case IB_QPT_UD: 26320fb2ed66Smajd@mellanox.com case IB_QPT_RAW_PACKET: 263389ea94a7SMaor Gottlieb *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 263489ea94a7SMaor Gottlieb *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL; 2635e126ba97SEli Cohen break; 2636e126ba97SEli Cohen default: 2637e126ba97SEli Cohen *send_cq = NULL; 2638e126ba97SEli Cohen *recv_cq = NULL; 2639e126ba97SEli Cohen break; 2640e126ba97SEli Cohen } 2641e126ba97SEli Cohen } 2642e126ba97SEli Cohen 2643ad5f8e96Smajd@mellanox.com static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 264413eab21fSAviv Heller const struct mlx5_modify_raw_qp_param *raw_qp_param, 264513eab21fSAviv Heller u8 lag_tx_affinity); 2646ad5f8e96Smajd@mellanox.com 2647bdeacabdSShamir Rabinovitch static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 2648bdeacabdSShamir Rabinovitch struct ib_udata *udata) 2649e126ba97SEli Cohen { 2650e126ba97SEli Cohen struct mlx5_ib_cq *send_cq, *recv_cq; 2651c2e53b2cSYishai Hadas struct mlx5_ib_qp_base *base; 265289ea94a7SMaor Gottlieb unsigned long flags; 2653e126ba97SEli Cohen int err; 2654e126ba97SEli Cohen 26556c41965dSLeon Romanovsky if (qp->is_rss) { 265628d61370SYishai Hadas destroy_rss_raw_qp_tir(dev, qp); 265728d61370SYishai Hadas return; 265828d61370SYishai Hadas } 265928d61370SYishai Hadas 
26606c41965dSLeon Romanovsky base = (qp->type == IB_QPT_RAW_PACKET || 26612be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) ? 26620fb2ed66Smajd@mellanox.com &qp->raw_packet_qp.rq.base : 26630fb2ed66Smajd@mellanox.com &qp->trans_qp.base; 26640fb2ed66Smajd@mellanox.com 26656aec21f6SHaggai Eran if (qp->state != IB_QPS_RESET) { 26666c41965dSLeon Romanovsky if (qp->type != IB_QPT_RAW_PACKET && 26672be08c30SLeon Romanovsky !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) { 2668333fbaa0SLeon Romanovsky err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, 26695f62a521SLeon Romanovsky NULL, &base->mqp, NULL); 2670ad5f8e96Smajd@mellanox.com } else { 26710680efa2SAlex Vesker struct mlx5_modify_raw_qp_param raw_qp_param = { 26720680efa2SAlex Vesker .operation = MLX5_CMD_OP_2RST_QP 26730680efa2SAlex Vesker }; 26740680efa2SAlex Vesker 267513eab21fSAviv Heller err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0); 2676ad5f8e96Smajd@mellanox.com } 2677ad5f8e96Smajd@mellanox.com if (err) 2678427c1e7bSmajd@mellanox.com mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n", 267919098df2Smajd@mellanox.com base->mqp.qpn); 26806aec21f6SHaggai Eran } 2681e126ba97SEli Cohen 26826c41965dSLeon Romanovsky get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, 26836c41965dSLeon Romanovsky &recv_cq); 268489ea94a7SMaor Gottlieb 268589ea94a7SMaor Gottlieb spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 268689ea94a7SMaor Gottlieb mlx5_ib_lock_cqs(send_cq, recv_cq); 268789ea94a7SMaor Gottlieb /* del from lists under both locks above to protect reset flow paths */ 268889ea94a7SMaor Gottlieb list_del(&qp->qps_list); 268989ea94a7SMaor Gottlieb if (send_cq) 269089ea94a7SMaor Gottlieb list_del(&qp->cq_send_list); 269189ea94a7SMaor Gottlieb 269289ea94a7SMaor Gottlieb if (recv_cq) 269389ea94a7SMaor Gottlieb list_del(&qp->cq_recv_list); 2694e126ba97SEli Cohen 269503c4077bSLeon Romanovsky if (!udata) { 269619098df2Smajd@mellanox.com __mlx5_ib_cq_clean(recv_cq, 
base->mqp.qpn, 2697e126ba97SEli Cohen qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 2698e126ba97SEli Cohen if (send_cq != recv_cq) 269919098df2Smajd@mellanox.com __mlx5_ib_cq_clean(send_cq, base->mqp.qpn, 270019098df2Smajd@mellanox.com NULL); 2701e126ba97SEli Cohen } 270289ea94a7SMaor Gottlieb mlx5_ib_unlock_cqs(send_cq, recv_cq); 270389ea94a7SMaor Gottlieb spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 2704e126ba97SEli Cohen 27056c41965dSLeon Romanovsky if (qp->type == IB_QPT_RAW_PACKET || 27062be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 27070fb2ed66Smajd@mellanox.com destroy_raw_packet_qp(dev, qp); 27080fb2ed66Smajd@mellanox.com } else { 2709333fbaa0SLeon Romanovsky err = mlx5_core_destroy_qp(dev, &base->mqp); 2710e126ba97SEli Cohen if (err) 27110fb2ed66Smajd@mellanox.com mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", 27120fb2ed66Smajd@mellanox.com base->mqp.qpn); 27130fb2ed66Smajd@mellanox.com } 2714e126ba97SEli Cohen 2715747c519cSLeon Romanovsky destroy_qp(dev, qp, base, udata); 2716e126ba97SEli Cohen } 2717e126ba97SEli Cohen 2718a645a89dSLeon Romanovsky static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd, 2719a645a89dSLeon Romanovsky struct mlx5_ib_qp *qp, 2720f78d358cSLeon Romanovsky struct mlx5_create_qp_params *params) 2721b4aaa1f0SMoni Shoua { 2722f78d358cSLeon Romanovsky struct ib_qp_init_attr *attr = params->attr; 2723f78d358cSLeon Romanovsky struct mlx5_ib_create_qp *ucmd = params->ucmd; 2724f78d358cSLeon Romanovsky u32 uidx = params->uidx; 2725b4aaa1f0SMoni Shoua void *dctc; 2726b4aaa1f0SMoni Shoua 27277c4b1ab9SMark Zhang if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct)) 27287c4b1ab9SMark Zhang return -EOPNOTSUPP; 27297c4b1ab9SMark Zhang 2730b4aaa1f0SMoni Shoua qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL); 27319c2ba4edSLeon Romanovsky if (!qp->dct.in) 273247c80612SLeon Romanovsky return -ENOMEM; 2733b4aaa1f0SMoni Shoua 2734a01a5860SYishai Hadas 
MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid); 2735b4aaa1f0SMoni Shoua dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); 2736b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn); 2737b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn); 2738b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn); 2739b4aaa1f0SMoni Shoua MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key); 2740b4aaa1f0SMoni Shoua MLX5_SET(dctc, dctc, user_index, uidx); 2741a645a89dSLeon Romanovsky if (MLX5_CAP_GEN(dev->mdev, ece_support)) 2742a645a89dSLeon Romanovsky MLX5_SET(dctc, dctc, ece, ucmd->ece_options); 2743b4aaa1f0SMoni Shoua 274437518fa4SLeon Romanovsky if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) { 2745fd9dab7eSLeon Romanovsky int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq); 2746fd9dab7eSLeon Romanovsky 2747fd9dab7eSLeon Romanovsky if (rcqe_sz == 128) 2748fd9dab7eSLeon Romanovsky MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE); 2749fd9dab7eSLeon Romanovsky } 27505d6ff1baSYonatan Cohen 2751b4aaa1f0SMoni Shoua qp->state = IB_QPS_RESET; 275247c80612SLeon Romanovsky return 0; 2753b4aaa1f0SMoni Shoua } 2754b4aaa1f0SMoni Shoua 27557aede1a2SLeon Romanovsky static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 27567aede1a2SLeon Romanovsky enum ib_qp_type *type) 27576eb7edffSLeon Romanovsky { 27586eb7edffSLeon Romanovsky if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct)) 27596eb7edffSLeon Romanovsky goto out; 27606eb7edffSLeon Romanovsky 27616eb7edffSLeon Romanovsky switch (attr->qp_type) { 27626eb7edffSLeon Romanovsky case IB_QPT_XRC_TGT: 27636eb7edffSLeon Romanovsky case IB_QPT_XRC_INI: 27646eb7edffSLeon Romanovsky if (!MLX5_CAP_GEN(dev->mdev, xrc)) 27656eb7edffSLeon Romanovsky goto out; 27666eb7edffSLeon Romanovsky fallthrough; 27676eb7edffSLeon Romanovsky case IB_QPT_RC: 27686eb7edffSLeon Romanovsky case IB_QPT_UC: 27696eb7edffSLeon Romanovsky case 
IB_QPT_SMI: 27706eb7edffSLeon Romanovsky case MLX5_IB_QPT_HW_GSI: 27716eb7edffSLeon Romanovsky case IB_QPT_DRIVER: 27726eb7edffSLeon Romanovsky case IB_QPT_GSI: 277342caf9cbSMark Bloch case IB_QPT_RAW_PACKET: 277442caf9cbSMark Bloch case IB_QPT_UD: 277542caf9cbSMark Bloch case MLX5_IB_QPT_REG_UMR: 27767aede1a2SLeon Romanovsky break; 27776eb7edffSLeon Romanovsky default: 27786eb7edffSLeon Romanovsky goto out; 2779b4aaa1f0SMoni Shoua } 2780b4aaa1f0SMoni Shoua 27817aede1a2SLeon Romanovsky *type = attr->qp_type; 2782b4aaa1f0SMoni Shoua return 0; 27836eb7edffSLeon Romanovsky 27846eb7edffSLeon Romanovsky out: 27856eb7edffSLeon Romanovsky mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type); 27866eb7edffSLeon Romanovsky return -EOPNOTSUPP; 2787b4aaa1f0SMoni Shoua } 2788b4aaa1f0SMoni Shoua 27892242cc25SLeon Romanovsky static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd, 27902242cc25SLeon Romanovsky struct ib_qp_init_attr *attr, 27912242cc25SLeon Romanovsky struct ib_udata *udata) 27922242cc25SLeon Romanovsky { 27932242cc25SLeon Romanovsky struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 27942242cc25SLeon Romanovsky udata, struct mlx5_ib_ucontext, ibucontext); 27952242cc25SLeon Romanovsky 27962242cc25SLeon Romanovsky if (!udata) { 27972242cc25SLeon Romanovsky /* Kernel create_qp callers */ 27982242cc25SLeon Romanovsky if (attr->rwq_ind_tbl) 27992242cc25SLeon Romanovsky return -EOPNOTSUPP; 28002242cc25SLeon Romanovsky 28012242cc25SLeon Romanovsky switch (attr->qp_type) { 28022242cc25SLeon Romanovsky case IB_QPT_RAW_PACKET: 28032242cc25SLeon Romanovsky case IB_QPT_DRIVER: 28042242cc25SLeon Romanovsky return -EOPNOTSUPP; 28052242cc25SLeon Romanovsky default: 28062242cc25SLeon Romanovsky return 0; 28072242cc25SLeon Romanovsky } 28082242cc25SLeon Romanovsky } 28092242cc25SLeon Romanovsky 28102242cc25SLeon Romanovsky /* Userspace create_qp callers */ 28112242cc25SLeon Romanovsky if (attr->qp_type == IB_QPT_RAW_PACKET && 
!ucontext->cqe_version) { 28122242cc25SLeon Romanovsky mlx5_ib_dbg(dev, 28132242cc25SLeon Romanovsky "Raw Packet QP is only supported for CQE version > 0\n"); 28142242cc25SLeon Romanovsky return -EINVAL; 28152242cc25SLeon Romanovsky } 28162242cc25SLeon Romanovsky 28172242cc25SLeon Romanovsky if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) { 28182242cc25SLeon Romanovsky mlx5_ib_dbg(dev, 28192242cc25SLeon Romanovsky "Wrong QP type %d for the RWQ indirect table\n", 28202242cc25SLeon Romanovsky attr->qp_type); 28212242cc25SLeon Romanovsky return -EINVAL; 28222242cc25SLeon Romanovsky } 28232242cc25SLeon Romanovsky 28242242cc25SLeon Romanovsky /* 28252242cc25SLeon Romanovsky * We don't need to see this warning, it means that kernel code 28262242cc25SLeon Romanovsky * missing ib_pd. Placed here to catch developer's mistakes. 28272242cc25SLeon Romanovsky */ 28282242cc25SLeon Romanovsky WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT, 28292242cc25SLeon Romanovsky "There is a missing PD pointer assignment\n"); 28302242cc25SLeon Romanovsky return 0; 28312242cc25SLeon Romanovsky } 28322242cc25SLeon Romanovsky 283337518fa4SLeon Romanovsky static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, 283437518fa4SLeon Romanovsky bool cond, struct mlx5_ib_qp *qp) 283537518fa4SLeon Romanovsky { 283637518fa4SLeon Romanovsky if (!(*flags & flag)) 283737518fa4SLeon Romanovsky return; 283837518fa4SLeon Romanovsky 283937518fa4SLeon Romanovsky if (cond) { 284037518fa4SLeon Romanovsky qp->flags_en |= flag; 284137518fa4SLeon Romanovsky *flags &= ~flag; 284237518fa4SLeon Romanovsky return; 284337518fa4SLeon Romanovsky } 284437518fa4SLeon Romanovsky 284581530ab0SLeon Romanovsky switch (flag) { 284681530ab0SLeon Romanovsky case MLX5_QP_FLAG_SCATTER_CQE: 284781530ab0SLeon Romanovsky case MLX5_QP_FLAG_ALLOW_SCATTER_CQE: 284837518fa4SLeon Romanovsky /* 284981530ab0SLeon Romanovsky * We don't return error if these flags were provided, 285037518fa4SLeon Romanovsky * 
and mlx5 doesn't have right capability. 285137518fa4SLeon Romanovsky */ 285281530ab0SLeon Romanovsky *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE | 285381530ab0SLeon Romanovsky MLX5_QP_FLAG_ALLOW_SCATTER_CQE); 285437518fa4SLeon Romanovsky return; 285581530ab0SLeon Romanovsky default: 285681530ab0SLeon Romanovsky break; 285737518fa4SLeon Romanovsky } 285837518fa4SLeon Romanovsky mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag); 285937518fa4SLeon Romanovsky } 286037518fa4SLeon Romanovsky 286137518fa4SLeon Romanovsky static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 28625ce0592bSLeon Romanovsky void *ucmd, struct ib_qp_init_attr *attr) 28632fdddbd5SLeon Romanovsky { 286437518fa4SLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 286537518fa4SLeon Romanovsky bool cond; 28665ce0592bSLeon Romanovsky int flags; 28675ce0592bSLeon Romanovsky 28685ce0592bSLeon Romanovsky if (attr->rwq_ind_tbl) 28695ce0592bSLeon Romanovsky flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags; 28705ce0592bSLeon Romanovsky else 28715ce0592bSLeon Romanovsky flags = ((struct mlx5_ib_create_qp *)ucmd)->flags; 287237518fa4SLeon Romanovsky 287337518fa4SLeon Romanovsky switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) { 28742fdddbd5SLeon Romanovsky case MLX5_QP_FLAG_TYPE_DCI: 28757aede1a2SLeon Romanovsky qp->type = MLX5_IB_QPT_DCI; 28762fdddbd5SLeon Romanovsky break; 28772fdddbd5SLeon Romanovsky case MLX5_QP_FLAG_TYPE_DCT: 28787aede1a2SLeon Romanovsky qp->type = MLX5_IB_QPT_DCT; 287937518fa4SLeon Romanovsky break; 28807aede1a2SLeon Romanovsky default: 28817aede1a2SLeon Romanovsky if (qp->type != IB_QPT_DRIVER) 28827aede1a2SLeon Romanovsky break; 28837aede1a2SLeon Romanovsky /* 28847aede1a2SLeon Romanovsky * It is IB_QPT_DRIVER and or no subtype or 28857aede1a2SLeon Romanovsky * wrong subtype were provided. 
28867aede1a2SLeon Romanovsky */ 288737518fa4SLeon Romanovsky return -EINVAL; 28887aede1a2SLeon Romanovsky } 288937518fa4SLeon Romanovsky 289037518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp); 289137518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp); 289211656f59SLior Nahmanson process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM, 289365f90c8eSLior Nahmanson MLX5_CAP_GEN(mdev, log_max_dci_stream_channels), 289411656f59SLior Nahmanson qp); 289537518fa4SLeon Romanovsky 289637518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp); 289737518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE, 289837518fa4SLeon Romanovsky MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); 289981530ab0SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE, 290081530ab0SLeon Romanovsky MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); 290137518fa4SLeon Romanovsky 29027aede1a2SLeon Romanovsky if (qp->type == IB_QPT_RAW_PACKET) { 290337518fa4SLeon Romanovsky cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || 290437518fa4SLeon Romanovsky MLX5_CAP_ETH(mdev, tunnel_stateless_gre) || 290537518fa4SLeon Romanovsky MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx); 290637518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS, 290737518fa4SLeon Romanovsky cond, qp); 290837518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 290937518fa4SLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true, 291037518fa4SLeon Romanovsky qp); 291137518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 291237518fa4SLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true, 291337518fa4SLeon Romanovsky qp); 291437518fa4SLeon Romanovsky } 291537518fa4SLeon Romanovsky 29167aede1a2SLeon Romanovsky if (qp->type == IB_QPT_RC) 291737518fa4SLeon Romanovsky process_vendor_flag(dev, &flags, 291837518fa4SLeon Romanovsky 
MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE, 291937518fa4SLeon Romanovsky MLX5_CAP_GEN(mdev, qp_packet_based), qp); 292037518fa4SLeon Romanovsky 292176883a6cSLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp); 292276883a6cSLeon Romanovsky process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp); 292376883a6cSLeon Romanovsky 29245d6fffedSLeon Romanovsky cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS | 29255d6fffedSLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | 29265d6fffedSLeon Romanovsky MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC); 29275d6fffedSLeon Romanovsky if (attr->rwq_ind_tbl && cond) { 29285d6fffedSLeon Romanovsky mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n", 29295d6fffedSLeon Romanovsky cond); 29305d6fffedSLeon Romanovsky return -EINVAL; 29315d6fffedSLeon Romanovsky } 29325d6fffedSLeon Romanovsky 293337518fa4SLeon Romanovsky if (flags) 293437518fa4SLeon Romanovsky mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags); 293537518fa4SLeon Romanovsky 293637518fa4SLeon Romanovsky return (flags) ? -EINVAL : 0; 29372fdddbd5SLeon Romanovsky } 29382fdddbd5SLeon Romanovsky 29392978975cSLeon Romanovsky static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag, 29402978975cSLeon Romanovsky bool cond, struct mlx5_ib_qp *qp) 29412978975cSLeon Romanovsky { 29422978975cSLeon Romanovsky if (!(*flags & flag)) 29432978975cSLeon Romanovsky return; 29442978975cSLeon Romanovsky 29452978975cSLeon Romanovsky if (cond) { 29462978975cSLeon Romanovsky qp->flags |= flag; 29472978975cSLeon Romanovsky *flags &= ~flag; 29482978975cSLeon Romanovsky return; 29492978975cSLeon Romanovsky } 29502978975cSLeon Romanovsky 29512978975cSLeon Romanovsky if (flag == MLX5_IB_QP_CREATE_WC_TEST) { 29522978975cSLeon Romanovsky /* 29532978975cSLeon Romanovsky * Special case, if condition didn't meet, it won't be error, 29542978975cSLeon Romanovsky * just different in-kernel flow. 
29552978975cSLeon Romanovsky */ 29562978975cSLeon Romanovsky *flags &= ~MLX5_IB_QP_CREATE_WC_TEST; 29572978975cSLeon Romanovsky return; 29582978975cSLeon Romanovsky } 29592978975cSLeon Romanovsky mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag); 29602978975cSLeon Romanovsky } 29612978975cSLeon Romanovsky 29622978975cSLeon Romanovsky static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 29632978975cSLeon Romanovsky struct ib_qp_init_attr *attr) 29642978975cSLeon Romanovsky { 29657aede1a2SLeon Romanovsky enum ib_qp_type qp_type = qp->type; 29662978975cSLeon Romanovsky struct mlx5_core_dev *mdev = dev->mdev; 29672978975cSLeon Romanovsky int create_flags = attr->create_flags; 29682978975cSLeon Romanovsky bool cond; 29692978975cSLeon Romanovsky 29707aede1a2SLeon Romanovsky if (qp_type == MLX5_IB_QPT_DCT) 29712978975cSLeon Romanovsky return (create_flags) ? -EINVAL : 0; 29722978975cSLeon Romanovsky 29732978975cSLeon Romanovsky if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) 29742978975cSLeon Romanovsky return (create_flags) ? 
-EINVAL : 0; 29752978975cSLeon Romanovsky 2976f81b4565SLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP, 2977f81b4565SLeon Romanovsky mlx5_get_flow_namespace(dev->mdev, 2978f81b4565SLeon Romanovsky MLX5_FLOW_NAMESPACE_BYPASS), 2979f81b4565SLeon Romanovsky qp); 29802978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 29819e0dc7b9SMax Gurtovoy IB_QP_CREATE_INTEGRITY_EN, 29829e0dc7b9SMax Gurtovoy MLX5_CAP_GEN(mdev, sho), qp); 29839e0dc7b9SMax Gurtovoy process_create_flag(dev, &create_flags, 29842978975cSLeon Romanovsky IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, 29852978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, block_lb_mc), qp); 29862978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL, 29872978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 29882978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND, 29892978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 29902978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV, 29912978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, cd), qp); 29922978975cSLeon Romanovsky 29932978975cSLeon Romanovsky if (qp_type == IB_QPT_UD) { 29942978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 29952978975cSLeon Romanovsky IB_QP_CREATE_IPOIB_UD_LSO, 29962978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, ipoib_basic_offloads), 29972978975cSLeon Romanovsky qp); 29982978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB; 29992978975cSLeon Romanovsky process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN, 30002978975cSLeon Romanovsky cond, qp); 30012978975cSLeon Romanovsky } 30022978975cSLeon Romanovsky 30032978975cSLeon Romanovsky if (qp_type == IB_QPT_RAW_PACKET) { 30042978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && 30052978975cSLeon Romanovsky MLX5_CAP_ETH(mdev, scatter_fcs); 30062978975cSLeon Romanovsky process_create_flag(dev, 
&create_flags, 30072978975cSLeon Romanovsky IB_QP_CREATE_SCATTER_FCS, cond, qp); 30082978975cSLeon Romanovsky 30092978975cSLeon Romanovsky cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && 30102978975cSLeon Romanovsky MLX5_CAP_ETH(mdev, vlan_cap); 30112978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 30122978975cSLeon Romanovsky IB_QP_CREATE_CVLAN_STRIPPING, cond, qp); 30132978975cSLeon Romanovsky } 30142978975cSLeon Romanovsky 30152978975cSLeon Romanovsky process_create_flag(dev, &create_flags, 30162978975cSLeon Romanovsky IB_QP_CREATE_PCI_WRITE_END_PADDING, 30172978975cSLeon Romanovsky MLX5_CAP_GEN(mdev, end_pad), qp); 30182978975cSLeon Romanovsky 30192978975cSLeon Romanovsky process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST, 30202978975cSLeon Romanovsky qp_type != MLX5_IB_QPT_REG_UMR, qp); 30212978975cSLeon Romanovsky process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1, 30222978975cSLeon Romanovsky true, qp); 30232978975cSLeon Romanovsky 30241f11a761SJason Gunthorpe if (create_flags) { 30252978975cSLeon Romanovsky mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n", 30262978975cSLeon Romanovsky create_flags); 30271f11a761SJason Gunthorpe return -EOPNOTSUPP; 30281f11a761SJason Gunthorpe } 30291f11a761SJason Gunthorpe return 0; 30302978975cSLeon Romanovsky } 30312978975cSLeon Romanovsky 30326f2cf76eSLeon Romanovsky static int process_udata_size(struct mlx5_ib_dev *dev, 30336f2cf76eSLeon Romanovsky struct mlx5_create_qp_params *params) 30342fdddbd5SLeon Romanovsky { 30352fdddbd5SLeon Romanovsky size_t ucmd = sizeof(struct mlx5_ib_create_qp); 30366f2cf76eSLeon Romanovsky struct ib_udata *udata = params->udata; 30376f2cf76eSLeon Romanovsky size_t outlen = udata->outlen; 30385ce0592bSLeon Romanovsky size_t inlen = udata->inlen; 30392fdddbd5SLeon Romanovsky 30406f2cf76eSLeon Romanovsky params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp)); 3041e383085cSLeon Romanovsky params->ucmd_size = ucmd; 
30426f2cf76eSLeon Romanovsky if (!params->is_rss_raw) { 3043e383085cSLeon Romanovsky /* User has old rdma-core, which doesn't support ECE */ 3044e383085cSLeon Romanovsky size_t min_inlen = 3045e383085cSLeon Romanovsky offsetof(struct mlx5_ib_create_qp, ece_options); 3046e383085cSLeon Romanovsky 3047e383085cSLeon Romanovsky /* 3048e383085cSLeon Romanovsky * We will check in check_ucmd_data() that user 3049e383085cSLeon Romanovsky * cleared everything after inlen. 3050e383085cSLeon Romanovsky */ 3051e383085cSLeon Romanovsky params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd); 30526f2cf76eSLeon Romanovsky goto out; 30536f2cf76eSLeon Romanovsky } 30545ce0592bSLeon Romanovsky 30556f2cf76eSLeon Romanovsky /* RSS RAW QP */ 30565ce0592bSLeon Romanovsky if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags)) 30576f2cf76eSLeon Romanovsky return -EINVAL; 30586f2cf76eSLeon Romanovsky 30596f2cf76eSLeon Romanovsky if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index)) 30606f2cf76eSLeon Romanovsky return -EINVAL; 30615ce0592bSLeon Romanovsky 30625ce0592bSLeon Romanovsky ucmd = sizeof(struct mlx5_ib_create_qp_rss); 3063e383085cSLeon Romanovsky params->ucmd_size = ucmd; 30645ce0592bSLeon Romanovsky if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd)) 30656f2cf76eSLeon Romanovsky return -EINVAL; 30665ce0592bSLeon Romanovsky 30676f2cf76eSLeon Romanovsky params->inlen = min(ucmd, inlen); 30686f2cf76eSLeon Romanovsky out: 30696f2cf76eSLeon Romanovsky if (!params->inlen) 3070e383085cSLeon Romanovsky mlx5_ib_dbg(dev, "udata is too small\n"); 30716f2cf76eSLeon Romanovsky 30726f2cf76eSLeon Romanovsky return (params->inlen) ? 
		       0 : -EINVAL;
}

/*
 * create_qp() - dispatch QP creation to the type-specific constructor.
 * @dev:	mlx5 IB device
 * @pd:		protection domain the QP belongs to
 * @qp:		QP object to initialize (qp->type must already be set)
 * @params:	creation parameters (attr, udata, ucmd, resp, ...)
 *
 * RSS raw-packet QPs bypass the type switch entirely.  On success the
 * user-visible qp_num is fixed up for QP0/QP1; all other types expose
 * the hardware QP number.  Returns 0 or a negative errno.
 */
static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
		     struct mlx5_ib_qp *qp,
		     struct mlx5_create_qp_params *params)
{
	int err;

	if (params->is_rss_raw) {
		err = create_rss_raw_qp_tir(dev, pd, qp, params);
		goto out;
	}

	switch (qp->type) {
	case MLX5_IB_QPT_DCT:
		err = create_dct(dev, pd, qp, params);
		/* DCTs are hidden from restrack/rdmatool */
		rdma_restrack_no_track(&qp->ibqp.res);
		break;
	case MLX5_IB_QPT_DCI:
		err = create_dci(dev, pd, qp, params);
		break;
	case IB_QPT_XRC_TGT:
		err = create_xrc_tgt_qp(dev, qp, params);
		break;
	case IB_QPT_GSI:
		err = mlx5_ib_create_gsi(pd, qp, params->attr);
		break;
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
		/* Internal QPs are not tracked either, then created as
		 * regular kernel/user QPs below.
		 */
		rdma_restrack_no_track(&qp->ibqp.res);
		fallthrough;
	default:
		if (params->udata)
			err = create_user_qp(dev, pd, qp, params);
		else
			err = create_kernel_qp(dev, pd, qp, params);
	}

out:
	if (err) {
		mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
		return err;
	}

	if (is_qp0(qp->type))
		qp->ibqp.qp_num = 0;
	else if (is_qp1(qp->type))
		qp->ibqp.qp_num = 1;
	else
		qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

	mlx5_ib_dbg(dev,
		    "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
		    qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
		    params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
					    -1,
		    params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
					    -1,
		    params->resp.ece_options);

	return 0;
}

/*
 * Validate init attributes that are mandatory/forbidden for a given QP
 * type: DCT requires both an SRQ and a receive CQ, DCI must not request
 * receive resources, and an RSS raw-packet QP (rwq_ind_tbl set) cannot
 * own a send CQ.  Other types pass unchecked here.
 */
static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 struct ib_qp_init_attr *attr)
{
	int ret = 0;

	switch (qp->type) {
	case MLX5_IB_QPT_DCT:
		ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
		break;
	case MLX5_IB_QPT_DCI:
		ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
			      -EINVAL :
			      0;
		break;
	case IB_QPT_RAW_PACKET:
		ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
		break;
	default:
		break;
	}

	if (ret)
		mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);

	return ret;
}

/*
 * Extract the user index (uidx) from the user's create_qp command into
 * params->uidx.  RSS raw QPs carry no uidx and succeed unconditionally,
 * leaving params->uidx at its default.
 */
static int get_qp_uidx(struct mlx5_ib_qp *qp,
		       struct mlx5_create_qp_params *params)
{
	struct mlx5_ib_create_qp *ucmd = params->ucmd;
	struct ib_udata *udata = params->udata;
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	if (params->is_rss_raw)
		return 0;

	return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
}

/*
 * Tear down a DCT QP.  The firmware object only exists once the DCT has
 * reached RTR, so the destroy command is issued only in that state; the
 * saved create mailbox (dct.in) is freed in every case.
 */
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);

	if (mqp->state == IB_QPS_RTR) {
		int err;

		err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
			return err;
		}
	}

	kfree(mqp->dct.in);
	return 0;
}

/*
 * Reject a create_qp udata whose bytes past the known command struct are
 * not zero, so future extensions of the command layout stay safe.
 * Returns 0 when the trailing area is absent or cleared, -EINVAL
 * otherwise.
 */
static int check_ucmd_data(struct mlx5_ib_dev *dev,
			   struct mlx5_create_qp_params *params)
{
	struct ib_udata *udata = params->udata;
	size_t size, last;
	int ret;

	if (params->is_rss_raw)
		/*
		 * These QPs don't have "reserved" field in their
		 * create_qp input struct, so their data is always valid.
		 */
		last = sizeof(struct mlx5_ib_create_qp_rss);
	else
		last = offsetof(struct mlx5_ib_create_qp, reserved);

	if (udata->inlen <= last)
		return 0;

	/*
	 * User provides different create_qp structures based on the
	 * flow and we need to know if he cleared memory after our
	 * struct create_qp ends.
	 */
	size = udata->inlen - last;
	ret = ib_is_udata_cleared(params->udata, last, size);
	if (!ret)
		mlx5_ib_dbg(
			dev,
			"udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
			udata->inlen, params->ucmd_size, last, size);
	return ret ? 0 : -EINVAL;
}
/*
 * mlx5_ib_create_qp() - ib_device verb entry point for QP creation.
 *
 * Validates the requested type and flow, copies and sanity-checks the
 * user command (if any), then delegates to create_qp().  On any failure
 * after the hardware object exists, it is destroyed via the matching
 * type-specific teardown before the error is returned.
 */
int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	struct mlx5_create_qp_params params = {};
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct ib_pd *pd = ibqp->pd;
	enum ib_qp_type type;
	int err;

	err = check_qp_type(dev, attr, &type);
	if (err)
		return err;

	err = check_valid_flow(dev, pd, attr, udata);
	if (err)
		return err;

	params.udata = udata;
	params.uidx = MLX5_IB_DEFAULT_UIDX;
	params.attr = attr;
	params.is_rss_raw = !!attr->rwq_ind_tbl;

	if (udata) {
		err = process_udata_size(dev, &params);
		if (err)
			return err;

		err = check_ucmd_data(dev, &params);
		if (err)
			return err;

		params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
		if (!params.ucmd)
			return -ENOMEM;

		err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
		if (err)
			goto free_ucmd;
	}

	mutex_init(&qp->mutex);
	qp->type = type;
	if (udata) {
		err = process_vendor_flags(dev, qp, params.ucmd, attr);
		if (err)
			goto free_ucmd;

		err = get_qp_uidx(qp, &params);
		if (err)
			goto free_ucmd;
	}
	err = process_create_flags(dev, qp, attr);
	if (err)
		goto free_ucmd;

	err = check_qp_attr(dev, qp, attr);
	if (err)
		goto free_ucmd;

	err = create_qp(dev, pd, qp, &params);
	if (err)
		goto free_ucmd;

	/* ucmd is consumed; clear it so the error path's kfree is a no-op */
	kfree(params.ucmd);
	params.ucmd = NULL;

	if (udata)
		/*
		 * It is safe to copy response for all user create QP flows,
		 * including MLX5_IB_QPT_DCT, which doesn't need it.
		 * In that case, resp will be filled with zeros.
		 */
		err = ib_copy_to_udata(udata, &params.resp, params.outlen);
	/* err is still 0 here in the kernel (!udata) flow */
	if (err)
		goto destroy_qp;

	return 0;

destroy_qp:
	switch (qp->type) {
	case MLX5_IB_QPT_DCT:
		mlx5_ib_destroy_dct(qp);
		break;
	case IB_QPT_GSI:
		mlx5_ib_destroy_gsi(qp);
		break;
	default:
		destroy_qp_common(dev, qp, udata);
	}

free_ucmd:
	kfree(params.ucmd);
	return err;
}

/*
 * mlx5_ib_destroy_qp() - ib_device verb entry point for QP destruction.
 * GSI and DCT QPs have dedicated teardown paths; everything else goes
 * through destroy_qp_common().
 */
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (mqp->type == IB_QPT_GSI)
		return mlx5_ib_destroy_gsi(mqp);

	if (mqp->type == MLX5_IB_QPT_DCT)
		return mlx5_ib_destroy_dct(mqp);

	destroy_qp_common(dev, mqp, udata);
	return 0;
}

/*
 * Program the remote-access bits (rre/rwe/rae + atomic_mode) of the QP
 * context from the modify-QP attributes, falling back to the values
 * cached on the QP when the corresponding attr_mask bit is not set.
 * When the destination read-atomic depth is zero, everything except
 * remote write is masked off.  Returns -EOPNOTSUPP if remote atomics
 * are requested but no atomic mode is available for this QP type.
 */
static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
				const struct ib_qp_attr *attr, int attr_mask,
				void *qpc)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));

	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
		int atomic_mode;

		atomic_mode = get_atomic_mode(dev, qp->type);
		if (atomic_mode < 0)
			return -EOPNOTSUPP;

		MLX5_SET(qpc, qpc, rae, 1);
		MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
	}

	MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	return 0;
}

/* Flags for the path_flags argument of mlx5_set_path() */
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

/*
 * Translate a device stat_rate encoding back to an IB_RATE_* value.
 * Encodings 0..6 use the lookup table; anything larger is the legacy
 * offset scheme (value - MLX5_STAT_RATE_OFFSET).  Inverse of
 * ib_to_mlx5_rate_map().
 */
static int mlx5_to_ib_rate_map(u8 rate)
{
	static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
				     IB_RATE_25_GBPS, IB_RATE_100_GBPS,
				     IB_RATE_200_GBPS, IB_RATE_50_GBPS,
				     IB_RATE_400_GBPS };

	if (rate < ARRAY_SIZE(rates))
		return rates[rate];

	return rate - MLX5_STAT_RATE_OFFSET;
}
33856fe6e568SMark Zhang { 33866fe6e568SMark Zhang static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS, 33876fe6e568SMark Zhang IB_RATE_25_GBPS, IB_RATE_100_GBPS, 33886fe6e568SMark Zhang IB_RATE_200_GBPS, IB_RATE_50_GBPS, 33896fe6e568SMark Zhang IB_RATE_400_GBPS }; 33906fe6e568SMark Zhang 33916fe6e568SMark Zhang if (rate < ARRAY_SIZE(rates)) 33926fe6e568SMark Zhang return rates[rate]; 33936fe6e568SMark Zhang 33946fe6e568SMark Zhang return rate - MLX5_STAT_RATE_OFFSET; 33956fe6e568SMark Zhang } 33966fe6e568SMark Zhang 3397c531024bSMark Zhang static int ib_to_mlx5_rate_map(u8 rate) 3398c531024bSMark Zhang { 3399c531024bSMark Zhang switch (rate) { 3400c531024bSMark Zhang case IB_RATE_PORT_CURRENT: 3401c531024bSMark Zhang return 0; 3402c531024bSMark Zhang case IB_RATE_56_GBPS: 3403c531024bSMark Zhang return 1; 3404c531024bSMark Zhang case IB_RATE_25_GBPS: 3405c531024bSMark Zhang return 2; 3406c531024bSMark Zhang case IB_RATE_100_GBPS: 3407c531024bSMark Zhang return 3; 3408c531024bSMark Zhang case IB_RATE_200_GBPS: 3409c531024bSMark Zhang return 4; 3410c531024bSMark Zhang case IB_RATE_50_GBPS: 3411c531024bSMark Zhang return 5; 3412c70f51deSPatrisious Haddad case IB_RATE_400_GBPS: 3413c70f51deSPatrisious Haddad return 6; 3414c531024bSMark Zhang default: 3415c531024bSMark Zhang return rate + MLX5_STAT_RATE_OFFSET; 34167f1d2dfaSTom Rix } 3417c531024bSMark Zhang 3418c531024bSMark Zhang return 0; 3419c531024bSMark Zhang } 3420c531024bSMark Zhang 3421e126ba97SEli Cohen static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) 3422e126ba97SEli Cohen { 3423c531024bSMark Zhang u32 stat_rate_support; 3424c531024bSMark Zhang 34254f32ac2eSDanit Goldberg if (rate == IB_RATE_PORT_CURRENT) 3426e126ba97SEli Cohen return 0; 34274f32ac2eSDanit Goldberg 3428a5a5d199SMichael Guralnik if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS) 3429e126ba97SEli Cohen return -EINVAL; 34304f32ac2eSDanit Goldberg 3431c531024bSMark Zhang stat_rate_support = MLX5_CAP_GEN(dev->mdev, 
stat_rate_support); 34324f32ac2eSDanit Goldberg while (rate != IB_RATE_PORT_CURRENT && 3433c531024bSMark Zhang !(1 << ib_to_mlx5_rate_map(rate) & stat_rate_support)) 3434e126ba97SEli Cohen --rate; 3435e126ba97SEli Cohen 3436c531024bSMark Zhang return ib_to_mlx5_rate_map(rate); 3437e126ba97SEli Cohen } 3438e126ba97SEli Cohen 343975850d0bSmajd@mellanox.com static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, 34401cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u8 sl, 34411cd6dbd3SYishai Hadas struct ib_pd *pd) 344275850d0bSmajd@mellanox.com { 344375850d0bSmajd@mellanox.com void *in; 344475850d0bSmajd@mellanox.com void *tisc; 344575850d0bSmajd@mellanox.com int inlen; 344675850d0bSmajd@mellanox.com int err; 344775850d0bSmajd@mellanox.com 344875850d0bSmajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(modify_tis_in); 34491b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 345075850d0bSmajd@mellanox.com if (!in) 345175850d0bSmajd@mellanox.com return -ENOMEM; 345275850d0bSmajd@mellanox.com 345375850d0bSmajd@mellanox.com MLX5_SET(modify_tis_in, in, bitmask.prio, 1); 34541cd6dbd3SYishai Hadas MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid); 345575850d0bSmajd@mellanox.com 345675850d0bSmajd@mellanox.com tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); 345775850d0bSmajd@mellanox.com MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1)); 345875850d0bSmajd@mellanox.com 3459e0b4b472SLeon Romanovsky err = mlx5_core_modify_tis(dev, sq->tisn, in); 346075850d0bSmajd@mellanox.com 346175850d0bSmajd@mellanox.com kvfree(in); 346275850d0bSmajd@mellanox.com 346375850d0bSmajd@mellanox.com return err; 346475850d0bSmajd@mellanox.com } 346575850d0bSmajd@mellanox.com 346613eab21fSAviv Heller static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, 34671cd6dbd3SYishai Hadas struct mlx5_ib_sq *sq, u8 tx_affinity, 34681cd6dbd3SYishai Hadas struct ib_pd *pd) 346913eab21fSAviv Heller { 347013eab21fSAviv Heller void *in; 347113eab21fSAviv Heller void *tisc; 347213eab21fSAviv Heller int inlen; 
/*
 * Derive the RoCE v2 UDP source port for the address-path context:
 * use the AH's flow label when set, otherwise compute one from the
 * local/remote QP numbers, then fold it into a UDP sport.
 */
static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
				    u32 lqpn, u32 rqpn)

{
	u32 fl = ah->grh.flow_label;

	if (!fl)
		fl = rdma_calc_flow_label(lqpn, rqpn);

	MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
}

/*
 * Fill an address-path ("ads") context from an rdma_ah_attr for a
 * modify-QP operation.
 * @path:	the primary or alternate address path in the QPC
 * @alt:	true when programming the alternate path (selects the
 *		alt_* pkey/timeout attributes)
 *
 * RoCE paths get dmac/eth_prio/dscp (and possibly a UDP sport);
 * IB paths get fl/free_ar/rlid/mlid/sl.  GRH fields, static rate and
 * the port number are set for both.  For a raw-packet QP with a send
 * queue, the SL is additionally pushed to the TIS.
 * Returns 0 or a negative errno.
 */
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 const struct rdma_ah_attr *ah, void *path, u8 port,
			 int attr_mask, u32 path_flags,
			 const struct ib_qp_attr *attr, bool alt)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
	int err;
	enum ib_gid_type gid_type;
	u8 ah_flags = rdma_ah_get_ah_flags(ah);
	u8 sl = rdma_ah_get_sl(ah);

	if (attr_mask & IB_QP_PKEY_INDEX)
		MLX5_SET(ads, path, pkey_index,
			 alt ? attr->alt_pkey_index : attr->pkey_index);

	if (ah_flags & IB_AH_GRH) {
		const struct ib_port_immutable *immutable;

		/* Bound-check the SGID index against the port's GID table */
		immutable = ib_port_immutable_read(&dev->ib_dev, port);
		if (grh->sgid_index >= immutable->gid_tbl_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       grh->sgid_index,
			       immutable->gid_tbl_len);
			return -EINVAL;
		}
	}

	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
		/* RoCE addressing is carried in the GRH; it is mandatory */
		if (!(ah_flags & IB_AH_GRH))
			return -EINVAL;

		ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
				ah->roce.dmac);
		if ((qp->type == IB_QPT_RC ||
		     qp->type == IB_QPT_UC ||
		     qp->type == IB_QPT_XRC_INI ||
		     qp->type == IB_QPT_XRC_TGT) &&
		    (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
		    (attr_mask & IB_QP_DEST_QPN))
			mlx5_set_path_udp_sport(path, ah,
						qp->ibqp.qp_num,
						attr->dest_qp_num);
		MLX5_SET(ads, path, eth_prio, sl & 0x7);
		gid_type = ah->grh.sgid_attr->gid_type;
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			/* RoCE v2: top 6 bits of traffic class are the DSCP */
			MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
	} else {
		MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
		MLX5_SET(ads, path, free_ar,
			 !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
		MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
		MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
		MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
		MLX5_SET(ads, path, sl, sl);
	}

	if (ah_flags & IB_AH_GRH) {
		MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
		MLX5_SET(ads, path, hop_limit, grh->hop_limit);
		MLX5_SET(ads, path, tclass, grh->traffic_class);
		MLX5_SET(ads, path, flow_label, grh->flow_label);
		memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
		       sizeof(grh->dgid.raw));
	}

	/* ib_rate_to_mlx5() returns the encoding (>= 0) or a -errno */
	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
	if (err < 0)
		return err;
	MLX5_SET(ads, path, stat_rate, err);
	MLX5_SET(ads, path, vhca_port_num, port);

	if (attr_mask & IB_QP_TIMEOUT)
		MLX5_SET(ads, path, ack_timeout,
			 alt ? attr->alt_timeout : attr->timeout);

	if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
		return modify_raw_packet_eth_prio(dev->mdev,
						  &qp->raw_packet_qp.sq,
						  sl & 0xf, qp->ibqp.pd);

	return 0;
}

/*
 * Optional-parameter masks allowed by the device for each
 * (current state, next state, service type) modify-QP transition.
 * Indexed as opt_mask[cur][new][st].
 */
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT	|
					  MLX5_QP_OPTPAR_LAG_TX_AFF,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT	|
					  MLX5_QP_OPTPAR_LAG_TX_AFF,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT	|
					  MLX5_QP_OPTPAR_LAG_TX_AFF,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
					  MLX5_QP_OPTPAR_RRE            |
					  MLX5_QP_OPTPAR_RAE            |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_LAG_TX_AFF,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_LAG_TX_AFF,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE            |
					  MLX5_QP_OPTPAR_RAE            |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_LAG_TX_AFF,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PM_STATE	|
					   MLX5_QP_OPTPAR_RNR_TIMEOUT,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_PM_STATE	|
					   MLX5_QP_OPTPAR_ALT_ADDR_PATH,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
			[MLX5_QP_ST_XRC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					    MLX5_QP_OPTPAR_RWE		|
					    MLX5_QP_OPTPAR_RAE		|
					    MLX5_QP_OPTPAR_RRE,
		},
	},
	[MLX5_QP_STATE_SQD] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RRE,
		},
	},
};

/*
 * Map a single IB_QP_* modify-mask bit to the device's optional-
 * parameter bits.  Bits with no device counterpart (state, psn, caps,
 * ...) map to 0.
 */
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}
/*
 * Expand a full IB modify-mask into the union of the device optional-
 * parameter bits, one IB mask bit at a time.
 */
static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}

/*
 * Move the RQ of a raw-packet QP to @new_state via MODIFY_RQ,
 * optionally attaching a flow counter (counter_set_id) when the
 * firmware supports modify_rq_counter_set_id.  rq->state is updated
 * only on success.
 */
static int modify_raw_packet_qp_rq(
	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
	MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
			MLX5_SET64(modify_rq_in, in, modify_bitmask,
				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
		} else
			dev_info_once(
				&dev->ib_dev.dev,
				"RAW PACKET QP counters are not supported on current FW\n");
	}

	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}

/*
 * Move the SQ of a raw-packet QP to @new_state via MODIFY_SQ, handling
 * the packet-pacing rate limit: a new rate is added to the device rate
 * table before the command, and the old rate is released only after the
 * command succeeds (or when leaving RDY).  On command failure a freshly
 * added rate is removed again.  ibqp->rl and sq->state are updated only
 * on success.
 */
static int modify_raw_packet_qp_sq(
	struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
	struct mlx5_rate_limit old_rl = ibqp->rl;
	struct mlx5_rate_limit new_rl = old_rl;
	bool new_rate_added = false;
	u16 rl_index = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
	MLX5_SET(modify_sq_in, in, sq_state, sq->state);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, new_state);

	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
		if (new_state != MLX5_SQC_STATE_RDY)
			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
				__func__);
		else
			new_rl = raw_qp_param->rl;
	}

	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
		if (new_rl.rate) {
			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
			if (err) {
				pr_err("Failed configuring rate limit(err %d): \
rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
				       err, new_rl.rate, new_rl.max_burst_sz,
				       new_rl.typical_pkt_sz);

				goto out;
			}
			new_rate_added = true;
		}

		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		/* index 0 means no limit */
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
	if (err) {
		/* Remove new rate from table if failed */
		if (new_rate_added)
			mlx5_rl_remove_rate(dev, &new_rl);
		goto out;
	}

	/* Only remove the old rate after new rate was set */
	if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
	    (new_state != MLX5_SQC_STATE_RDY)) {
		mlx5_rl_remove_rate(dev, &old_rl);
		if (new_state != MLX5_SQC_STATE_RDY)
			memset(&new_rl, 0, sizeof(new_rl));
	}

	ibqp->rl = new_rl;
	sq->state = new_state;

out:
	kvfree(in);
	return err;
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				const struct mlx5_modify_raw_qp_param *raw_qp_param,
				u8 tx_affinity)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct mlx5_ib_sq *sq =
&raw_packet_qp->sq; 38947d29f349SBodong Wang int modify_rq = !!qp->rq.wqe_cnt; 38957d29f349SBodong Wang int modify_sq = !!qp->sq.wqe_cnt; 3896ad5f8e96Smajd@mellanox.com int rq_state; 3897ad5f8e96Smajd@mellanox.com int sq_state; 3898ad5f8e96Smajd@mellanox.com int err; 3899ad5f8e96Smajd@mellanox.com 39000680efa2SAlex Vesker switch (raw_qp_param->operation) { 3901ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_RST2INIT_QP: 3902ad5f8e96Smajd@mellanox.com rq_state = MLX5_RQC_STATE_RDY; 3903c94e272bSMaor Gottlieb sq_state = MLX5_SQC_STATE_RST; 3904ad5f8e96Smajd@mellanox.com break; 3905ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_2ERR_QP: 3906ad5f8e96Smajd@mellanox.com rq_state = MLX5_RQC_STATE_ERR; 3907ad5f8e96Smajd@mellanox.com sq_state = MLX5_SQC_STATE_ERR; 3908ad5f8e96Smajd@mellanox.com break; 3909ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_2RST_QP: 3910ad5f8e96Smajd@mellanox.com rq_state = MLX5_RQC_STATE_RST; 3911ad5f8e96Smajd@mellanox.com sq_state = MLX5_SQC_STATE_RST; 3912ad5f8e96Smajd@mellanox.com break; 3913ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_RTR2RTS_QP: 3914ad5f8e96Smajd@mellanox.com case MLX5_CMD_OP_RTS2RTS_QP: 3915c94e272bSMaor Gottlieb if (raw_qp_param->set_mask & ~MLX5_RAW_QP_RATE_LIMIT) 3916c94e272bSMaor Gottlieb return -EINVAL; 3917c94e272bSMaor Gottlieb 39187d29f349SBodong Wang modify_rq = 0; 3919c94e272bSMaor Gottlieb sq_state = MLX5_SQC_STATE_RDY; 39207d29f349SBodong Wang break; 39217d29f349SBodong Wang case MLX5_CMD_OP_INIT2INIT_QP: 39227d29f349SBodong Wang case MLX5_CMD_OP_INIT2RTR_QP: 3923eb49ab0cSAlex Vesker if (raw_qp_param->set_mask) 3924eb49ab0cSAlex Vesker return -EINVAL; 3925eb49ab0cSAlex Vesker else 3926ad5f8e96Smajd@mellanox.com return 0; 3927ad5f8e96Smajd@mellanox.com default: 3928ad5f8e96Smajd@mellanox.com WARN_ON(1); 3929ad5f8e96Smajd@mellanox.com return -EINVAL; 3930ad5f8e96Smajd@mellanox.com } 3931ad5f8e96Smajd@mellanox.com 39327d29f349SBodong Wang if (modify_rq) { 393334d57585SYishai Hadas err = modify_raw_packet_qp_rq(dev, rq, 
rq_state, raw_qp_param, 393434d57585SYishai Hadas qp->ibqp.pd); 3935ad5f8e96Smajd@mellanox.com if (err) 3936ad5f8e96Smajd@mellanox.com return err; 3937ad5f8e96Smajd@mellanox.com } 3938ad5f8e96Smajd@mellanox.com 39397d29f349SBodong Wang if (modify_sq) { 3940d5ed8ac3SMark Bloch struct mlx5_flow_handle *flow_rule; 3941d5ed8ac3SMark Bloch 394213eab21fSAviv Heller if (tx_affinity) { 394313eab21fSAviv Heller err = modify_raw_packet_tx_affinity(dev->mdev, sq, 39441cd6dbd3SYishai Hadas tx_affinity, 39451cd6dbd3SYishai Hadas qp->ibqp.pd); 394613eab21fSAviv Heller if (err) 394713eab21fSAviv Heller return err; 394813eab21fSAviv Heller } 394913eab21fSAviv Heller 3950d5ed8ac3SMark Bloch flow_rule = create_flow_rule_vport_sq(dev, sq, 3951d5ed8ac3SMark Bloch raw_qp_param->port); 3952d5ed8ac3SMark Bloch if (IS_ERR(flow_rule)) 39531db86318SColin Ian King return PTR_ERR(flow_rule); 3954d5ed8ac3SMark Bloch 3955d5ed8ac3SMark Bloch err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, 3956c14003f0SYishai Hadas raw_qp_param, qp->ibqp.pd); 3957d5ed8ac3SMark Bloch if (err) { 3958d5ed8ac3SMark Bloch if (flow_rule) 3959d5ed8ac3SMark Bloch mlx5_del_flow_rules(flow_rule); 3960d5ed8ac3SMark Bloch return err; 3961d5ed8ac3SMark Bloch } 3962d5ed8ac3SMark Bloch 3963d5ed8ac3SMark Bloch if (flow_rule) { 3964d5ed8ac3SMark Bloch destroy_flow_rule_vport_sq(sq); 3965d5ed8ac3SMark Bloch sq->flow_rule = flow_rule; 3966d5ed8ac3SMark Bloch } 3967d5ed8ac3SMark Bloch 3968d5ed8ac3SMark Bloch return err; 396913eab21fSAviv Heller } 3970ad5f8e96Smajd@mellanox.com 3971ad5f8e96Smajd@mellanox.com return 0; 3972ad5f8e96Smajd@mellanox.com } 3973ad5f8e96Smajd@mellanox.com 39745163b274SMaor Gottlieb static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev, 39755163b274SMaor Gottlieb struct ib_udata *udata) 3976c6a21c38SMajd Dibbiny { 397789944450SShamir Rabinovitch struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 397889944450SShamir Rabinovitch udata, struct mlx5_ib_ucontext, ibucontext); 
39795163b274SMaor Gottlieb u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 39805163b274SMaor Gottlieb atomic_t *tx_port_affinity; 3981c6a21c38SMajd Dibbiny 39825163b274SMaor Gottlieb if (ucontext) 39835163b274SMaor Gottlieb tx_port_affinity = &ucontext->tx_port_affinity; 39845163b274SMaor Gottlieb else 39855163b274SMaor Gottlieb tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity; 39865163b274SMaor Gottlieb 39875163b274SMaor Gottlieb return (unsigned int)atomic_add_return(1, tx_port_affinity) % 398834a30d76SMark Bloch (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1; 3989c6a21c38SMajd Dibbiny } 3990c6a21c38SMajd Dibbiny 39918f3243a0SMark Zhang static bool qp_supports_affinity(struct mlx5_ib_qp *qp) 39925163b274SMaor Gottlieb { 39938f3243a0SMark Zhang if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) || 39948f3243a0SMark Zhang (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) || 39958f3243a0SMark Zhang (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) || 39968f3243a0SMark Zhang (qp->type == MLX5_IB_QPT_DCI)) 39975163b274SMaor Gottlieb return true; 39985163b274SMaor Gottlieb return false; 39995163b274SMaor Gottlieb } 40005163b274SMaor Gottlieb 4001cfc1a89eSMaor Gottlieb static unsigned int get_tx_affinity(struct ib_qp *qp, 4002cfc1a89eSMaor Gottlieb const struct ib_qp_attr *attr, 4003cfc1a89eSMaor Gottlieb int attr_mask, u8 init, 40045163b274SMaor Gottlieb struct ib_udata *udata) 40055163b274SMaor Gottlieb { 40065163b274SMaor Gottlieb struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context( 40075163b274SMaor Gottlieb udata, struct mlx5_ib_ucontext, ibucontext); 40085163b274SMaor Gottlieb struct mlx5_ib_dev *dev = to_mdev(qp->device); 40095163b274SMaor Gottlieb struct mlx5_ib_qp *mqp = to_mqp(qp); 40105163b274SMaor Gottlieb struct mlx5_ib_qp_base *qp_base; 40115163b274SMaor Gottlieb unsigned int tx_affinity; 40125163b274SMaor Gottlieb 4013802dcc7fSMark Zhang if 
(!(mlx5_ib_lag_should_assign_affinity(dev) && 40148f3243a0SMark Zhang qp_supports_affinity(mqp))) 40155163b274SMaor Gottlieb return 0; 40165163b274SMaor Gottlieb 4017cfc1a89eSMaor Gottlieb if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) 4018cfc1a89eSMaor Gottlieb tx_affinity = mqp->gsi_lag_port; 4019cfc1a89eSMaor Gottlieb else if (init) 40205163b274SMaor Gottlieb tx_affinity = get_tx_affinity_rr(dev, udata); 4021cfc1a89eSMaor Gottlieb else if ((attr_mask & IB_QP_AV) && attr->xmit_slave) 4022cfc1a89eSMaor Gottlieb tx_affinity = 4023cfc1a89eSMaor Gottlieb mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave); 4024cfc1a89eSMaor Gottlieb else 4025cfc1a89eSMaor Gottlieb return 0; 40265163b274SMaor Gottlieb 40275163b274SMaor Gottlieb qp_base = &mqp->trans_qp.base; 40285163b274SMaor Gottlieb if (ucontext) 40295163b274SMaor Gottlieb mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n", 40305163b274SMaor Gottlieb tx_affinity, qp_base->mqp.qpn, ucontext); 40315163b274SMaor Gottlieb else 40325163b274SMaor Gottlieb mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n", 40335163b274SMaor Gottlieb tx_affinity, qp_base->mqp.qpn); 40345163b274SMaor Gottlieb return tx_affinity; 4035c6a21c38SMajd Dibbiny } 4036c6a21c38SMajd Dibbiny 4037d14133ddSMark Zhang static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, 4038d14133ddSMark Zhang struct rdma_counter *counter) 4039d14133ddSMark Zhang { 4040d14133ddSMark Zhang struct mlx5_ib_dev *dev = to_mdev(qp->device); 404164bae2d4SLeon Romanovsky u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {}; 4042d14133ddSMark Zhang struct mlx5_ib_qp *mqp = to_mqp(qp); 4043d14133ddSMark Zhang struct mlx5_ib_qp_base *base; 4044d14133ddSMark Zhang u32 set_id; 404564bae2d4SLeon Romanovsky u32 *qpc; 4046d14133ddSMark Zhang 40473e1f000fSParav Pandit if (counter) 4048d14133ddSMark Zhang set_id = counter->id; 40493e1f000fSParav Pandit else 40503e1f000fSParav Pandit set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1); 4051d14133ddSMark Zhang 4052d14133ddSMark 
Zhang base = &mqp->trans_qp.base; 405364bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP); 405464bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn); 405564bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid); 405664bae2d4SLeon Romanovsky MLX5_SET(rts2rts_qp_in, in, opt_param_mask, 405764bae2d4SLeon Romanovsky MLX5_QP_OPTPAR_COUNTER_SET_ID); 405864bae2d4SLeon Romanovsky 405964bae2d4SLeon Romanovsky qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc); 406064bae2d4SLeon Romanovsky MLX5_SET(qpc, qpc, counter_set_id, set_id); 406164bae2d4SLeon Romanovsky return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in); 4062d14133ddSMark Zhang } 4063d14133ddSMark Zhang 4064e126ba97SEli Cohen static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, 4065e126ba97SEli Cohen const struct ib_qp_attr *attr, int attr_mask, 406689944450SShamir Rabinovitch enum ib_qp_state cur_state, 406789944450SShamir Rabinovitch enum ib_qp_state new_state, 406889944450SShamir Rabinovitch const struct mlx5_ib_modify_qp *ucmd, 406950aec2c3SLeon Romanovsky struct mlx5_ib_modify_qp_resp *resp, 407089944450SShamir Rabinovitch struct ib_udata *udata) 4071e126ba97SEli Cohen { 4072427c1e7bSmajd@mellanox.com static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { 4073427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = { 4074427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4075427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4076427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, 4077427c1e7bSmajd@mellanox.com }, 4078427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_INIT] = { 4079427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4080427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4081427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, 4082427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, 
4083427c1e7bSmajd@mellanox.com }, 4084427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTR] = { 4085427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4086427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4087427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, 4088427c1e7bSmajd@mellanox.com }, 4089427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = { 4090427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4091427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4092427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, 4093427c1e7bSmajd@mellanox.com }, 4094427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_SQD] = { 4095427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4096427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4097021c1f24SSergey Gorenko [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD_RTS_QP, 4098427c1e7bSmajd@mellanox.com }, 4099427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_SQER] = { 4100427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4101427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4102427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, 4103427c1e7bSmajd@mellanox.com }, 4104427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = { 4105427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 4106427c1e7bSmajd@mellanox.com [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 4107427c1e7bSmajd@mellanox.com } 4108427c1e7bSmajd@mellanox.com }; 4109427c1e7bSmajd@mellanox.com 4110e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4111e126ba97SEli Cohen struct mlx5_ib_qp *qp = to_mqp(ibqp); 411219098df2Smajd@mellanox.com struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 4113e126ba97SEli Cohen struct mlx5_ib_cq *send_cq, *recv_cq; 4114e126ba97SEli Cohen struct mlx5_ib_pd *pd; 4115e126ba97SEli Cohen enum mlx5_qp_state mlx5_cur, mlx5_new; 4116f18e26afSLeon 
Romanovsky void *qpc, *pri_path, *alt_path; 4117cfc1a89eSMaor Gottlieb enum mlx5_qp_optpar optpar = 0; 4118d14133ddSMark Zhang u32 set_id = 0; 4119e126ba97SEli Cohen int mlx5_st; 4120e126ba97SEli Cohen int err; 4121427c1e7bSmajd@mellanox.com u16 op; 412213eab21fSAviv Heller u8 tx_affinity = 0; 4123e126ba97SEli Cohen 41247aede1a2SLeon Romanovsky mlx5_st = to_mlx5_st(qp->type); 412555de9a77SLeon Romanovsky if (mlx5_st < 0) 412655de9a77SLeon Romanovsky return -EINVAL; 412755de9a77SLeon Romanovsky 4128f18e26afSLeon Romanovsky qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL); 4129f18e26afSLeon Romanovsky if (!qpc) 4130e126ba97SEli Cohen return -ENOMEM; 4131e126ba97SEli Cohen 4132029e88fdSLeon Romanovsky pd = to_mpd(qp->ibqp.pd); 4133f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, st, mlx5_st); 4134e126ba97SEli Cohen 4135e126ba97SEli Cohen if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { 4136f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 4137e126ba97SEli Cohen } else { 4138e126ba97SEli Cohen switch (attr->path_mig_state) { 4139e126ba97SEli Cohen case IB_MIG_MIGRATED: 4140f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 4141e126ba97SEli Cohen break; 4142e126ba97SEli Cohen case IB_MIG_REARM: 4143f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM); 4144e126ba97SEli Cohen break; 4145e126ba97SEli Cohen case IB_MIG_ARMED: 4146f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED); 4147e126ba97SEli Cohen break; 4148e126ba97SEli Cohen } 4149e126ba97SEli Cohen } 4150e126ba97SEli Cohen 4151cfc1a89eSMaor Gottlieb tx_affinity = get_tx_affinity(ibqp, attr, attr_mask, 41525163b274SMaor Gottlieb cur_state == IB_QPS_RESET && 41535163b274SMaor Gottlieb new_state == IB_QPS_INIT, udata); 4154f18e26afSLeon Romanovsky 4155f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity); 4156f18e26afSLeon Romanovsky if (tx_affinity && new_state == IB_QPS_RTR && 4157cfc1a89eSMaor Gottlieb 
MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity)) 4158cfc1a89eSMaor Gottlieb optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF; 415913eab21fSAviv Heller 41609ecf6ac1SMaor Gottlieb if (is_sqp(qp->type)) { 4161f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, mtu, IB_MTU_256); 4162f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_msg_max, 8); 41639ecf6ac1SMaor Gottlieb } else if ((qp->type == IB_QPT_UD && 41642be08c30SLeon Romanovsky !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) || 41659ecf6ac1SMaor Gottlieb qp->type == MLX5_IB_QPT_REG_UMR) { 4166f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, mtu, IB_MTU_4096); 4167f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_msg_max, 12); 4168e126ba97SEli Cohen } else if (attr_mask & IB_QP_PATH_MTU) { 4169e126ba97SEli Cohen if (attr->path_mtu < IB_MTU_256 || 4170e126ba97SEli Cohen attr->path_mtu > IB_MTU_4096) { 4171e126ba97SEli Cohen mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); 4172e126ba97SEli Cohen err = -EINVAL; 4173e126ba97SEli Cohen goto out; 4174e126ba97SEli Cohen } 4175f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, mtu, attr->path_mtu); 4176f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_msg_max, 4177f18e26afSLeon Romanovsky MLX5_CAP_GEN(dev->mdev, log_max_msg)); 4178e126ba97SEli Cohen } 4179e126ba97SEli Cohen 4180e126ba97SEli Cohen if (attr_mask & IB_QP_DEST_QPN) 4181f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num); 4182f18e26afSLeon Romanovsky 4183f18e26afSLeon Romanovsky pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path); 4184f18e26afSLeon Romanovsky alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path); 4185e126ba97SEli Cohen 4186e126ba97SEli Cohen if (attr_mask & IB_QP_PKEY_INDEX) 4187f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index); 4188e126ba97SEli Cohen 4189e126ba97SEli Cohen /* todo implement counter_index functionality */ 4190e126ba97SEli Cohen 41919ecf6ac1SMaor Gottlieb if (is_sqp(qp->type)) 4192f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, vhca_port_num, 
qp->port); 4193e126ba97SEli Cohen 4194e126ba97SEli Cohen if (attr_mask & IB_QP_PORT) 4195f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num); 4196e126ba97SEli Cohen 4197e126ba97SEli Cohen if (attr_mask & IB_QP_AV) { 4198f18e26afSLeon Romanovsky err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path, 4199f18e26afSLeon Romanovsky attr_mask & IB_QP_PORT ? attr->port_num : 4200f18e26afSLeon Romanovsky qp->port, 4201f879ee8dSAchiad Shochat attr_mask, 0, attr, false); 4202e126ba97SEli Cohen if (err) 4203e126ba97SEli Cohen goto out; 4204e126ba97SEli Cohen } 4205e126ba97SEli Cohen 4206e126ba97SEli Cohen if (attr_mask & IB_QP_TIMEOUT) 4207f18e26afSLeon Romanovsky MLX5_SET(ads, pri_path, ack_timeout, attr->timeout); 4208e126ba97SEli Cohen 4209e126ba97SEli Cohen if (attr_mask & IB_QP_ALT_PATH) { 4210f18e26afSLeon Romanovsky err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path, 4211f879ee8dSAchiad Shochat attr->alt_port_num, 4212f18e26afSLeon Romanovsky attr_mask | IB_QP_PKEY_INDEX | 4213f18e26afSLeon Romanovsky IB_QP_TIMEOUT, 4214f879ee8dSAchiad Shochat 0, attr, true); 4215e126ba97SEli Cohen if (err) 4216e126ba97SEli Cohen goto out; 4217e126ba97SEli Cohen } 4218e126ba97SEli Cohen 42199ecf6ac1SMaor Gottlieb get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 422089ea94a7SMaor Gottlieb &send_cq, &recv_cq); 4221e126ba97SEli Cohen 4222f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, pd, pd ? 
pd->pdn : to_mpd(dev->devr.p0)->pdn); 4223f18e26afSLeon Romanovsky if (send_cq) 4224f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn); 4225f18e26afSLeon Romanovsky if (recv_cq) 4226f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn); 4227f18e26afSLeon Romanovsky 4228f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ); 4229e126ba97SEli Cohen 4230e126ba97SEli Cohen if (attr_mask & IB_QP_RNR_RETRY) 4231f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry); 4232e126ba97SEli Cohen 4233e126ba97SEli Cohen if (attr_mask & IB_QP_RETRY_CNT) 4234f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); 4235e126ba97SEli Cohen 4236f18e26afSLeon Romanovsky if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic) 4237f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic)); 4238e126ba97SEli Cohen 4239e126ba97SEli Cohen if (attr_mask & IB_QP_SQ_PSN) 4240f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn); 4241e126ba97SEli Cohen 4242f18e26afSLeon Romanovsky if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic) 4243f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, log_rra_max, 4244f18e26afSLeon Romanovsky ilog2(attr->max_dest_rd_atomic)); 4245e126ba97SEli Cohen 4246a60109dcSYonatan Cohen if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { 4247f18e26afSLeon Romanovsky err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc); 4248a60109dcSYonatan Cohen if (err) 4249a60109dcSYonatan Cohen goto out; 4250a60109dcSYonatan Cohen } 4251e126ba97SEli Cohen 4252e126ba97SEli Cohen if (attr_mask & IB_QP_MIN_RNR_TIMER) 4253f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer); 4254e126ba97SEli Cohen 4255e126ba97SEli Cohen if (attr_mask & IB_QP_RQ_PSN) 4256f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn); 4257e126ba97SEli Cohen 4258e126ba97SEli Cohen if (attr_mask & 
IB_QP_QKEY) 4259f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, q_key, attr->qkey); 4260e126ba97SEli Cohen 4261e126ba97SEli Cohen if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 4262f18e26afSLeon Romanovsky MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 4263e126ba97SEli Cohen 42640837e86aSMark Bloch if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 42650837e86aSMark Bloch u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num : 42660837e86aSMark Bloch qp->port) - 1; 4267c2e53b2cSYishai Hadas 4268c2e53b2cSYishai Hadas /* Underlay port should be used - index 0 function per port */ 42692be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) 4270c2e53b2cSYishai Hadas port_num = 0; 4271c2e53b2cSYishai Hadas 4272d14133ddSMark Zhang if (ibqp->counter) 4273d14133ddSMark Zhang set_id = ibqp->counter->id; 4274d14133ddSMark Zhang else 42753e1f000fSParav Pandit set_id = mlx5_ib_get_counters_id(dev, port_num); 4276f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, counter_set_id, set_id); 42770837e86aSMark Bloch } 42780837e86aSMark Bloch 4279e126ba97SEli Cohen if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 4280f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, rlky, 1); 4281e126ba97SEli Cohen 42822be08c30SLeon Romanovsky if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) 4283f18e26afSLeon Romanovsky MLX5_SET(qpc, qpc, deth_sqpn, 1); 4284e126ba97SEli Cohen 4285e126ba97SEli Cohen mlx5_cur = to_mlx5_state(cur_state); 4286e126ba97SEli Cohen mlx5_new = to_mlx5_state(new_state); 4287e126ba97SEli Cohen 4288427c1e7bSmajd@mellanox.com if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || 42895d414b17SDan Carpenter !optab[mlx5_cur][mlx5_new]) { 42905d414b17SDan Carpenter err = -EINVAL; 4291427c1e7bSmajd@mellanox.com goto out; 42925d414b17SDan Carpenter } 4293427c1e7bSmajd@mellanox.com 4294427c1e7bSmajd@mellanox.com op = optab[mlx5_cur][mlx5_new]; 4295cfc1a89eSMaor Gottlieb optpar |= ib_mask_to_mlx5_opt(attr_mask); 
4296e126ba97SEli Cohen optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; 4297ad5f8e96Smajd@mellanox.com 42989ecf6ac1SMaor Gottlieb if (qp->type == IB_QPT_RAW_PACKET || 42992be08c30SLeon Romanovsky qp->flags & IB_QP_CREATE_SOURCE_QPN) { 43000680efa2SAlex Vesker struct mlx5_modify_raw_qp_param raw_qp_param = {}; 43010680efa2SAlex Vesker 43020680efa2SAlex Vesker raw_qp_param.operation = op; 4303eb49ab0cSAlex Vesker if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 4304d14133ddSMark Zhang raw_qp_param.rq_q_ctr_id = set_id; 4305eb49ab0cSAlex Vesker raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID; 4306eb49ab0cSAlex Vesker } 43077d29f349SBodong Wang 4308d5ed8ac3SMark Bloch if (attr_mask & IB_QP_PORT) 4309d5ed8ac3SMark Bloch raw_qp_param.port = attr->port_num; 4310d5ed8ac3SMark Bloch 43117d29f349SBodong Wang if (attr_mask & IB_QP_RATE_LIMIT) { 431261147f39SBodong Wang raw_qp_param.rl.rate = attr->rate_limit; 431361147f39SBodong Wang 431461147f39SBodong Wang if (ucmd->burst_info.max_burst_sz) { 431561147f39SBodong Wang if (attr->rate_limit && 431661147f39SBodong Wang MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) { 431761147f39SBodong Wang raw_qp_param.rl.max_burst_sz = 431861147f39SBodong Wang ucmd->burst_info.max_burst_sz; 431961147f39SBodong Wang } else { 432061147f39SBodong Wang err = -EINVAL; 432161147f39SBodong Wang goto out; 432261147f39SBodong Wang } 432361147f39SBodong Wang } 432461147f39SBodong Wang 432561147f39SBodong Wang if (ucmd->burst_info.typical_pkt_sz) { 432661147f39SBodong Wang if (attr->rate_limit && 432761147f39SBodong Wang MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) { 432861147f39SBodong Wang raw_qp_param.rl.typical_pkt_sz = 432961147f39SBodong Wang ucmd->burst_info.typical_pkt_sz; 433061147f39SBodong Wang } else { 433161147f39SBodong Wang err = -EINVAL; 433261147f39SBodong Wang goto out; 433361147f39SBodong Wang } 433461147f39SBodong Wang } 433561147f39SBodong Wang 43367d29f349SBodong Wang raw_qp_param.set_mask |= 
MLX5_RAW_QP_RATE_LIMIT; 43377d29f349SBodong Wang } 43387d29f349SBodong Wang 433913eab21fSAviv Heller err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity); 43400680efa2SAlex Vesker } else { 434150aec2c3SLeon Romanovsky if (udata) { 434250aec2c3SLeon Romanovsky /* For the kernel flows, the resp will stay zero */ 434350aec2c3SLeon Romanovsky resp->ece_options = 434450aec2c3SLeon Romanovsky MLX5_CAP_GEN(dev->mdev, ece_support) ? 43455f62a521SLeon Romanovsky ucmd->ece_options : 0; 434650aec2c3SLeon Romanovsky resp->response_length = sizeof(*resp); 434750aec2c3SLeon Romanovsky } 43485f62a521SLeon Romanovsky err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp, 434950aec2c3SLeon Romanovsky &resp->ece_options); 43500680efa2SAlex Vesker } 43510680efa2SAlex Vesker 4352e126ba97SEli Cohen if (err) 4353e126ba97SEli Cohen goto out; 4354e126ba97SEli Cohen 4355e126ba97SEli Cohen qp->state = new_state; 4356e126ba97SEli Cohen 4357e126ba97SEli Cohen if (attr_mask & IB_QP_ACCESS_FLAGS) 435819098df2Smajd@mellanox.com qp->trans_qp.atomic_rd_en = attr->qp_access_flags; 4359e126ba97SEli Cohen if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 436019098df2Smajd@mellanox.com qp->trans_qp.resp_depth = attr->max_dest_rd_atomic; 4361e126ba97SEli Cohen if (attr_mask & IB_QP_PORT) 4362e126ba97SEli Cohen qp->port = attr->port_num; 4363e126ba97SEli Cohen if (attr_mask & IB_QP_ALT_PATH) 436419098df2Smajd@mellanox.com qp->trans_qp.alt_port = attr->alt_port_num; 4365e126ba97SEli Cohen 4366e126ba97SEli Cohen /* 4367e126ba97SEli Cohen * If we moved a kernel QP to RESET, clean up all old CQ 4368e126ba97SEli Cohen * entries and reinitialize the QP. 4369e126ba97SEli Cohen */ 437075a45982SLeon Romanovsky if (new_state == IB_QPS_RESET && 43719ecf6ac1SMaor Gottlieb !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) { 437219098df2Smajd@mellanox.com mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 4373e126ba97SEli Cohen ibqp->srq ? 
to_msrq(ibqp->srq) : NULL); 4374e126ba97SEli Cohen if (send_cq != recv_cq) 437519098df2Smajd@mellanox.com mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL); 4376e126ba97SEli Cohen 4377e126ba97SEli Cohen qp->rq.head = 0; 4378e126ba97SEli Cohen qp->rq.tail = 0; 4379e126ba97SEli Cohen qp->sq.head = 0; 4380e126ba97SEli Cohen qp->sq.tail = 0; 4381e126ba97SEli Cohen qp->sq.cur_post = 0; 438234f4c955SGuy Levi if (qp->sq.wqe_cnt) 438334f4c955SGuy Levi qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); 4384950bf4f1SLeon Romanovsky qp->sq.last_poll = 0; 4385e126ba97SEli Cohen qp->db.db[MLX5_RCV_DBR] = 0; 4386e126ba97SEli Cohen qp->db.db[MLX5_SND_DBR] = 0; 4387e126ba97SEli Cohen } 4388e126ba97SEli Cohen 4389d14133ddSMark Zhang if ((new_state == IB_QPS_RTS) && qp->counter_pending) { 4390d14133ddSMark Zhang err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter); 4391d14133ddSMark Zhang if (!err) 4392d14133ddSMark Zhang qp->counter_pending = 0; 4393d14133ddSMark Zhang } 4394d14133ddSMark Zhang 4395e126ba97SEli Cohen out: 4396f18e26afSLeon Romanovsky kfree(qpc); 4397e126ba97SEli Cohen return err; 4398e126ba97SEli Cohen } 4399e126ba97SEli Cohen 4400c32a4f29SMoni Shoua static inline bool is_valid_mask(int mask, int req, int opt) 4401c32a4f29SMoni Shoua { 4402c32a4f29SMoni Shoua if ((mask & req) != req) 4403c32a4f29SMoni Shoua return false; 4404c32a4f29SMoni Shoua 4405c32a4f29SMoni Shoua if (mask & ~(req | opt)) 4406c32a4f29SMoni Shoua return false; 4407c32a4f29SMoni Shoua 4408c32a4f29SMoni Shoua return true; 4409c32a4f29SMoni Shoua } 4410c32a4f29SMoni Shoua 4411c32a4f29SMoni Shoua /* check valid transition for driver QP types 4412c32a4f29SMoni Shoua * for now the only QP type that this function supports is DCI 4413c32a4f29SMoni Shoua */ 4414c32a4f29SMoni Shoua static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state, 4415c32a4f29SMoni Shoua enum ib_qp_attr_mask attr_mask) 4416c32a4f29SMoni Shoua { 4417c32a4f29SMoni Shoua int req = IB_QP_STATE; 4418c32a4f29SMoni 
Shoua int opt = 0; 4419c32a4f29SMoni Shoua 442099ed748eSMoni Shoua if (new_state == IB_QPS_RESET) { 442199ed748eSMoni Shoua return is_valid_mask(attr_mask, req, opt); 442299ed748eSMoni Shoua } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 4423c32a4f29SMoni Shoua req |= IB_QP_PKEY_INDEX | IB_QP_PORT; 4424c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4425c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { 4426c32a4f29SMoni Shoua opt = IB_QP_PKEY_INDEX | IB_QP_PORT; 4427c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4428c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 4429c32a4f29SMoni Shoua req |= IB_QP_PATH_MTU; 44305ec0304cSArtemy Kovalyov opt = IB_QP_PKEY_INDEX | IB_QP_AV; 4431c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4432c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { 4433c32a4f29SMoni Shoua req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | 4434c32a4f29SMoni Shoua IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN; 4435c32a4f29SMoni Shoua opt = IB_QP_MIN_RNR_TIMER; 4436c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4437c32a4f29SMoni Shoua } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) { 4438c32a4f29SMoni Shoua opt = IB_QP_MIN_RNR_TIMER; 4439c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4440c32a4f29SMoni Shoua } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) { 4441c32a4f29SMoni Shoua return is_valid_mask(attr_mask, req, opt); 4442c32a4f29SMoni Shoua } 4443c32a4f29SMoni Shoua return false; 4444c32a4f29SMoni Shoua } 4445c32a4f29SMoni Shoua 4446776a3906SMoni Shoua /* mlx5_ib_modify_dct: modify a DCT QP 4447776a3906SMoni Shoua * valid transitions are: 4448776a3906SMoni Shoua * RESET to INIT: must set access_flags, pkey_index and port 4449776a3906SMoni Shoua * INIT to RTR : must set min_rnr_timer, tclass, flow_label, 
4450776a3906SMoni Shoua * mtu, gid_index and hop_limit 4451776a3906SMoni Shoua * Other transitions and attributes are illegal 4452776a3906SMoni Shoua */ 4453776a3906SMoni Shoua static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, 4454a645a89dSLeon Romanovsky int attr_mask, struct mlx5_ib_modify_qp *ucmd, 4455a645a89dSLeon Romanovsky struct ib_udata *udata) 4456776a3906SMoni Shoua { 4457776a3906SMoni Shoua struct mlx5_ib_qp *qp = to_mqp(ibqp); 4458776a3906SMoni Shoua struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4459776a3906SMoni Shoua enum ib_qp_state cur_state, new_state; 4460776a3906SMoni Shoua int required = IB_QP_STATE; 4461776a3906SMoni Shoua void *dctc; 446271cab8efSLeon Romanovsky int err; 4463776a3906SMoni Shoua 4464776a3906SMoni Shoua if (!(attr_mask & IB_QP_STATE)) 4465776a3906SMoni Shoua return -EINVAL; 4466776a3906SMoni Shoua 4467776a3906SMoni Shoua cur_state = qp->state; 4468776a3906SMoni Shoua new_state = attr->qp_state; 4469776a3906SMoni Shoua 4470776a3906SMoni Shoua dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); 4471a645a89dSLeon Romanovsky if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options) 4472a645a89dSLeon Romanovsky /* 4473a645a89dSLeon Romanovsky * DCT doesn't initialize QP till modify command is executed, 4474a645a89dSLeon Romanovsky * so we need to overwrite previously set ECE field if user 4475a645a89dSLeon Romanovsky * provided any value except zero, which means not set/not 4476a645a89dSLeon Romanovsky * valid. 
4477a645a89dSLeon Romanovsky */ 4478a645a89dSLeon Romanovsky MLX5_SET(dctc, dctc, ece, ucmd->ece_options); 4479a645a89dSLeon Romanovsky 4480776a3906SMoni Shoua if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 44813e1f000fSParav Pandit u16 set_id; 44823e1f000fSParav Pandit 4483776a3906SMoni Shoua required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; 4484776a3906SMoni Shoua if (!is_valid_mask(attr_mask, required, 0)) 4485776a3906SMoni Shoua return -EINVAL; 4486776a3906SMoni Shoua 4487776a3906SMoni Shoua if (attr->port_num == 0 || 4488776a3906SMoni Shoua attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) { 4489776a3906SMoni Shoua mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n", 4490776a3906SMoni Shoua attr->port_num, dev->num_ports); 4491776a3906SMoni Shoua return -EINVAL; 4492776a3906SMoni Shoua } 4493776a3906SMoni Shoua if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) 4494776a3906SMoni Shoua MLX5_SET(dctc, dctc, rre, 1); 4495776a3906SMoni Shoua if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 4496776a3906SMoni Shoua MLX5_SET(dctc, dctc, rwe, 1); 4497776a3906SMoni Shoua if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) { 4498a60109dcSYonatan Cohen int atomic_mode; 4499a60109dcSYonatan Cohen 4500a60109dcSYonatan Cohen atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT); 4501a60109dcSYonatan Cohen if (atomic_mode < 0) 4502776a3906SMoni Shoua return -EOPNOTSUPP; 4503a60109dcSYonatan Cohen 4504a60109dcSYonatan Cohen MLX5_SET(dctc, dctc, atomic_mode, atomic_mode); 4505776a3906SMoni Shoua MLX5_SET(dctc, dctc, rae, 1); 4506776a3906SMoni Shoua } 4507776a3906SMoni Shoua MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index); 45087c4b1ab9SMark Zhang if (mlx5_lag_is_active(dev->mdev)) 45097c4b1ab9SMark Zhang MLX5_SET(dctc, dctc, port, 45107c4b1ab9SMark Zhang get_tx_affinity_rr(dev, udata)); 45117c4b1ab9SMark Zhang else 4512776a3906SMoni Shoua MLX5_SET(dctc, dctc, port, attr->port_num); 45133e1f000fSParav Pandit 45143e1f000fSParav 
Pandit set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1); 45153e1f000fSParav Pandit MLX5_SET(dctc, dctc, counter_set_id, set_id); 4516776a3906SMoni Shoua } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 4517776a3906SMoni Shoua struct mlx5_ib_modify_qp_resp resp = {}; 4518a645a89dSLeon Romanovsky u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {}; 4519a645a89dSLeon Romanovsky u32 min_resp_len = offsetofend(typeof(resp), dctn); 4520776a3906SMoni Shoua 4521776a3906SMoni Shoua if (udata->outlen < min_resp_len) 4522776a3906SMoni Shoua return -EINVAL; 4523a645a89dSLeon Romanovsky /* 4524a645a89dSLeon Romanovsky * If we don't have enough space for the ECE options, 4525a645a89dSLeon Romanovsky * simply indicate it with resp.response_length. 4526a645a89dSLeon Romanovsky */ 4527a645a89dSLeon Romanovsky resp.response_length = (udata->outlen < sizeof(resp)) ? 4528a645a89dSLeon Romanovsky min_resp_len : 4529a645a89dSLeon Romanovsky sizeof(resp); 4530a645a89dSLeon Romanovsky 4531776a3906SMoni Shoua required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU; 4532776a3906SMoni Shoua if (!is_valid_mask(attr_mask, required, 0)) 4533776a3906SMoni Shoua return -EINVAL; 4534776a3906SMoni Shoua MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer); 4535776a3906SMoni Shoua MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class); 4536776a3906SMoni Shoua MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label); 4537776a3906SMoni Shoua MLX5_SET(dctc, dctc, mtu, attr->path_mtu); 4538776a3906SMoni Shoua MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index); 4539776a3906SMoni Shoua MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); 45401ab52ac1SPatrisious Haddad if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) 45411ab52ac1SPatrisious Haddad MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7); 4542776a3906SMoni Shoua 4543333fbaa0SLeon Romanovsky err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in, 4544c5ae1954SYishai Hadas 
MLX5_ST_SZ_BYTES(create_dct_in), out, 4545c5ae1954SYishai Hadas sizeof(out)); 454631803e59SSaeed Mahameed err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out); 4547776a3906SMoni Shoua if (err) 4548776a3906SMoni Shoua return err; 4549776a3906SMoni Shoua resp.dctn = qp->dct.mdct.mqp.qpn; 4550a645a89dSLeon Romanovsky if (MLX5_CAP_GEN(dev->mdev, ece_support)) 4551a645a89dSLeon Romanovsky resp.ece_options = MLX5_GET(create_dct_out, out, ece); 4552776a3906SMoni Shoua err = ib_copy_to_udata(udata, &resp, resp.response_length); 4553776a3906SMoni Shoua if (err) { 4554333fbaa0SLeon Romanovsky mlx5_core_destroy_dct(dev, &qp->dct.mdct); 4555776a3906SMoni Shoua return err; 4556776a3906SMoni Shoua } 4557776a3906SMoni Shoua } else { 4558776a3906SMoni Shoua mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state); 4559776a3906SMoni Shoua return -EINVAL; 4560776a3906SMoni Shoua } 456171cab8efSLeon Romanovsky 4562776a3906SMoni Shoua qp->state = new_state; 456371cab8efSLeon Romanovsky return 0; 4564776a3906SMoni Shoua } 4565776a3906SMoni Shoua 45662614488dSMark Bloch static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev, 45679ecf6ac1SMaor Gottlieb struct mlx5_ib_qp *qp) 45682614488dSMark Bloch { 45692614488dSMark Bloch if (dev->profile != &raw_eth_profile) 45702614488dSMark Bloch return true; 45712614488dSMark Bloch 45729ecf6ac1SMaor Gottlieb if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR) 45732614488dSMark Bloch return true; 45742614488dSMark Bloch 45752614488dSMark Bloch /* Internal QP used for wc testing, with NOPs in wq */ 45762614488dSMark Bloch if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST) 45772614488dSMark Bloch return true; 45782614488dSMark Bloch 45792614488dSMark Bloch return false; 45802614488dSMark Bloch } 45812614488dSMark Bloch 45828de8482fSMaor Gottlieb static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr, 45838de8482fSMaor Gottlieb int attr_mask, enum ib_qp_type qp_type) 
45848de8482fSMaor Gottlieb { 45858de8482fSMaor Gottlieb int log_max_ra_res; 45868de8482fSMaor Gottlieb int log_max_ra_req; 45878de8482fSMaor Gottlieb 45888de8482fSMaor Gottlieb if (qp_type == MLX5_IB_QPT_DCI) { 45898de8482fSMaor Gottlieb log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, 45908de8482fSMaor Gottlieb log_max_ra_res_dc); 45918de8482fSMaor Gottlieb log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, 45928de8482fSMaor Gottlieb log_max_ra_req_dc); 45938de8482fSMaor Gottlieb } else { 45948de8482fSMaor Gottlieb log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, 45958de8482fSMaor Gottlieb log_max_ra_res_qp); 45968de8482fSMaor Gottlieb log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, 45978de8482fSMaor Gottlieb log_max_ra_req_qp); 45988de8482fSMaor Gottlieb } 45998de8482fSMaor Gottlieb 46008de8482fSMaor Gottlieb if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 46018de8482fSMaor Gottlieb attr->max_rd_atomic > log_max_ra_res) { 46028de8482fSMaor Gottlieb mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", 46038de8482fSMaor Gottlieb attr->max_rd_atomic); 46048de8482fSMaor Gottlieb return false; 46058de8482fSMaor Gottlieb } 46068de8482fSMaor Gottlieb 46078de8482fSMaor Gottlieb if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 46088de8482fSMaor Gottlieb attr->max_dest_rd_atomic > log_max_ra_req) { 46098de8482fSMaor Gottlieb mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", 46108de8482fSMaor Gottlieb attr->max_dest_rd_atomic); 46118de8482fSMaor Gottlieb return false; 46128de8482fSMaor Gottlieb } 46138de8482fSMaor Gottlieb return true; 46148de8482fSMaor Gottlieb } 46158de8482fSMaor Gottlieb 4616e126ba97SEli Cohen int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 4617e126ba97SEli Cohen int attr_mask, struct ib_udata *udata) 4618e126ba97SEli Cohen { 4619e126ba97SEli Cohen struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 462050aec2c3SLeon Romanovsky struct mlx5_ib_modify_qp_resp resp = {}; 4621e126ba97SEli Cohen struct mlx5_ib_qp *qp = to_mqp(ibqp); 462261147f39SBodong Wang 
struct mlx5_ib_modify_qp ucmd = {}; 4623d16e91daSHaggai Eran enum ib_qp_type qp_type; 4624e126ba97SEli Cohen enum ib_qp_state cur_state, new_state; 4625e126ba97SEli Cohen int err = -EINVAL; 4626e126ba97SEli Cohen 46279ecf6ac1SMaor Gottlieb if (!mlx5_ib_modify_qp_allowed(dev, qp)) 46282614488dSMark Bloch return -EOPNOTSUPP; 46292614488dSMark Bloch 463026e990baSJason Gunthorpe if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT)) 463126e990baSJason Gunthorpe return -EOPNOTSUPP; 463226e990baSJason Gunthorpe 463328d61370SYishai Hadas if (ibqp->rwq_ind_tbl) 463428d61370SYishai Hadas return -ENOSYS; 463528d61370SYishai Hadas 463661147f39SBodong Wang if (udata && udata->inlen) { 46375f62a521SLeon Romanovsky if (udata->inlen < offsetofend(typeof(ucmd), ece_options)) 463861147f39SBodong Wang return -EINVAL; 463961147f39SBodong Wang 464061147f39SBodong Wang if (udata->inlen > sizeof(ucmd) && 464161147f39SBodong Wang !ib_is_udata_cleared(udata, sizeof(ucmd), 464261147f39SBodong Wang udata->inlen - sizeof(ucmd))) 464361147f39SBodong Wang return -EOPNOTSUPP; 464461147f39SBodong Wang 464561147f39SBodong Wang if (ib_copy_from_udata(&ucmd, udata, 464661147f39SBodong Wang min(udata->inlen, sizeof(ucmd)))) 464761147f39SBodong Wang return -EFAULT; 464861147f39SBodong Wang 464961147f39SBodong Wang if (ucmd.comp_mask || 465061147f39SBodong Wang memchr_inv(&ucmd.burst_info.reserved, 0, 465161147f39SBodong Wang sizeof(ucmd.burst_info.reserved))) 465261147f39SBodong Wang return -EOPNOTSUPP; 46535f62a521SLeon Romanovsky 465461147f39SBodong Wang } 465561147f39SBodong Wang 46569ecf6ac1SMaor Gottlieb if (qp->type == IB_QPT_GSI) 4657d16e91daSHaggai Eran return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask); 4658d16e91daSHaggai Eran 46599ecf6ac1SMaor Gottlieb qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? 
IB_QPT_GSI : qp->type; 4660d16e91daSHaggai Eran 4661a645a89dSLeon Romanovsky if (qp_type == MLX5_IB_QPT_DCT) 4662a645a89dSLeon Romanovsky return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata); 4663c32a4f29SMoni Shoua 4664e126ba97SEli Cohen mutex_lock(&qp->mutex); 4665e126ba97SEli Cohen 4666e126ba97SEli Cohen cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; 4667e126ba97SEli Cohen new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 4668e126ba97SEli Cohen 46692be08c30SLeon Romanovsky if (qp->flags & IB_QP_CREATE_SOURCE_QPN) { 4670c2e53b2cSYishai Hadas if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) { 4671c2e53b2cSYishai Hadas mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n", 4672c2e53b2cSYishai Hadas attr_mask); 4673c2e53b2cSYishai Hadas goto out; 4674c2e53b2cSYishai Hadas } 4675c2e53b2cSYishai Hadas } else if (qp_type != MLX5_IB_QPT_REG_UMR && 4676c32a4f29SMoni Shoua qp_type != MLX5_IB_QPT_DCI && 4677d31131bbSKamal Heib !ib_modify_qp_is_ok(cur_state, new_state, qp_type, 4678d31131bbSKamal Heib attr_mask)) { 4679158abf86SHaggai Eran mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", 46809ecf6ac1SMaor Gottlieb cur_state, new_state, qp->type, attr_mask); 4681e126ba97SEli Cohen goto out; 4682c32a4f29SMoni Shoua } else if (qp_type == MLX5_IB_QPT_DCI && 4683c32a4f29SMoni Shoua !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) { 4684c32a4f29SMoni Shoua mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", 4685c32a4f29SMoni Shoua cur_state, new_state, qp_type, attr_mask); 4686c32a4f29SMoni Shoua goto out; 4687158abf86SHaggai Eran } 4688e126ba97SEli Cohen 4689e126ba97SEli Cohen if ((attr_mask & IB_QP_PORT) && 4690938fe83cSSaeed Mahameed (attr->port_num == 0 || 4691508562d6SDaniel Jurgens attr->port_num > dev->num_ports)) { 4692158abf86SHaggai Eran mlx5_ib_dbg(dev, "invalid port number %d. 
number of ports is %d\n", 4693158abf86SHaggai Eran attr->port_num, dev->num_ports); 4694e126ba97SEli Cohen goto out; 4695158abf86SHaggai Eran } 4696e126ba97SEli Cohen 46972019d70eSParav Pandit if ((attr_mask & IB_QP_PKEY_INDEX) && 46982019d70eSParav Pandit attr->pkey_index >= dev->pkey_table_len) { 46992019d70eSParav Pandit mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index); 4700e126ba97SEli Cohen goto out; 4701e126ba97SEli Cohen } 4702e126ba97SEli Cohen 47038de8482fSMaor Gottlieb if (!validate_rd_atomic(dev, attr, attr_mask, qp_type)) 4704e126ba97SEli Cohen goto out; 4705e126ba97SEli Cohen 4706e126ba97SEli Cohen if (cur_state == new_state && cur_state == IB_QPS_RESET) { 4707e126ba97SEli Cohen err = 0; 4708e126ba97SEli Cohen goto out; 4709e126ba97SEli Cohen } 4710e126ba97SEli Cohen 471161147f39SBodong Wang err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, 471250aec2c3SLeon Romanovsky new_state, &ucmd, &resp, udata); 471350aec2c3SLeon Romanovsky 471450aec2c3SLeon Romanovsky /* resp.response_length is set in ECE supported flows only */ 471550aec2c3SLeon Romanovsky if (!err && resp.response_length && 471650aec2c3SLeon Romanovsky udata->outlen >= resp.response_length) 47176512f11dSLeon Romanovsky /* Return -EFAULT to the user and expect him to destroy QP. 
*/ 47186512f11dSLeon Romanovsky err = ib_copy_to_udata(udata, &resp, resp.response_length); 4719e126ba97SEli Cohen 4720e126ba97SEli Cohen out: 4721e126ba97SEli Cohen mutex_unlock(&qp->mutex); 4722e126ba97SEli Cohen return err; 4723e126ba97SEli Cohen } 4724e126ba97SEli Cohen 4725e126ba97SEli Cohen static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) 4726e126ba97SEli Cohen { 4727e126ba97SEli Cohen switch (mlx5_state) { 4728e126ba97SEli Cohen case MLX5_QP_STATE_RST: return IB_QPS_RESET; 4729e126ba97SEli Cohen case MLX5_QP_STATE_INIT: return IB_QPS_INIT; 4730e126ba97SEli Cohen case MLX5_QP_STATE_RTR: return IB_QPS_RTR; 4731e126ba97SEli Cohen case MLX5_QP_STATE_RTS: return IB_QPS_RTS; 4732e126ba97SEli Cohen case MLX5_QP_STATE_SQ_DRAINING: 4733e126ba97SEli Cohen case MLX5_QP_STATE_SQD: return IB_QPS_SQD; 4734e126ba97SEli Cohen case MLX5_QP_STATE_SQER: return IB_QPS_SQE; 4735e126ba97SEli Cohen case MLX5_QP_STATE_ERR: return IB_QPS_ERR; 4736e126ba97SEli Cohen default: return -1; 4737e126ba97SEli Cohen } 4738e126ba97SEli Cohen } 4739e126ba97SEli Cohen 4740e126ba97SEli Cohen static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) 4741e126ba97SEli Cohen { 4742e126ba97SEli Cohen switch (mlx5_mig_state) { 4743e126ba97SEli Cohen case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; 4744e126ba97SEli Cohen case MLX5_QP_PM_REARM: return IB_MIG_REARM; 4745e126ba97SEli Cohen case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; 4746e126ba97SEli Cohen default: return -1; 4747e126ba97SEli Cohen } 4748e126ba97SEli Cohen } 4749e126ba97SEli Cohen 475038349389SDasaratharaman Chandramouli static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, 475170bd7fb8SLeon Romanovsky struct rdma_ah_attr *ah_attr, void *path) 4752e126ba97SEli Cohen { 475370bd7fb8SLeon Romanovsky int port = MLX5_GET(ads, path, vhca_port_num); 475470bd7fb8SLeon Romanovsky int static_rate; 4755e126ba97SEli Cohen 4756d8966fcdSDasaratharaman Chandramouli memset(ah_attr, 0, sizeof(*ah_attr)); 
4757e126ba97SEli Cohen 475870bd7fb8SLeon Romanovsky if (!port || port > ibdev->num_ports) 4759e126ba97SEli Cohen return; 4760e126ba97SEli Cohen 476170bd7fb8SLeon Romanovsky ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port); 4762ae59c3f0SLeon Romanovsky 476370bd7fb8SLeon Romanovsky rdma_ah_set_port_num(ah_attr, port); 476470bd7fb8SLeon Romanovsky rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl)); 4765e126ba97SEli Cohen 476670bd7fb8SLeon Romanovsky rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid)); 476770bd7fb8SLeon Romanovsky rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid)); 47682d7e3ff7SAharon Landau 476970bd7fb8SLeon Romanovsky static_rate = MLX5_GET(ads, path, stat_rate); 47706fe6e568SMark Zhang rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate)); 477170bd7fb8SLeon Romanovsky if (MLX5_GET(ads, path, grh) || 47722d7e3ff7SAharon Landau ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { 477370bd7fb8SLeon Romanovsky rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label), 477470bd7fb8SLeon Romanovsky MLX5_GET(ads, path, src_addr_index), 477570bd7fb8SLeon Romanovsky MLX5_GET(ads, path, hop_limit), 477670bd7fb8SLeon Romanovsky MLX5_GET(ads, path, tclass)); 4777d4433557SMaor Gottlieb rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip)); 4778e126ba97SEli Cohen } 4779e126ba97SEli Cohen } 4780e126ba97SEli Cohen 47816d2f89dfSmajd@mellanox.com static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, 47826d2f89dfSmajd@mellanox.com struct mlx5_ib_sq *sq, 47836d2f89dfSmajd@mellanox.com u8 *sq_state) 4784e126ba97SEli Cohen { 47856d2f89dfSmajd@mellanox.com int err; 47866d2f89dfSmajd@mellanox.com 478728160771SEran Ben Elisha err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state); 47886d2f89dfSmajd@mellanox.com if (err) 47896d2f89dfSmajd@mellanox.com goto out; 47906d2f89dfSmajd@mellanox.com sq->state = *sq_state; 47916d2f89dfSmajd@mellanox.com 47926d2f89dfSmajd@mellanox.com out: 47936d2f89dfSmajd@mellanox.com return 
err; 47946d2f89dfSmajd@mellanox.com } 47956d2f89dfSmajd@mellanox.com 47966d2f89dfSmajd@mellanox.com static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, 47976d2f89dfSmajd@mellanox.com struct mlx5_ib_rq *rq, 47986d2f89dfSmajd@mellanox.com u8 *rq_state) 47996d2f89dfSmajd@mellanox.com { 48006d2f89dfSmajd@mellanox.com void *out; 48016d2f89dfSmajd@mellanox.com void *rqc; 48026d2f89dfSmajd@mellanox.com int inlen; 48036d2f89dfSmajd@mellanox.com int err; 48046d2f89dfSmajd@mellanox.com 48056d2f89dfSmajd@mellanox.com inlen = MLX5_ST_SZ_BYTES(query_rq_out); 48061b9a07eeSLeon Romanovsky out = kvzalloc(inlen, GFP_KERNEL); 48076d2f89dfSmajd@mellanox.com if (!out) 48086d2f89dfSmajd@mellanox.com return -ENOMEM; 48096d2f89dfSmajd@mellanox.com 48106d2f89dfSmajd@mellanox.com err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out); 48116d2f89dfSmajd@mellanox.com if (err) 48126d2f89dfSmajd@mellanox.com goto out; 48136d2f89dfSmajd@mellanox.com 48146d2f89dfSmajd@mellanox.com rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context); 48156d2f89dfSmajd@mellanox.com *rq_state = MLX5_GET(rqc, rqc, state); 48166d2f89dfSmajd@mellanox.com rq->state = *rq_state; 48176d2f89dfSmajd@mellanox.com 48186d2f89dfSmajd@mellanox.com out: 48196d2f89dfSmajd@mellanox.com kvfree(out); 48206d2f89dfSmajd@mellanox.com return err; 48216d2f89dfSmajd@mellanox.com } 48226d2f89dfSmajd@mellanox.com 48236d2f89dfSmajd@mellanox.com static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state, 48246d2f89dfSmajd@mellanox.com struct mlx5_ib_qp *qp, u8 *qp_state) 48256d2f89dfSmajd@mellanox.com { 48266d2f89dfSmajd@mellanox.com static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = { 48276d2f89dfSmajd@mellanox.com [MLX5_RQC_STATE_RST] = { 48286d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RST] = IB_QPS_RESET, 48296d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 48306d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE_BAD, 48316d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = 
IB_QPS_RESET, 48326d2f89dfSmajd@mellanox.com }, 48336d2f89dfSmajd@mellanox.com [MLX5_RQC_STATE_RDY] = { 4834c94e272bSMaor Gottlieb [MLX5_SQC_STATE_RST] = MLX5_QP_STATE, 48356d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 48366d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = IB_QPS_SQE, 48376d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = MLX5_QP_STATE, 48386d2f89dfSmajd@mellanox.com }, 48396d2f89dfSmajd@mellanox.com [MLX5_RQC_STATE_ERR] = { 48406d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD, 48416d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 48426d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = IB_QPS_ERR, 48436d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = IB_QPS_ERR, 48446d2f89dfSmajd@mellanox.com }, 48456d2f89dfSmajd@mellanox.com [MLX5_RQ_STATE_NA] = { 4846c94e272bSMaor Gottlieb [MLX5_SQC_STATE_RST] = MLX5_QP_STATE, 48476d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 48486d2f89dfSmajd@mellanox.com [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE, 48496d2f89dfSmajd@mellanox.com [MLX5_SQ_STATE_NA] = MLX5_QP_STATE_BAD, 48506d2f89dfSmajd@mellanox.com }, 48516d2f89dfSmajd@mellanox.com }; 48526d2f89dfSmajd@mellanox.com 48536d2f89dfSmajd@mellanox.com *qp_state = sqrq_trans[rq_state][sq_state]; 48546d2f89dfSmajd@mellanox.com 48556d2f89dfSmajd@mellanox.com if (*qp_state == MLX5_QP_STATE_BAD) { 48566d2f89dfSmajd@mellanox.com WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x", 48576d2f89dfSmajd@mellanox.com qp->raw_packet_qp.sq.base.mqp.qpn, sq_state, 48586d2f89dfSmajd@mellanox.com qp->raw_packet_qp.rq.base.mqp.qpn, rq_state); 48596d2f89dfSmajd@mellanox.com return -EINVAL; 48606d2f89dfSmajd@mellanox.com } 48616d2f89dfSmajd@mellanox.com 48626d2f89dfSmajd@mellanox.com if (*qp_state == MLX5_QP_STATE) 48636d2f89dfSmajd@mellanox.com *qp_state = qp->state; 48646d2f89dfSmajd@mellanox.com 48656d2f89dfSmajd@mellanox.com return 0; 48666d2f89dfSmajd@mellanox.com } 48676d2f89dfSmajd@mellanox.com 
48686d2f89dfSmajd@mellanox.com static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev, 48696d2f89dfSmajd@mellanox.com struct mlx5_ib_qp *qp, 48706d2f89dfSmajd@mellanox.com u8 *raw_packet_qp_state) 48716d2f89dfSmajd@mellanox.com { 48726d2f89dfSmajd@mellanox.com struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 48736d2f89dfSmajd@mellanox.com struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 48746d2f89dfSmajd@mellanox.com struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 48756d2f89dfSmajd@mellanox.com int err; 48766d2f89dfSmajd@mellanox.com u8 sq_state = MLX5_SQ_STATE_NA; 48776d2f89dfSmajd@mellanox.com u8 rq_state = MLX5_RQ_STATE_NA; 48786d2f89dfSmajd@mellanox.com 48796d2f89dfSmajd@mellanox.com if (qp->sq.wqe_cnt) { 48806d2f89dfSmajd@mellanox.com err = query_raw_packet_qp_sq_state(dev, sq, &sq_state); 48816d2f89dfSmajd@mellanox.com if (err) 48826d2f89dfSmajd@mellanox.com return err; 48836d2f89dfSmajd@mellanox.com } 48846d2f89dfSmajd@mellanox.com 48856d2f89dfSmajd@mellanox.com if (qp->rq.wqe_cnt) { 48866d2f89dfSmajd@mellanox.com err = query_raw_packet_qp_rq_state(dev, rq, &rq_state); 48876d2f89dfSmajd@mellanox.com if (err) 48886d2f89dfSmajd@mellanox.com return err; 48896d2f89dfSmajd@mellanox.com } 48906d2f89dfSmajd@mellanox.com 48916d2f89dfSmajd@mellanox.com return sqrq_state_to_qp_state(sq_state, rq_state, qp, 48926d2f89dfSmajd@mellanox.com raw_packet_qp_state); 48936d2f89dfSmajd@mellanox.com } 48946d2f89dfSmajd@mellanox.com 48956d2f89dfSmajd@mellanox.com static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 48966d2f89dfSmajd@mellanox.com struct ib_qp_attr *qp_attr) 48976d2f89dfSmajd@mellanox.com { 489809a7d9ecSSaeed Mahameed int outlen = MLX5_ST_SZ_BYTES(query_qp_out); 489970bd7fb8SLeon Romanovsky void *qpc, *pri_path, *alt_path; 490009a7d9ecSSaeed Mahameed u32 *outb; 490170bd7fb8SLeon Romanovsky int err; 4902e126ba97SEli Cohen 490309a7d9ecSSaeed Mahameed outb = kzalloc(outlen, GFP_KERNEL); 49046d2f89dfSmajd@mellanox.com if (!outb) 
49056d2f89dfSmajd@mellanox.com return -ENOMEM; 49066d2f89dfSmajd@mellanox.com 4907*8067fd8bSPatrisious Haddad err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen, 4908*8067fd8bSPatrisious Haddad false); 4909e126ba97SEli Cohen if (err) 49106d2f89dfSmajd@mellanox.com goto out; 4911e126ba97SEli Cohen 491270bd7fb8SLeon Romanovsky qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc); 491309a7d9ecSSaeed Mahameed 491470bd7fb8SLeon Romanovsky qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state)); 491570bd7fb8SLeon Romanovsky if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING) 491670bd7fb8SLeon Romanovsky qp_attr->sq_draining = 1; 4917e126ba97SEli Cohen 491870bd7fb8SLeon Romanovsky qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu); 491970bd7fb8SLeon Romanovsky qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state)); 492070bd7fb8SLeon Romanovsky qp_attr->qkey = MLX5_GET(qpc, qpc, q_key); 492170bd7fb8SLeon Romanovsky qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn); 492270bd7fb8SLeon Romanovsky qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn); 492370bd7fb8SLeon Romanovsky qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn); 492470bd7fb8SLeon Romanovsky 492570bd7fb8SLeon Romanovsky if (MLX5_GET(qpc, qpc, rre)) 492670bd7fb8SLeon Romanovsky qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ; 492770bd7fb8SLeon Romanovsky if (MLX5_GET(qpc, qpc, rwe)) 492870bd7fb8SLeon Romanovsky qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE; 492970bd7fb8SLeon Romanovsky if (MLX5_GET(qpc, qpc, rae)) 493070bd7fb8SLeon Romanovsky qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC; 493170bd7fb8SLeon Romanovsky 493270bd7fb8SLeon Romanovsky qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max); 493370bd7fb8SLeon Romanovsky qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max); 493470bd7fb8SLeon Romanovsky qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak); 493570bd7fb8SLeon Romanovsky qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count); 
493670bd7fb8SLeon Romanovsky qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry); 493770bd7fb8SLeon Romanovsky 493870bd7fb8SLeon Romanovsky pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path); 493970bd7fb8SLeon Romanovsky alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path); 4940e126ba97SEli Cohen 49419ecf6ac1SMaor Gottlieb if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC || 49429ecf6ac1SMaor Gottlieb qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) { 494370bd7fb8SLeon Romanovsky to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path); 494470bd7fb8SLeon Romanovsky to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path); 494570bd7fb8SLeon Romanovsky qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index); 494670bd7fb8SLeon Romanovsky qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num); 4947e126ba97SEli Cohen } 4948e126ba97SEli Cohen 494970bd7fb8SLeon Romanovsky qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index); 495070bd7fb8SLeon Romanovsky qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num); 495170bd7fb8SLeon Romanovsky qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout); 495270bd7fb8SLeon Romanovsky qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout); 49536d2f89dfSmajd@mellanox.com 49546d2f89dfSmajd@mellanox.com out: 49556d2f89dfSmajd@mellanox.com kfree(outb); 49566d2f89dfSmajd@mellanox.com return err; 49576d2f89dfSmajd@mellanox.com } 49586d2f89dfSmajd@mellanox.com 4959776a3906SMoni Shoua static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, 4960776a3906SMoni Shoua struct ib_qp_attr *qp_attr, int qp_attr_mask, 4961776a3906SMoni Shoua struct ib_qp_init_attr *qp_init_attr) 4962776a3906SMoni Shoua { 4963776a3906SMoni Shoua struct mlx5_core_dct *dct = &mqp->dct.mdct; 4964776a3906SMoni Shoua u32 *out; 4965776a3906SMoni Shoua u32 access_flags = 0; 4966776a3906SMoni Shoua int outlen = MLX5_ST_SZ_BYTES(query_dct_out); 4967776a3906SMoni Shoua void *dctc; 4968776a3906SMoni Shoua int err; 
4969776a3906SMoni Shoua int supported_mask = IB_QP_STATE | 4970776a3906SMoni Shoua IB_QP_ACCESS_FLAGS | 4971776a3906SMoni Shoua IB_QP_PORT | 4972776a3906SMoni Shoua IB_QP_MIN_RNR_TIMER | 4973776a3906SMoni Shoua IB_QP_AV | 4974776a3906SMoni Shoua IB_QP_PATH_MTU | 4975776a3906SMoni Shoua IB_QP_PKEY_INDEX; 4976776a3906SMoni Shoua 4977776a3906SMoni Shoua if (qp_attr_mask & ~supported_mask) 4978776a3906SMoni Shoua return -EINVAL; 4979776a3906SMoni Shoua if (mqp->state != IB_QPS_RTR) 4980776a3906SMoni Shoua return -EINVAL; 4981776a3906SMoni Shoua 4982776a3906SMoni Shoua out = kzalloc(outlen, GFP_KERNEL); 4983776a3906SMoni Shoua if (!out) 4984776a3906SMoni Shoua return -ENOMEM; 4985776a3906SMoni Shoua 4986333fbaa0SLeon Romanovsky err = mlx5_core_dct_query(dev, dct, out, outlen); 4987776a3906SMoni Shoua if (err) 4988776a3906SMoni Shoua goto out; 4989776a3906SMoni Shoua 4990776a3906SMoni Shoua dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry); 4991776a3906SMoni Shoua 4992776a3906SMoni Shoua if (qp_attr_mask & IB_QP_STATE) 4993776a3906SMoni Shoua qp_attr->qp_state = IB_QPS_RTR; 4994776a3906SMoni Shoua 4995776a3906SMoni Shoua if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { 4996776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rre)) 4997776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_READ; 4998776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rwe)) 4999776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_WRITE; 5000776a3906SMoni Shoua if (MLX5_GET(dctc, dctc, rae)) 5001776a3906SMoni Shoua access_flags |= IB_ACCESS_REMOTE_ATOMIC; 5002776a3906SMoni Shoua qp_attr->qp_access_flags = access_flags; 5003776a3906SMoni Shoua } 5004776a3906SMoni Shoua 5005776a3906SMoni Shoua if (qp_attr_mask & IB_QP_PORT) 5006776a3906SMoni Shoua qp_attr->port_num = MLX5_GET(dctc, dctc, port); 5007776a3906SMoni Shoua if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) 5008776a3906SMoni Shoua qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak); 5009776a3906SMoni Shoua if (qp_attr_mask & IB_QP_AV) { 
		/* The DCT address vector lives in the DCT context (GRH fields). */
		qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
		qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
		qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
		qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
	}
	if (qp_attr_mask & IB_QP_PATH_MTU)
		qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
out:
	kfree(out);
	return err;
}

/*
 * mlx5_ib_query_qp() - query a QP's current attributes.
 * @ibqp: QP to query.
 * @qp_attr: output; filled with the attributes selected by @qp_attr_mask.
 * @qp_attr_mask: bitmask of IB_QP_* attributes the caller wants.
 * @qp_init_attr: output; filled with the creation-time attributes.
 *
 * GSI and DCT QPs are delegated to their dedicated query helpers.  QPs
 * attached to a WQ indirection table cannot be queried.  Raw packet QPs
 * (and QPs created with a source QPN) derive their state from the
 * underlying SQ/RQ objects rather than from a QUERY_QP command.
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int err = 0;
	u8 raw_packet_qp_state;

	if (ibqp->rwq_ind_tbl)
		return -ENOSYS;

	if (qp->type == IB_QPT_GSI)
		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
					    qp_init_attr);

	/* Not all of output fields are applicable, make sure to zero them */
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
	memset(qp_attr, 0, sizeof(*qp_attr));

	if (unlikely(qp->type == MLX5_IB_QPT_DCT))
		return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
					    qp_attr_mask, qp_init_attr);

	mutex_lock(&qp->mutex);

	if (qp->type == IB_QPT_RAW_PACKET ||
	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
		/* State is tracked on the SQ/RQ sub-objects for these QPs. */
		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
		if (err)
			goto out;
		qp->state = raw_packet_qp_state;
		qp_attr->port_num = 1;
	} else {
		err = query_qp_attr(dev, qp, qp_attr);
		if (err)
			goto out;
	}

	qp_attr->qp_state = qp->state;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		/* Kernel QP: report the real send queue limits. */
		qp_attr->cap.max_send_wr = qp->sq.max_post;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
		qp_init_attr->qp_context = ibqp->qp_context;
	} else {
		/* Userspace owns the SQ accounting, so report zero here. */
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->qp_type = qp->type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = qp->flags;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out:
	mutex_unlock(&qp->mutex);
	return err;
}

/* Allocate an XRC domain.  Fails when the device lacks XRC support. */
int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibxrcd->device);
	struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return -EOPNOTSUPP;

	return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
}

/* Release an XRC domain previously allocated by mlx5_ib_alloc_xrcd(). */
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;

	return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
}

/*
 * Firmware event handler for RQs exposed as ib_wq.  Translates core events
 * to ib_event and forwards them to the consumer's event_handler, if set.
 */
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
	struct ib_event event;

	if (rwq->ibwq.event_handler) {
		event.device = rwq->ibwq.device;
		event.element.wq = &rwq->ibwq;
		switch (type) {
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_WQ_FATAL;
			break;
		default:
			/* Unknown event types are logged and dropped. */
			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
			return;
		}

		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
	}
}

/*
 * Arm the device-global delay-drop timeout once.  The first caller programs
 * the firmware timeout; later callers only bump the RQ reference count.
 * Serialized by dev->delay_drop.lock.
 */
static int set_delay_drop(struct mlx5_ib_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->delay_drop.lock);
	if (dev->delay_drop.activate)
		goto out;

	err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
	if (err)
		goto out;

	dev->delay_drop.activate = true;
out:
	mutex_unlock(&dev->delay_drop.lock);

	if (!err)
		atomic_inc(&dev->delay_drop.rqs_cnt);
	return err;
}

/*
 * Build and execute the CREATE_RQ firmware command for a user WQ.
 * Translates ib_wq_init_attr flags (striding RQ, end padding, CVLAN
 * stripping, scatter FCS, delay drop) into RQ context bits, validating
 * each against device capabilities.
 *
 * Return: 0 on success or a negative errno.
 */
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
		     struct ib_wq_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev;
	int has_net_offloads;
	__be64 *rq_pas0;
	int ts_format;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	dev = to_mdev(pd->device);

	/* Pick a CQE timestamp format compatible with the attached CQ. */
	ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
	if (ts_format < 0)
		return ts_format;

	/* Command layout: create_rq_in followed by the PAS (page) array. */
	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, mem_rq_type,
		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, ts_format, ts_format);
	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
	MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type,
		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		} else {
			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
		}
	}
	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
		/*
		 * In Firmware number of strides in each WQE is:
		 * "512 * 2^single_wqe_log_num_of_strides"
		 * Values 3 to 8 are accepted as 10 to 15, 9 to 18 are
		 * accepted as 0 to 9
		 */
		static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
					     2, 3, 4, 5, 6, 7, 8, 9 };
		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 rwq->single_stride_log_num_of_bytes -
			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 fw_map[rwq->log_num_strides -
				MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
	}
	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
	} else {
		/* Stripping not requested: set vsd (VLAN strip disable). */
		MLX5_SET(rqc, rqc, vsd, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, scatter_fcs, 1);
	}
	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		if (!(dev->ib_dev.attrs.raw_packet_caps &
		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		MLX5_SET(rqc, rqc, delay_drop_en, 1);
	}
	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
		/* Delay drop is armed lazily; tear the RQ down on failure. */
		err = set_delay_drop(dev);
		if (err) {
			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
				     err);
			mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
		} else {
			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
		}
	}
out:
	kvfree(in);
	return err;
}

/* Validate the user-supplied RQ WQE count/stride and derive log sizes. */
static int set_user_rq_size(struct mlx5_ib_dev *dev,
			    struct ib_wq_init_attr *wq_init_attr,
			    struct mlx5_ib_create_wq *ucmd,
			    struct mlx5_ib_rwq *rwq)
{
	/* Sanity check RQ size before proceeding */
	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
		return -EINVAL;

	if (!ucmd->rq_wqe_count)
		return -EINVAL;

	rwq->wqe_count = ucmd->rq_wqe_count;
	rwq->wqe_shift = ucmd->rq_wqe_shift;
	if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
		return -EINVAL;

	rwq->log_rq_stride = rwq->wqe_shift;
	rwq->log_rq_size = ilog2(rwq->wqe_count);
	return 0;
}

/*
 * Check a striding-RQ log_num_strides value against device limits.
 * Devices without ext_stride_num_range only accept the legacy minimum.
 */
static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
Zhang { 5295c16339b6SMark Zhang if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) || 5296c16339b6SMark Zhang (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) 5297c16339b6SMark Zhang return false; 5298c16339b6SMark Zhang 5299c16339b6SMark Zhang if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) && 5300c16339b6SMark Zhang (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) 5301c16339b6SMark Zhang return false; 5302c16339b6SMark Zhang 5303c16339b6SMark Zhang return true; 5304c16339b6SMark Zhang } 5305c16339b6SMark Zhang 530679b20a6cSYishai Hadas static int prepare_user_rq(struct ib_pd *pd, 530779b20a6cSYishai Hadas struct ib_wq_init_attr *init_attr, 530879b20a6cSYishai Hadas struct ib_udata *udata, 530979b20a6cSYishai Hadas struct mlx5_ib_rwq *rwq) 531079b20a6cSYishai Hadas { 531179b20a6cSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(pd->device); 531279b20a6cSYishai Hadas struct mlx5_ib_create_wq ucmd = {}; 531379b20a6cSYishai Hadas int err; 531479b20a6cSYishai Hadas size_t required_cmd_sz; 531579b20a6cSYishai Hadas 531670c1430fSLeon Romanovsky required_cmd_sz = offsetofend(struct mlx5_ib_create_wq, 531770c1430fSLeon Romanovsky single_stride_log_num_of_bytes); 531879b20a6cSYishai Hadas if (udata->inlen < required_cmd_sz) { 531979b20a6cSYishai Hadas mlx5_ib_dbg(dev, "invalid inlen\n"); 532079b20a6cSYishai Hadas return -EINVAL; 532179b20a6cSYishai Hadas } 532279b20a6cSYishai Hadas 532379b20a6cSYishai Hadas if (udata->inlen > sizeof(ucmd) && 532479b20a6cSYishai Hadas !ib_is_udata_cleared(udata, sizeof(ucmd), 532579b20a6cSYishai Hadas udata->inlen - sizeof(ucmd))) { 532679b20a6cSYishai Hadas mlx5_ib_dbg(dev, "inlen is not supported\n"); 532779b20a6cSYishai Hadas return -EOPNOTSUPP; 532879b20a6cSYishai Hadas } 532979b20a6cSYishai Hadas 533079b20a6cSYishai Hadas if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { 533179b20a6cSYishai Hadas mlx5_ib_dbg(dev, "copy failed\n"); 533279b20a6cSYishai Hadas return -EFAULT; 
533379b20a6cSYishai Hadas } 533479b20a6cSYishai Hadas 5335ccc87087SNoa Osherovich if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) { 533679b20a6cSYishai Hadas mlx5_ib_dbg(dev, "invalid comp mask\n"); 533779b20a6cSYishai Hadas return -EOPNOTSUPP; 5338ccc87087SNoa Osherovich } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) { 5339ccc87087SNoa Osherovich if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) { 5340ccc87087SNoa Osherovich mlx5_ib_dbg(dev, "Striding RQ is not supported\n"); 534179b20a6cSYishai Hadas return -EOPNOTSUPP; 534279b20a6cSYishai Hadas } 5343ccc87087SNoa Osherovich if ((ucmd.single_stride_log_num_of_bytes < 5344ccc87087SNoa Osherovich MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) || 5345ccc87087SNoa Osherovich (ucmd.single_stride_log_num_of_bytes > 5346ccc87087SNoa Osherovich MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) { 5347ccc87087SNoa Osherovich mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n", 5348ccc87087SNoa Osherovich ucmd.single_stride_log_num_of_bytes, 5349ccc87087SNoa Osherovich MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES, 5350ccc87087SNoa Osherovich MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES); 5351ccc87087SNoa Osherovich return -EINVAL; 5352ccc87087SNoa Osherovich } 5353c16339b6SMark Zhang if (!log_of_strides_valid(dev, 5354c16339b6SMark Zhang ucmd.single_wqe_log_num_of_strides)) { 5355c16339b6SMark Zhang mlx5_ib_dbg( 5356c16339b6SMark Zhang dev, 5357c16339b6SMark Zhang "Invalid log num strides (%u. Range is %u - %u)\n", 5358ccc87087SNoa Osherovich ucmd.single_wqe_log_num_of_strides, 5359c16339b6SMark Zhang MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ? 
5360c16339b6SMark Zhang MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES : 5361ccc87087SNoa Osherovich MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES, 5362ccc87087SNoa Osherovich MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES); 5363ccc87087SNoa Osherovich return -EINVAL; 5364ccc87087SNoa Osherovich } 5365ccc87087SNoa Osherovich rwq->single_stride_log_num_of_bytes = 5366ccc87087SNoa Osherovich ucmd.single_stride_log_num_of_bytes; 5367ccc87087SNoa Osherovich rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides; 5368ccc87087SNoa Osherovich rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en; 5369ccc87087SNoa Osherovich rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ; 5370ccc87087SNoa Osherovich } 537179b20a6cSYishai Hadas 537279b20a6cSYishai Hadas err = set_user_rq_size(dev, init_attr, &ucmd, rwq); 537379b20a6cSYishai Hadas if (err) { 537479b20a6cSYishai Hadas mlx5_ib_dbg(dev, "err %d\n", err); 537579b20a6cSYishai Hadas return err; 537679b20a6cSYishai Hadas } 537779b20a6cSYishai Hadas 5378b0ea0fa5SJason Gunthorpe err = create_user_rq(dev, pd, udata, rwq, &ucmd); 537979b20a6cSYishai Hadas if (err) { 538079b20a6cSYishai Hadas mlx5_ib_dbg(dev, "err %d\n", err); 538179b20a6cSYishai Hadas return err; 538279b20a6cSYishai Hadas } 538379b20a6cSYishai Hadas 538479b20a6cSYishai Hadas rwq->user_index = ucmd.user_index; 538579b20a6cSYishai Hadas return 0; 538679b20a6cSYishai Hadas } 538779b20a6cSYishai Hadas 538879b20a6cSYishai Hadas struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, 538979b20a6cSYishai Hadas struct ib_wq_init_attr *init_attr, 539079b20a6cSYishai Hadas struct ib_udata *udata) 539179b20a6cSYishai Hadas { 539279b20a6cSYishai Hadas struct mlx5_ib_dev *dev; 539379b20a6cSYishai Hadas struct mlx5_ib_rwq *rwq; 539479b20a6cSYishai Hadas struct mlx5_ib_create_wq_resp resp = {}; 539579b20a6cSYishai Hadas size_t min_resp_len; 539679b20a6cSYishai Hadas int err; 539779b20a6cSYishai Hadas 539879b20a6cSYishai Hadas if (!udata) 539979b20a6cSYishai Hadas return ERR_PTR(-ENOSYS); 540079b20a6cSYishai Hadas 
	min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	/* Delay drop is a privileged feature: require CAP_SYS_RAWIO. */
	if (!capable(CAP_SYS_RAWIO) &&
	    init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
		return ERR_PTR(-EPERM);

	dev = to_mdev(pd->device);
	switch (init_attr->wq_type) {
	case IB_WQT_RQ:
		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
		if (!rwq)
			return ERR_PTR(-ENOMEM);
		err = prepare_user_rq(pd, init_attr, udata, rwq);
		if (err)
			goto err;
		err = create_rq(rwq, pd, init_attr);
		if (err)
			goto err_user_rq;
		break;
	default:
		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
			    init_attr->wq_type);
		return ERR_PTR(-EINVAL);
	}

	rwq->ibwq.wq_num = rwq->core_qp.qpn;
	rwq->ibwq.state = IB_WQS_RESET;
	if (udata->outlen) {
		/* Report only the fields this kernel actually fills. */
		resp.response_length = offsetofend(
			struct mlx5_ib_create_wq_resp, response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	rwq->core_qp.event = mlx5_ib_wq_event;
	rwq->ibwq.event_handler = init_attr->event_handler;
	return &rwq->ibwq;

err_copy:
	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
err_user_rq:
	destroy_user_rq(dev, pd, rwq, udata);
err:
	kfree(rwq);
	return ERR_PTR(err);
}

/* Destroy a WQ: tear down the firmware RQ, then release user resources. */
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	int ret;

	ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
	if (ret)
		return ret;
	destroy_user_rq(dev, wq->pd, rwq, udata);
	kfree(rwq);
	return 0;
}

/*
 * Create an RQ indirection table (RQT) over a set of WQs (used for RSS).
 * The firmware object is created under the uid of the first WQ's PD.
 */
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
		to_mrwq_ind_table(ib_rwq_ind_table);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
	int sz = 1 << init_attr->log_ind_tbl_size;
	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
	size_t min_resp_len;
	int inlen;
	int err;
	int i;
	u32 *in;
	void *rqtc;

	/* No input payload is defined; reject any non-zero input bytes. */
	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return -EOPNOTSUPP;

	if (init_attr->log_ind_tbl_size >
	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
			    init_attr->log_ind_tbl_size,
			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
		return -EINVAL;
	}

	min_resp_len =
		offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return -EINVAL;

	/* Command layout: create_rqt_in followed by sz RQ numbers. */
	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);

	rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
	MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);

	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
	kvfree(in);
	if (err)
		return err;

	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
	if (udata->outlen) {
		resp.response_length =
			offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
				    response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err_copy;
	}

	return 0;

err_copy:
	/* Copy to userspace failed: destroy the just-created RQT. */
	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
	return err;
}

/* Destroy an RQ indirection table (RQT) firmware object. */
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);

	return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
}

/*
 * Modify a WQ's state and/or flags by issuing a MODIFY_RQ command.
 */
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
	struct mlx5_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	int curr_wq_state;
int wq_state; 555479b20a6cSYishai Hadas int inlen; 555579b20a6cSYishai Hadas int err; 555679b20a6cSYishai Hadas void *rqc; 555779b20a6cSYishai Hadas void *in; 555879b20a6cSYishai Hadas 555970c1430fSLeon Romanovsky required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved); 556079b20a6cSYishai Hadas if (udata->inlen < required_cmd_sz) 556179b20a6cSYishai Hadas return -EINVAL; 556279b20a6cSYishai Hadas 556379b20a6cSYishai Hadas if (udata->inlen > sizeof(ucmd) && 556479b20a6cSYishai Hadas !ib_is_udata_cleared(udata, sizeof(ucmd), 556579b20a6cSYishai Hadas udata->inlen - sizeof(ucmd))) 556679b20a6cSYishai Hadas return -EOPNOTSUPP; 556779b20a6cSYishai Hadas 556879b20a6cSYishai Hadas if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) 556979b20a6cSYishai Hadas return -EFAULT; 557079b20a6cSYishai Hadas 557179b20a6cSYishai Hadas if (ucmd.comp_mask || ucmd.reserved) 557279b20a6cSYishai Hadas return -EOPNOTSUPP; 557379b20a6cSYishai Hadas 557479b20a6cSYishai Hadas inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 55751b9a07eeSLeon Romanovsky in = kvzalloc(inlen, GFP_KERNEL); 557679b20a6cSYishai Hadas if (!in) 557779b20a6cSYishai Hadas return -ENOMEM; 557879b20a6cSYishai Hadas 557979b20a6cSYishai Hadas rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 558079b20a6cSYishai Hadas 5581f9744288SLeon Romanovsky curr_wq_state = wq_attr->curr_wq_state; 5582f9744288SLeon Romanovsky wq_state = wq_attr->wq_state; 558379b20a6cSYishai Hadas if (curr_wq_state == IB_WQS_ERR) 558479b20a6cSYishai Hadas curr_wq_state = MLX5_RQC_STATE_ERR; 558579b20a6cSYishai Hadas if (wq_state == IB_WQS_ERR) 558679b20a6cSYishai Hadas wq_state = MLX5_RQC_STATE_ERR; 558779b20a6cSYishai Hadas MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state); 558834d57585SYishai Hadas MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid); 558979b20a6cSYishai Hadas MLX5_SET(rqc, rqc, state, wq_state); 559079b20a6cSYishai Hadas 5591b1f74a84SNoa Osherovich if (wq_attr_mask & IB_WQ_FLAGS) { 5592b1f74a84SNoa Osherovich if 
(wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) { 5593b1f74a84SNoa Osherovich if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 5594b1f74a84SNoa Osherovich MLX5_CAP_ETH(dev->mdev, vlan_cap))) { 5595b1f74a84SNoa Osherovich mlx5_ib_dbg(dev, "VLAN offloads are not " 5596b1f74a84SNoa Osherovich "supported\n"); 5597b1f74a84SNoa Osherovich err = -EOPNOTSUPP; 5598b1f74a84SNoa Osherovich goto out; 5599b1f74a84SNoa Osherovich } 5600b1f74a84SNoa Osherovich MLX5_SET64(modify_rq_in, in, modify_bitmask, 5601b1f74a84SNoa Osherovich MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD); 5602b1f74a84SNoa Osherovich MLX5_SET(rqc, rqc, vsd, 5603b1f74a84SNoa Osherovich (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1); 5604b1f74a84SNoa Osherovich } 5605b1383aa6SNoa Osherovich 5606b1383aa6SNoa Osherovich if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) { 5607b1383aa6SNoa Osherovich mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n"); 5608b1383aa6SNoa Osherovich err = -EOPNOTSUPP; 5609b1383aa6SNoa Osherovich goto out; 5610b1383aa6SNoa Osherovich } 5611b1f74a84SNoa Osherovich } 5612b1f74a84SNoa Osherovich 561323a6964eSMajd Dibbiny if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) { 56143e1f000fSParav Pandit u16 set_id; 56153e1f000fSParav Pandit 56163e1f000fSParav Pandit set_id = mlx5_ib_get_counters_id(dev, 0); 561723a6964eSMajd Dibbiny if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { 561823a6964eSMajd Dibbiny MLX5_SET64(modify_rq_in, in, modify_bitmask, 561923a6964eSMajd Dibbiny MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); 56203e1f000fSParav Pandit MLX5_SET(rqc, rqc, counter_set_id, set_id); 562123a6964eSMajd Dibbiny } else 56225a738b5dSJason Gunthorpe dev_info_once( 56235a738b5dSJason Gunthorpe &dev->ib_dev.dev, 56245a738b5dSJason Gunthorpe "Receive WQ counters are not supported on current FW\n"); 562523a6964eSMajd Dibbiny } 562623a6964eSMajd Dibbiny 5627e0b4b472SLeon Romanovsky err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, 
in); 562879b20a6cSYishai Hadas if (!err) 562979b20a6cSYishai Hadas rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state; 563079b20a6cSYishai Hadas 5631b1f74a84SNoa Osherovich out: 5632b1f74a84SNoa Osherovich kvfree(in); 563379b20a6cSYishai Hadas return err; 563479b20a6cSYishai Hadas } 5635d0e84c0aSYishai Hadas 5636d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe { 5637d0e84c0aSYishai Hadas struct ib_cqe cqe; 5638d0e84c0aSYishai Hadas struct completion done; 5639d0e84c0aSYishai Hadas }; 5640d0e84c0aSYishai Hadas 5641d0e84c0aSYishai Hadas static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) 5642d0e84c0aSYishai Hadas { 5643d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe, 5644d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe, 5645d0e84c0aSYishai Hadas cqe); 5646d0e84c0aSYishai Hadas 5647d0e84c0aSYishai Hadas complete(&cqe->done); 5648d0e84c0aSYishai Hadas } 5649d0e84c0aSYishai Hadas 5650d0e84c0aSYishai Hadas /* This function returns only once the drained WR was completed */ 5651d0e84c0aSYishai Hadas static void handle_drain_completion(struct ib_cq *cq, 5652d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe *sdrain, 5653d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev) 5654d0e84c0aSYishai Hadas { 5655d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev; 5656d0e84c0aSYishai Hadas 5657d0e84c0aSYishai Hadas if (cq->poll_ctx == IB_POLL_DIRECT) { 5658d0e84c0aSYishai Hadas while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0) 5659d0e84c0aSYishai Hadas ib_process_cq_direct(cq, -1); 5660d0e84c0aSYishai Hadas return; 5661d0e84c0aSYishai Hadas } 5662d0e84c0aSYishai Hadas 5663d0e84c0aSYishai Hadas if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5664d0e84c0aSYishai Hadas struct mlx5_ib_cq *mcq = to_mcq(cq); 5665d0e84c0aSYishai Hadas bool triggered = false; 5666d0e84c0aSYishai Hadas unsigned long flags; 5667d0e84c0aSYishai Hadas 5668d0e84c0aSYishai Hadas 
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 5669d0e84c0aSYishai Hadas /* Make sure that the CQ handler won't run if wasn't run yet */ 5670d0e84c0aSYishai Hadas if (!mcq->mcq.reset_notify_added) 5671d0e84c0aSYishai Hadas mcq->mcq.reset_notify_added = 1; 5672d0e84c0aSYishai Hadas else 5673d0e84c0aSYishai Hadas triggered = true; 5674d0e84c0aSYishai Hadas spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 5675d0e84c0aSYishai Hadas 5676d0e84c0aSYishai Hadas if (triggered) { 5677d0e84c0aSYishai Hadas /* Wait for any scheduled/running task to be ended */ 5678d0e84c0aSYishai Hadas switch (cq->poll_ctx) { 5679d0e84c0aSYishai Hadas case IB_POLL_SOFTIRQ: 5680d0e84c0aSYishai Hadas irq_poll_disable(&cq->iop); 5681d0e84c0aSYishai Hadas irq_poll_enable(&cq->iop); 5682d0e84c0aSYishai Hadas break; 5683d0e84c0aSYishai Hadas case IB_POLL_WORKQUEUE: 5684d0e84c0aSYishai Hadas cancel_work_sync(&cq->work); 5685d0e84c0aSYishai Hadas break; 5686d0e84c0aSYishai Hadas default: 5687d0e84c0aSYishai Hadas WARN_ON_ONCE(1); 5688d0e84c0aSYishai Hadas } 5689d0e84c0aSYishai Hadas } 5690d0e84c0aSYishai Hadas 5691d0e84c0aSYishai Hadas /* Run the CQ handler - this makes sure that the drain WR will 5692d0e84c0aSYishai Hadas * be processed if wasn't processed yet. 
5693d0e84c0aSYishai Hadas */ 56944e0e2ea1SYishai Hadas mcq->mcq.comp(&mcq->mcq, NULL); 5695d0e84c0aSYishai Hadas } 5696d0e84c0aSYishai Hadas 5697d0e84c0aSYishai Hadas wait_for_completion(&sdrain->done); 5698d0e84c0aSYishai Hadas } 5699d0e84c0aSYishai Hadas 5700d0e84c0aSYishai Hadas void mlx5_ib_drain_sq(struct ib_qp *qp) 5701d0e84c0aSYishai Hadas { 5702d0e84c0aSYishai Hadas struct ib_cq *cq = qp->send_cq; 5703d0e84c0aSYishai Hadas struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 5704d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe sdrain; 5705d34ac5cdSBart Van Assche const struct ib_send_wr *bad_swr; 5706d0e84c0aSYishai Hadas struct ib_rdma_wr swr = { 5707d0e84c0aSYishai Hadas .wr = { 5708d0e84c0aSYishai Hadas .next = NULL, 5709d0e84c0aSYishai Hadas { .wr_cqe = &sdrain.cqe, }, 5710d0e84c0aSYishai Hadas .opcode = IB_WR_RDMA_WRITE, 5711d0e84c0aSYishai Hadas }, 5712d0e84c0aSYishai Hadas }; 5713d0e84c0aSYishai Hadas int ret; 5714d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(qp->device); 5715d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev; 5716d0e84c0aSYishai Hadas 5717d0e84c0aSYishai Hadas ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 5718d0e84c0aSYishai Hadas if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5719d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 5720d0e84c0aSYishai Hadas return; 5721d0e84c0aSYishai Hadas } 5722d0e84c0aSYishai Hadas 5723d0e84c0aSYishai Hadas sdrain.cqe.done = mlx5_ib_drain_qp_done; 5724d0e84c0aSYishai Hadas init_completion(&sdrain.done); 5725d0e84c0aSYishai Hadas 5726029e88fdSLeon Romanovsky ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr); 5727d0e84c0aSYishai Hadas if (ret) { 5728d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 5729d0e84c0aSYishai Hadas return; 5730d0e84c0aSYishai Hadas } 5731d0e84c0aSYishai Hadas 5732d0e84c0aSYishai Hadas handle_drain_completion(cq, &sdrain, dev); 5733d0e84c0aSYishai Hadas } 5734d0e84c0aSYishai Hadas 
5735d0e84c0aSYishai Hadas void mlx5_ib_drain_rq(struct ib_qp *qp) 5736d0e84c0aSYishai Hadas { 5737d0e84c0aSYishai Hadas struct ib_cq *cq = qp->recv_cq; 5738d0e84c0aSYishai Hadas struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 5739d0e84c0aSYishai Hadas struct mlx5_ib_drain_cqe rdrain; 5740d34ac5cdSBart Van Assche struct ib_recv_wr rwr = {}; 5741d34ac5cdSBart Van Assche const struct ib_recv_wr *bad_rwr; 5742d0e84c0aSYishai Hadas int ret; 5743d0e84c0aSYishai Hadas struct mlx5_ib_dev *dev = to_mdev(qp->device); 5744d0e84c0aSYishai Hadas struct mlx5_core_dev *mdev = dev->mdev; 5745d0e84c0aSYishai Hadas 5746d0e84c0aSYishai Hadas ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 5747d0e84c0aSYishai Hadas if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5748d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 5749d0e84c0aSYishai Hadas return; 5750d0e84c0aSYishai Hadas } 5751d0e84c0aSYishai Hadas 5752d0e84c0aSYishai Hadas rwr.wr_cqe = &rdrain.cqe; 5753d0e84c0aSYishai Hadas rdrain.cqe.done = mlx5_ib_drain_qp_done; 5754d0e84c0aSYishai Hadas init_completion(&rdrain.done); 5755d0e84c0aSYishai Hadas 5756029e88fdSLeon Romanovsky ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr); 5757d0e84c0aSYishai Hadas if (ret) { 5758d0e84c0aSYishai Hadas WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 5759d0e84c0aSYishai Hadas return; 5760d0e84c0aSYishai Hadas } 5761d0e84c0aSYishai Hadas 5762d0e84c0aSYishai Hadas handle_drain_completion(cq, &rdrain, dev); 5763d0e84c0aSYishai Hadas } 5764d14133ddSMark Zhang 576530cd9fc5SLee Jones /* 5766d14133ddSMark Zhang * Bind a qp to a counter. 
If @counter is NULL then bind the qp to 5767d14133ddSMark Zhang * the default counter 5768d14133ddSMark Zhang */ 5769d14133ddSMark Zhang int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) 5770d14133ddSMark Zhang { 577110189e8eSMark Zhang struct mlx5_ib_dev *dev = to_mdev(qp->device); 5772d14133ddSMark Zhang struct mlx5_ib_qp *mqp = to_mqp(qp); 5773d14133ddSMark Zhang int err = 0; 5774d14133ddSMark Zhang 5775d14133ddSMark Zhang mutex_lock(&mqp->mutex); 5776d14133ddSMark Zhang if (mqp->state == IB_QPS_RESET) { 5777d14133ddSMark Zhang qp->counter = counter; 5778d14133ddSMark Zhang goto out; 5779d14133ddSMark Zhang } 5780d14133ddSMark Zhang 578110189e8eSMark Zhang if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) { 578210189e8eSMark Zhang err = -EOPNOTSUPP; 578310189e8eSMark Zhang goto out; 578410189e8eSMark Zhang } 578510189e8eSMark Zhang 5786d14133ddSMark Zhang if (mqp->state == IB_QPS_RTS) { 5787d14133ddSMark Zhang err = __mlx5_ib_qp_set_counter(qp, counter); 5788d14133ddSMark Zhang if (!err) 5789d14133ddSMark Zhang qp->counter = counter; 5790d14133ddSMark Zhang 5791d14133ddSMark Zhang goto out; 5792d14133ddSMark Zhang } 5793d14133ddSMark Zhang 5794d14133ddSMark Zhang mqp->counter_pending = 1; 5795d14133ddSMark Zhang qp->counter = counter; 5796d14133ddSMark Zhang 5797d14133ddSMark Zhang out: 5798d14133ddSMark Zhang mutex_unlock(&mqp->mutex); 5799d14133ddSMark Zhang return err; 5800d14133ddSMark Zhang } 5801312b8f79SMark Zhang 5802312b8f79SMark Zhang int mlx5_ib_qp_event_init(void) 5803312b8f79SMark Zhang { 5804312b8f79SMark Zhang mlx5_ib_qp_event_wq = alloc_ordered_workqueue("mlx5_ib_qp_event_wq", 0); 5805312b8f79SMark Zhang if (!mlx5_ib_qp_event_wq) 5806312b8f79SMark Zhang return -ENOMEM; 5807312b8f79SMark Zhang 5808312b8f79SMark Zhang return 0; 5809312b8f79SMark Zhang } 5810312b8f79SMark Zhang 5811312b8f79SMark Zhang void mlx5_ib_qp_event_cleanup(void) 5812312b8f79SMark Zhang { 5813312b8f79SMark Zhang 
destroy_workqueue(mlx5_ib_qp_event_wq); 5814312b8f79SMark Zhang } 5815