// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};

void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}

int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv)
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);

	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}
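/*
 * Illustrative sketch (not part of the driver): a user-space consumer
 * reaches this path by passing the mmap key returned in a uresp
 * (e.g. sq_key from siw_uresp_create_qp) as the file offset on the
 * open verbs context. Hypothetical user code, assuming 'cmd_fd' is
 * the ucontext file descriptor and 'len' the queue size in bytes:
 *
 *	struct siw_sqe *sq = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, cmd_fd, sq_key);
 *
 * The offset is resolved via rdma_user_mmap_entry_get() above, and the
 * vmalloc'ed queue memory is then mapped by remap_vmalloc_range().
 */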
int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;

	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));

	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));

	return rv;
}

void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);

	atomic_dec(&uctx->sdev->num_ctx);
}

int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags =
		IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_query_port(struct ib_device *base_dev, u32 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);
	int rv;

	memset(attr, 0, sizeof(*attr));

	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			      &attr->active_width);
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return rv;
}

int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0; */
	memset(gid, 0, sizeof(*gid));
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}

int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);
	return 0;
}

void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}

static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;

	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry,
					 length);
	if (rv) {
		kfree(entry);
		return NULL;
	}

	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @pd: Protection Domain
 * @attrs: Initial QP attributes.
 * @udata: used to provide QP ID, SQ and RQ size back to user.
 */

struct ib_qp *siw_create_qp(struct ib_pd *pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp *qp = NULL;
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;
	size_t length;

	siw_dbg(base_dev, "create new QP\n");

	if (attrs->create_flags)
		return ERR_PTR(-EOPNOTSUPP);

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_out;
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_out;
	}

	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		rv = -ENOMEM;
		goto err_out;
	}
	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	num_sqe = attrs->cap.max_send_wr;
	num_rqe = attrs->cap.max_recv_wr;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
	if (num_sqe)
		num_sqe = roundup_pow_of_two(num_sqe);
	else {
		/* Zero sized SQ is not supported */
		rv = -EINVAL;
		goto err_out_xa;
	}
	if (num_rqe)
		num_rqe = roundup_pow_of_two(num_rqe);
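	/*
	 * Worked example (illustration, not in the original code): with
	 * max_send_wr = 5, num_sqe is rounded up to 8. A free running
	 * u32 'put' counter of 9 then maps to slot 9 % 8 = 1. Because
	 * 2^32 is a multiple of any power-of-two queue size, the
	 * mapping stays consistent when the counter wraps from
	 * 0xffffffff to 0, so no explicit wrap handling is needed.
	 */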
	if (udata)
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->pd = pd;
	qp->scq = to_siw_cq(attrs->send_cq);
	qp->rcq = to_siw_cq(attrs->recv_cq);

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
			qp->base_qp.qp_num);
	} else if (num_rqe) {
		if (udata)
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq) {
			length = num_sqe * sizeof(struct siw_sqe);
			qp->sq_entry =
				siw_mmap_entry_insert(uctx, qp->sendq,
						      length, &uresp.sq_key);
			if (!qp->sq_entry) {
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (qp->recvq) {
			length = num_rqe * sizeof(struct siw_rqe);
			qp->rq_entry =
				siw_mmap_entry_insert(uctx, qp->recvq,
						      length, &uresp.rq_key);
			if (!qp->rq_entry) {
				uresp.sq_key = SIW_INVAL_UOBJ_KEY;
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	return &qp->base_qp;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
	if (qp) {
		if (uctx) {
			rdma_user_mmap_entry_remove(qp->sq_entry);
			rdma_user_mmap_entry_remove(qp->rq_entry);
		}
		vfree(qp->sendq);
		vfree(qp->recvq);
		kfree(qp);
	}
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}
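/*
 * Illustrative sketch (not part of the driver): minimal attributes a
 * kernel consumer could pass to satisfy the checks in siw_create_qp()
 * above. 'scq' and 'rcq' are assumed to be previously created CQs:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type     = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.send_cq     = scq,
 *		.recv_cq     = rcq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 * Note that a zero sized SQ is rejected and queue sizes are rounded
 * up to the next power of two.
 */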
/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided
 */
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}
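/*
 * Illustrative sketch (not part of the driver): a consumer moves a QP
 * to ERROR through the standard verbs call, which ends up here and
 * suspends transmission for any state beyond RTS:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	ib_modify_qp(qp, &attr, IB_QP_STATE);
 *
 * This is also what ib_drain_sq()/ib_drain_rq() rely on before the
 * flush logic in siw_post_send()/siw_post_receive() below completes
 * remaining work requests with SIW_WC_WR_FLUSH_ERR.
 */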
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);

	return 0;
}

/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers,
 * the function checks if the given buffer addresses and lengths are
 * within process context bounds.
 * Data from all provided sge's is copied together into the wqe,
 * referenced by a single sge.
 */
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = bytes > 0 ? bytes : 0;
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}
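/*
 * Illustrative example (not part of the driver): an inline send with
 * two gather elements of 16 and 32 bytes collapses both buffers into
 * 48 contiguous bytes behind sqe->sge[0], which then reports length
 * 48 and num_sge 1. If the accumulated length exceeded SIW_MAX_INLINE,
 * siw_copy_inline_sgl() would return -EINVAL instead.
 */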
/* Complete SQ WR's without processing */
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
			   const struct ib_send_wr **bad_wr)
{
	struct siw_sqe sqe = {};
	int rv = 0;

	while (wr) {
		sqe.id = wr->wr_id;
		sqe.opcode = wr->opcode;
		rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/* Complete RQ WR's without processing */
static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct siw_rqe rqe = {};
	int rv = 0;

	while (wr) {
		rqe.id = wr->wr_id;
		rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp: Base QP contained in siw QP
 * @wr: Null terminated list of user WR's
 * @bad_wr: Points to failing WR in case of synchronous failure.
 */
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients' needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_sq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. We could relax to SGL with multiple
			 * elements referring the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;

		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Possible immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent SQ
	 * processing, if new work is already pending. But rv must be
	 * passed to the caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (rdma_is_kernel_res(&qp->base_qp.res)) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);

	*bad_wr = wr;
	return rv;
}
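/*
 * Illustrative sketch (not part of the driver): how a kernel consumer
 * might post one signalled send through the path above. 'qp' is the
 * QP returned by siw_create_qp(); 'dma_addr', 'len' and 'mr' describe
 * a registered, DMA-mapped buffer (all names assumed for this sketch):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_send_wr wr = { .wr_id = 1, .sg_list = &sge,
 *				 .num_sge = 1, .opcode = IB_WR_SEND,
 *				 .send_flags = IB_SEND_SIGNALED };
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 *
 * On success the completion is later reaped from the send CQ via
 * ib_poll_cq() (see siw_poll_cq() below).
 */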
994cf049bb3SBernard Metzler * 995cf049bb3SBernard Metzler * This handles an ib_drain_rq() call with 996cf049bb3SBernard Metzler * a concurrent request to set the QP state 997cf049bb3SBernard Metzler * to ERROR. 998cf049bb3SBernard Metzler */ 999cf049bb3SBernard Metzler rv = siw_rq_flush_wr(qp, wr, bad_wr); 1000cf049bb3SBernard Metzler } else { 1001cf049bb3SBernard Metzler siw_dbg_qp(qp, "QP locked, state %d\n", 1002cf049bb3SBernard Metzler qp->attrs.state); 1003303ae1cdSBernard Metzler *bad_wr = wr; 1004cf049bb3SBernard Metzler rv = -ENOTCONN; 1005303ae1cdSBernard Metzler } 1006cf049bb3SBernard Metzler return rv; 1007303ae1cdSBernard Metzler } 1008303ae1cdSBernard Metzler if (qp->attrs.state > SIW_QP_STATE_RTS) { 1009cf049bb3SBernard Metzler if (qp->attrs.state == SIW_QP_STATE_ERROR) { 1010cf049bb3SBernard Metzler /* 1011cf049bb3SBernard Metzler * Immediately flush this WR to CQ, if QP 1012cf049bb3SBernard Metzler * is in ERROR state. RQ is guaranteed to 1013cf049bb3SBernard Metzler * be empty, so WR complets in-order. 1014cf049bb3SBernard Metzler * 1015cf049bb3SBernard Metzler * Typically triggered by ib_drain_rq(). 1016cf049bb3SBernard Metzler */ 1017cf049bb3SBernard Metzler rv = siw_rq_flush_wr(qp, wr, bad_wr); 1018cf049bb3SBernard Metzler } else { 1019cf049bb3SBernard Metzler siw_dbg_qp(qp, "QP out of state %d\n", 1020cf049bb3SBernard Metzler qp->attrs.state); 1021303ae1cdSBernard Metzler *bad_wr = wr; 1022cf049bb3SBernard Metzler rv = -ENOTCONN; 1023cf049bb3SBernard Metzler } 1024cf049bb3SBernard Metzler up_read(&qp->state_lock); 1025cf049bb3SBernard Metzler return rv; 1026303ae1cdSBernard Metzler } 1027303ae1cdSBernard Metzler /* 1028303ae1cdSBernard Metzler * Serialize potentially multiple producers. 1029303ae1cdSBernard Metzler * Not needed for single threaded consumer side. 
1030303ae1cdSBernard Metzler */ 1031303ae1cdSBernard Metzler spin_lock_irqsave(&qp->rq_lock, flags); 1032303ae1cdSBernard Metzler 1033303ae1cdSBernard Metzler while (wr) { 1034303ae1cdSBernard Metzler u32 idx = qp->rq_put % qp->attrs.rq_size; 1035303ae1cdSBernard Metzler struct siw_rqe *rqe = &qp->recvq[idx]; 1036303ae1cdSBernard Metzler 1037303ae1cdSBernard Metzler if (rqe->flags) { 1038303ae1cdSBernard Metzler siw_dbg_qp(qp, "RQ full\n"); 1039303ae1cdSBernard Metzler rv = -ENOMEM; 1040303ae1cdSBernard Metzler break; 1041303ae1cdSBernard Metzler } 1042303ae1cdSBernard Metzler if (wr->num_sge > qp->attrs.rq_max_sges) { 1043303ae1cdSBernard Metzler siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge); 1044303ae1cdSBernard Metzler rv = -EINVAL; 1045303ae1cdSBernard Metzler break; 1046303ae1cdSBernard Metzler } 1047303ae1cdSBernard Metzler rqe->id = wr->wr_id; 1048303ae1cdSBernard Metzler rqe->num_sge = wr->num_sge; 1049303ae1cdSBernard Metzler siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); 1050303ae1cdSBernard Metzler 1051303ae1cdSBernard Metzler /* make sure RQE is completely written before valid */ 1052303ae1cdSBernard Metzler smp_wmb(); 1053303ae1cdSBernard Metzler 1054303ae1cdSBernard Metzler rqe->flags = SIW_WQE_VALID; 1055303ae1cdSBernard Metzler 1056303ae1cdSBernard Metzler qp->rq_put++; 1057303ae1cdSBernard Metzler wr = wr->next; 1058303ae1cdSBernard Metzler } 1059303ae1cdSBernard Metzler spin_unlock_irqrestore(&qp->rq_lock, flags); 1060303ae1cdSBernard Metzler 1061303ae1cdSBernard Metzler up_read(&qp->state_lock); 1062303ae1cdSBernard Metzler 1063303ae1cdSBernard Metzler if (rv < 0) { 1064303ae1cdSBernard Metzler siw_dbg_qp(qp, "error %d\n", rv); 1065303ae1cdSBernard Metzler *bad_wr = wr; 1066303ae1cdSBernard Metzler } 1067303ae1cdSBernard Metzler return rv > 0 ? 
0 : rv; 1068303ae1cdSBernard Metzler } 1069303ae1cdSBernard Metzler 107043d781b9SLeon Romanovsky int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata) 1071303ae1cdSBernard Metzler { 1072303ae1cdSBernard Metzler struct siw_cq *cq = to_siw_cq(base_cq); 1073303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(base_cq->device); 1074303ae1cdSBernard Metzler struct siw_ucontext *ctx = 1075303ae1cdSBernard Metzler rdma_udata_to_drv_context(udata, struct siw_ucontext, 1076303ae1cdSBernard Metzler base_ucontext); 1077303ae1cdSBernard Metzler 1078303ae1cdSBernard Metzler siw_dbg_cq(cq, "free CQ resources\n"); 1079303ae1cdSBernard Metzler 1080303ae1cdSBernard Metzler siw_cq_flush(cq); 1081303ae1cdSBernard Metzler 108211f1a755SMichal Kalderon if (ctx) 108311f1a755SMichal Kalderon rdma_user_mmap_entry_remove(cq->cq_entry); 1084303ae1cdSBernard Metzler 1085303ae1cdSBernard Metzler atomic_dec(&sdev->num_cq); 1086303ae1cdSBernard Metzler 1087303ae1cdSBernard Metzler vfree(cq->queue); 108843d781b9SLeon Romanovsky return 0; 1089303ae1cdSBernard Metzler } 1090303ae1cdSBernard Metzler 1091303ae1cdSBernard Metzler /* 1092303ae1cdSBernard Metzler * siw_create_cq() 1093303ae1cdSBernard Metzler * 1094303ae1cdSBernard Metzler * Populate CQ of requested size 1095303ae1cdSBernard Metzler * 1096303ae1cdSBernard Metzler * @base_cq: CQ as allocated by RDMA midlayer 1097303ae1cdSBernard Metzler * @attr: Initial CQ attributes 1098303ae1cdSBernard Metzler * @udata: relates to user context 1099303ae1cdSBernard Metzler */ 1100303ae1cdSBernard Metzler 1101303ae1cdSBernard Metzler int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr, 1102303ae1cdSBernard Metzler struct ib_udata *udata) 1103303ae1cdSBernard Metzler { 1104303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(base_cq->device); 1105303ae1cdSBernard Metzler struct siw_cq *cq = to_siw_cq(base_cq); 1106303ae1cdSBernard Metzler int rv, size = attr->cqe; 1107303ae1cdSBernard Metzler 11081c407cb5SJason Gunthorpe if (attr->flags) 11091c407cb5SJason Gunthorpe return -EOPNOTSUPP; 11101c407cb5SJason Gunthorpe 1111303ae1cdSBernard Metzler if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) { 1112303ae1cdSBernard Metzler siw_dbg(base_cq->device, "too many CQ's\n"); 1113303ae1cdSBernard Metzler rv = -ENOMEM; 1114303ae1cdSBernard Metzler goto err_out; 1115303ae1cdSBernard Metzler } 1116303ae1cdSBernard Metzler if (size < 1 || size > sdev->attrs.max_cqe) { 1117303ae1cdSBernard Metzler siw_dbg(base_cq->device, "CQ size error: %d\n", size); 1118303ae1cdSBernard Metzler rv = -EINVAL; 1119303ae1cdSBernard Metzler goto err_out; 1120303ae1cdSBernard Metzler } 1121303ae1cdSBernard Metzler size = roundup_pow_of_two(size); 1122303ae1cdSBernard Metzler cq->base_cq.cqe = size; 1123303ae1cdSBernard Metzler cq->num_cqe = size; 1124303ae1cdSBernard Metzler 112558fb0b56SBernard Metzler if (udata) 1126303ae1cdSBernard Metzler cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) + 1127303ae1cdSBernard Metzler sizeof(struct siw_cq_ctrl)); 112858fb0b56SBernard Metzler else 112958fb0b56SBernard Metzler cq->queue = vzalloc(size * sizeof(struct siw_cqe) + 113058fb0b56SBernard Metzler sizeof(struct siw_cq_ctrl)); 113158fb0b56SBernard Metzler 1132303ae1cdSBernard Metzler if (cq->queue == NULL) { 1133303ae1cdSBernard Metzler rv = -ENOMEM; 1134303ae1cdSBernard Metzler goto err_out; 1135303ae1cdSBernard Metzler } 1136303ae1cdSBernard Metzler get_random_bytes(&cq->id, 4); 1137303ae1cdSBernard Metzler siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id); 

	spin_lock_init(&cq->lock);

	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

	if (udata) {
		struct siw_uresp_create_cq uresp = {};
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		size_t length = size * sizeof(struct siw_cqe) +
			sizeof(struct siw_cq_ctrl);

		cq->cq_entry =
			siw_mmap_entry_insert(ctx, cq->queue,
					      length, &uresp.cq_key);
		if (!cq->cq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.cq_id = cq->id;
		uresp.num_cqe = size;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	return 0;

err_out:
	siw_dbg(base_cq->device, "CQ creation failed: %d\n", rv);

	if (cq && cq->queue) {
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		if (ctx)
			rdma_user_mmap_entry_remove(cq->cq_entry);
		vfree(cq->queue);
	}
	atomic_dec(&sdev->num_cq);

	return rv;
}

/*
 * siw_poll_cq()
 *
 * Reap CQ entries if available and copy work completion status into
 * array of WC's provided by caller. Returns number of reaped CQE's.
 *
 * @base_cq: Base CQ contained in siw CQ.
 * @num_cqe: Maximum number of CQE's to reap.
 * @wc: Array of work completions to be filled by siw.
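 *
 * A typical consumer drains the CQ in batches and then rearms the
 * notification; a minimal sketch, assuming a hypothetical kernel
 * caller with its own process_wc() helper:
 *
 *	struct ib_wc wc[4];
 *	int n, i;
 *
 *	do {
 *		while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *			for (i = 0; i < n; i++)
 *				process_wc(&wc[i]);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);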
 */
int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		if (!siw_reap_cqe(cq, wc))
			break;
		wc++;
	}
	return i;
}

/*
 * siw_req_notify_cq()
 *
 * Request notification for new CQE's added to that CQ.
 * Defined flags:
 * o IB_CQ_SOLICITED lets siw trigger a notification
 *   event if a WQE with notification flag set enters the CQ
 * o IB_CQ_NEXT_COMP lets siw trigger a notification
 *   event if a WQE enters the CQ.
 * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the
 *   number of not yet reaped CQE's regardless of their notification
 *   type and the current or new CQ notification setting.
 *
 * @base_cq: Base CQ contained in siw CQ.
 * @flags: Requested notification flags.
 */
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
{
	struct siw_cq *cq = to_siw_cq(base_cq);

	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);

	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		/*
		 * Enable CQ event for next solicited completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
	else
		/*
		 * Enable CQ event for any signalled completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		return cq->cq_put - cq->cq_get;

	return 0;
}

/*
 * siw_dereg_mr()
 *
 * Release Memory Region.
 *
 * @base_mr: Base MR contained in siw MR.
 * @udata: points to user context, unused.
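 *
 * A sketch of the matching core verbs in-kernel users pair this with
 * (hypothetical caller, error handling elided):
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, num_sg);
 *	...
 *	err = ib_dereg_mr(mr);
 *
 * The siw MR itself is freed via kfree_rcu(), so concurrent lookups
 * still in flight under RCU remain safe until the grace period ends.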
 */
int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
{
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_device *sdev = to_siw_dev(base_mr->device);

	siw_dbg_mem(mr->mem, "deregister MR\n");

	atomic_dec(&sdev->num_mr);

	siw_mr_drop_mem(mr);
	kfree_rcu(mr, rcu);

	return 0;
}

/*
 * siw_reg_user_mr()
 *
 * Register Memory Region.
 *
 * @pd: Protection Domain
 * @start: starting address of MR (virtual address)
 * @len: len of MR
 * @rnic_va: not used by siw
 * @rights: MR access rights
 * @udata: user buffer to communicate STag and Key.
 */
struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata)
{
	struct siw_mr *mr = NULL;
	struct siw_umem *umem = NULL;
	struct siw_ureq_reg_mr ureq;
	struct siw_device *sdev = to_siw_dev(pd->device);

	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
	int rv;

	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
		   (unsigned long long)len);

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (!len) {
		rv = -EINVAL;
		goto err_out;
	}
	if (mem_limit != RLIM_INFINITY) {
		unsigned long num_pages =
			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
		mem_limit >>= PAGE_SHIFT;

		if (num_pages > mem_limit - current->mm->locked_vm) {
			siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
				   num_pages, mem_limit,
				   current->mm->locked_vm);
			rv = -ENOMEM;
			goto err_out;
		}
	}
	umem = siw_umem_get(start, len, ib_access_writable(rights));
	if (IS_ERR(umem)) {
		rv = PTR_ERR(umem);
		siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
		umem = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
	if (rv)
		goto err_out;

	if (udata) {
		struct siw_uresp_reg_mr uresp = {};
		struct siw_mem *mem = mr->mem;

		if (udata->inlen < sizeof(ureq)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out;

		mr->base_mr.lkey |= ureq.stag_key;
		mr->base_mr.rkey |= ureq.stag_key;
		mem->stag |= ureq.stag_key;
		uresp.stag = mem->stag;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	mr->mem->stag_valid = 1;

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);
	if (mr) {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	} else {
		if (umem)
			siw_umem_release(umem, false);
	}
	return ERR_PTR(rv);
}

struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_sge)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	struct siw_pbl *pbl = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (mr_type != IB_MR_TYPE_MEM_REG) {
		siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if (max_sge > SIW_MAX_SGE_PBL) {
		siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
		rv = -ENOMEM;
		goto err_out;
	}
	pbl = siw_pbl_alloc(max_sge);
	if (IS_ERR(pbl)) {
		rv = PTR_ERR(pbl);
		siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
		pbl = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
	if (rv)
		goto err_out;

	mr->mem->is_pbl = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);

	if (!mr) {
		kfree(pbl);
	} else {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	}
	siw_dbg_pd(pd, "failed: %d\n", rv);

	return ERR_PTR(rv);
}

/* Just used to count number of pages being mapped */
static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
{
	return 0;
}

int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
		  unsigned int *sg_off)
{
	struct scatterlist *slp;
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_mem *mem = mr->mem;
	struct siw_pbl *pbl = mem->pbl;
	struct siw_pble *pble;
	unsigned long pbl_size;
	int i, rv;

	if (!pbl) {
		siw_dbg_mem(mem, "no PBL allocated\n");
		return -EINVAL;
	}
	pble = pbl->pbe;

	if (pbl->max_buf < num_sle) {
		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
			    num_sle, mem->pbl->max_buf);
		return -ENOMEM;
	}
	for_each_sg(sl, slp, num_sle, i) {
		if (sg_dma_len(slp) == 0) {
			siw_dbg_mem(mem, "empty SGE\n");
			return -EINVAL;
		}
		if (i == 0) {
			pble->addr = sg_dma_address(slp);
			pble->size = sg_dma_len(slp);
			pble->pbl_off = 0;
			pbl_size = pble->size;
			pbl->num_buf = 1;
		} else {
			/* Merge PBL entries if adjacent */
			if (pble->addr + pble->size == sg_dma_address(slp)) {
				pble->size += sg_dma_len(slp);
			} else {
				pble++;
				pbl->num_buf++;
				pble->addr = sg_dma_address(slp);
				pble->size = sg_dma_len(slp);
				pble->pbl_off = pbl_size;
			}
			pbl_size += sg_dma_len(slp);
		}
		siw_dbg_mem(mem,
			    "sge[%d], size %u, addr 0x%p, total %lu\n",
			    i, pble->size, (void *)(uintptr_t)pble->addr,
			    pbl_size);
	}
	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
	if (rv > 0) {
		mem->len = base_mr->length;
		mem->va = base_mr->iova;
		siw_dbg_mem(mem,
			    "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
			    mem->len, (void *)(uintptr_t)mem->va, num_sle,
			    pbl->num_buf);
	}
	return rv;
}

/*
 * siw_get_dma_mr()
 *
 * Create an (empty) DMA memory region, where no umem is attached.
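 *
 * A minimal sketch of how an in-kernel user typically ends up here,
 * assuming a ULP that opts into the all-physical-memory rkey:
 *
 *	pd = ib_alloc_pd(device, IB_PD_UNSAFE_GLOBAL_RKEY);
 *	if (!IS_ERR(pd))
 *		rkey = pd->unsafe_global_rkey;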
 */
struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
	if (rv)
		goto err_out;

	mr->mem->stag_valid = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	kfree(mr);

	atomic_dec(&sdev->num_mr);

	return ERR_PTR(rv);
}

/*
 * siw_create_srq()
 *
 * Create Shared Receive Queue of attributes @init_attrs
 * within the protection domain given by @base_srq->pd.
 *
 * @base_srq: Base SRQ contained in siw SRQ.
 * @init_attrs: SRQ init attributes.
 * @udata: points to user context
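 *
 * Example (sketch, hypothetical kernel caller): an SRQ with the
 * limit event armed at creation time:
 *
 *	struct ib_srq_init_attr init = {
 *		.attr = { .max_wr = 256, .max_sge = 2, .srq_limit = 16 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init);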
 */
int siw_create_srq(struct ib_srq *base_srq,
		   struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct ib_srq_attr *attrs = &init_attrs->attr;
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	int rv;

	if (init_attrs->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
		siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
	    attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
		rv = -EINVAL;
		goto err_out;
	}
	srq->max_sge = attrs->max_sge;
	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
	srq->limit = attrs->srq_limit;
	if (srq->limit)
		srq->armed = true;

	srq->is_kernel_res = !udata;

	if (udata)
		srq->recvq =
			vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
	else
		srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));

	if (srq->recvq == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	if (udata) {
		struct siw_uresp_create_srq uresp = {};
		size_t length = srq->num_rqe * sizeof(struct siw_rqe);

		srq->srq_entry =
			siw_mmap_entry_insert(ctx, srq->recvq,
					      length, &uresp.srq_key);
		if (!srq->srq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.num_rqe = srq->num_rqe;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	spin_lock_init(&srq->lock);

	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");

	return 0;

err_out:
	if (srq->recvq) {
		if (ctx)
			rdma_user_mmap_entry_remove(srq->srq_entry);
		vfree(srq->recvq);
	}
	atomic_dec(&sdev->num_srq);

	return rv;
}

/*
 * siw_modify_srq()
 *
 * Modify SRQ. The caller may resize SRQ and/or set/reset notification
 * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
 *
 * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE
 * parameter. siw_modify_srq() does not check the attrs->max_sge param.
 */
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
		   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&srq->lock, flags);

	if (attr_mask & IB_SRQ_MAX_WR) {
		/* resize request not yet supported */
		rv = -EOPNOTSUPP;
		goto out;
	}
	if (attr_mask & IB_SRQ_LIMIT) {
		if (attrs->srq_limit) {
			if (unlikely(attrs->srq_limit > srq->num_rqe)) {
				rv = -EINVAL;
				goto out;
			}
			srq->armed = true;
		} else {
			srq->armed = false;
		}
		srq->limit = attrs->srq_limit;
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return rv;
}

/*
 * siw_query_srq()
 *
 * Query SRQ attributes.
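 *
 * Example (sketch, hypothetical caller): re-arm the limit via
 * siw_modify_srq() above, then read the settings back:
 *
 *	attrs.srq_limit = 16;
 *	if (!ib_modify_srq(srq, &attrs, IB_SRQ_LIMIT))
 *		ib_query_srq(srq, &attrs);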
 */
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;

	spin_lock_irqsave(&srq->lock, flags);

	attrs->max_wr = srq->num_rqe;
	attrs->max_sge = srq->max_sge;
	attrs->srq_limit = srq->limit;

	spin_unlock_irqrestore(&srq->lock, flags);

	return 0;
}

/*
 * siw_destroy_srq()
 *
 * Destroy SRQ.
 * It is assumed that the SRQ is not referenced by any
 * QP anymore - the code trusts the RDMA core environment to keep track
 * of QP references.
 */
int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	if (ctx)
		rdma_user_mmap_entry_remove(srq->srq_entry);
	vfree(srq->recvq);
	atomic_dec(&sdev->num_srq);
	return 0;
}

/*
 * siw_post_srq_recv()
 *
 * Post a list of receive queue elements to SRQ.
 * NOTE: The function does not check or lock a certain SRQ state
 *       during the post operation. The code simply trusts the
 *       RDMA core environment.
 *
 * @base_srq: Base SRQ contained in siw SRQ
 * @wr: List of R-WR's
 * @bad_wr: Updated to failing WR if posting fails.
 */
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	if (unlikely(!srq->is_kernel_res)) {
		siw_dbg_pd(base_srq->pd,
			   "[SRQ]: no kernel post_recv for mapped srq\n");
		rv = -EINVAL;
		goto out;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Also needed to serialize potentially multiple
	 * consumers.
	 */
	spin_lock_irqsave(&srq->lock, flags);

	while (wr) {
		u32 idx = srq->rq_put % srq->num_rqe;
		struct siw_rqe *rqe = &srq->recvq[idx];

		if (rqe->flags) {
			siw_dbg_pd(base_srq->pd, "SRQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (unlikely(wr->num_sge > srq->max_sge)) {
			siw_dbg_pd(base_srq->pd,
				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* Make sure S-RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		srq->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);
out:
	if (unlikely(rv < 0)) {
		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
		*bad_wr = wr;
	}
	return rv;
}

void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_qp *base_qp = &qp->base_qp;

	/*
	 * Do not report asynchronous errors on a QP which gets
	 * destroyed via verbs interface (siw_destroy_qp())
	 */
	if (qp->attrs.flags & SIW_QP_IN_DESTROY)
		return;

	event.event = etype;
	event.device = base_qp->device;
	event.element.qp = base_qp;

	if (base_qp->event_handler) {
		siw_dbg_qp(qp, "reporting event %d\n", etype);
		base_qp->event_handler(&event, base_qp->qp_context);
	}
}

void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_cq *base_cq = &cq->base_cq;

	event.event = etype;
	event.device = base_cq->device;
	event.element.cq = base_cq;

	if (base_cq->event_handler) {
		siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
		base_cq->event_handler(&event, base_cq->cq_context);
	}
}

void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_srq *base_srq = &srq->base_srq;

	event.event = etype;
	event.device = base_srq->device;
	event.element.srq = base_srq;

	if (base_srq->event_handler) {
		siw_dbg_pd(srq->base_srq.pd,
			   "reporting SRQ event %d\n", etype);
		base_srq->event_handler(&event, base_srq->srq_context);
	}
}

void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
{
	struct ib_event event;

	event.event = etype;
	event.device = &sdev->base_dev;
	event.element.port_num = port;

	siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);

	ib_dispatch_event(&event);
}
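
/*
 * The event helpers above feed the handler a consumer registered at
 * resource creation time. A minimal sketch, assuming a hypothetical
 * ULP; the handler name and its context pointer are illustrative:
 *
 *	static void my_qp_event(struct ib_event *ev, void *ctx)
 *	{
 *		pr_info("QP event: %s\n", ib_event_msg(ev->event));
 *	}
 *
 *	qp_init_attr.event_handler = my_qp_event;
 *	qp_init_attr.qp_context = my_ctx;
 */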