// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <net/addrconf.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};

void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}

int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);
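	/*
	 * All siw mmap objects (SQ, RQ and CQ arrays) are vmalloc()'ed
	 * kernel memory; remap it into the user VMA found via the
	 * mmap entry lookup above.
	 */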

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv) {
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);
		goto out;
	}
out:
	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}

int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;

	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));

	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));

	return rv;
}

void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);

	atomic_dec(&uctx->sdev->num_ctx);
}

int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
			    sdev->netdev->dev_addr);

	return 0;
}

int siw_query_port(struct ib_device *base_dev, u32 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);
	int rv;

	memset(attr, 0, sizeof(*attr));

	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			      &attr->active_width);
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return rv;
}

int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0; */
	memset(gid, 0, sizeof(*gid));
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}

int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);
	return 0;
}

void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}

static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;

	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry,
					 length);
	if (rv) {
		kfree(entry);
		return NULL;
	}

	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @qp: Queue pair
 * @attrs: Initial QP attributes.
 * @udata: used to provide QP ID, SQ and RQ size back to user.
 */

int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		  struct ib_udata *udata)
{
	struct ib_pd *pd = ibqp->pd;
	struct siw_qp *qp = to_siw_qp(ibqp);
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;
	size_t length;

	siw_dbg(base_dev, "create new QP\n");

	if (attrs->create_flags)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_atomic;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EOPNOTSUPP;
		goto err_atomic;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_atomic;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_atomic;
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_atomic;
	}

	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_atomic;
	}

	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

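	/*
	 * siw_qp_add() makes the new QP reachable under its QP ID;
	 * if anything fails past this point, the err_out_xa path
	 * removes it from the device's qp_xa again.
	 */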
	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_atomic;

	num_sqe = attrs->cap.max_send_wr;
	num_rqe = attrs->cap.max_recv_wr;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
	if (num_sqe)
		num_sqe = roundup_pow_of_two(num_sqe);
	else {
		/* Zero sized SQ is not supported */
		rv = -EINVAL;
		goto err_out_xa;
	}
	if (num_rqe)
		num_rqe = roundup_pow_of_two(num_rqe);

	if (udata)
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->pd = pd;
	qp->scq = to_siw_cq(attrs->send_cq);
	qp->rcq = to_siw_cq(attrs->recv_cq);

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
			qp->base_qp.qp_num);
	} else if (num_rqe) {
		if (udata)
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq) {
			length = num_sqe * sizeof(struct siw_sqe);
			qp->sq_entry =
				siw_mmap_entry_insert(uctx, qp->sendq,
						      length, &uresp.sq_key);
			if (!qp->sq_entry) {
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (qp->recvq) {
			length = num_rqe * sizeof(struct siw_rqe);
			qp->rq_entry =
				siw_mmap_entry_insert(uctx, qp->recvq,
						      length, &uresp.rq_key);
			if (!qp->rq_entry) {
				uresp.sq_key = SIW_INVAL_UOBJ_KEY;
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	init_completion(&qp->qp_free);

	return 0;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}
	vfree(qp->sendq);
	vfree(qp->recvq);

err_atomic:
	atomic_dec(&sdev->num_qp);
	return rv;
}

/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided
 */
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
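	/*
	 * Only IB_QP_STATE and IB_QP_ACCESS_FLAGS are translated into
	 * siw attributes below; other standard attr_mask bits are
	 * accepted but have no effect on this software device.
	 */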
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}

int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}

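	/*
	 * Move the QP to ERROR state under the state lock, which
	 * terminates remaining activity and drops a still attached
	 * connection endpoint (qp->cep) before the QP is finally freed.
	 */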
	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);
	wait_for_completion(&qp->qp_free);

	return 0;
}

/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers, the
 * function checks if the given buffer addresses and lengths are within
 * process context bounds.
 * Data from all provided sge's is copied together into the wqe,
 * referenced by a single sge.
 */
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = max(bytes, 0);
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}

/* Complete SQ WR's without processing */
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
			   const struct ib_send_wr **bad_wr)
{
	struct siw_sqe sqe = {};
	int rv = 0;

	while (wr) {
		sqe.id = wr->wr_id;
		sqe.opcode = wr->opcode;
		rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/* Complete RQ WR's without processing */
static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct siw_rqe rqe = {};
	int rv = 0;

	while (wr) {
		rqe.id = wr->wr_id;
		rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp: Base QP contained in siw QP
 * @wr: Null terminated list of user WR's
 * @bad_wr: Points to failing WR in case of synchronous failure.
 */
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients' needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_sq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

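			/*
			 * Payload is either referenced via the caller's
			 * SGL or, for IB_SEND_INLINE, copied directly
			 * behind the first SGE of the SQE
			 * (see siw_copy_inline_sgl()).
			 */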
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. We could relax to SGL with multiple
			 * elements referring to the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;

		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent SQ
	 * processing, if new work is already pending. But rv must be passed
	 * to the caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (rdma_is_kernel_res(&qp->base_qp.res)) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);

	*bad_wr = wr;
	return rv;
}

/*
 * siw_post_receive()
 *
 * Post a list of R-WR's to a RQ.
 *
 * @base_qp: Base QP contained in siw QP
 * @wr: Null terminated list of user WR's
 * @bad_wr: Points to failing WR in case of synchronous failure.
 */
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	unsigned long flags;
	int rv = 0;

	if (qp->srq || qp->attrs.rq_size == 0) {
		*bad_wr = wr;
		return -EINVAL;
	}
	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients' needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_rq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (qp->attrs.state > SIW_QP_STATE_RTS) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. RQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_rq().
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Not needed for single threaded consumer side.
	 */
	spin_lock_irqsave(&qp->rq_lock, flags);

	while (wr) {
		u32 idx = qp->rq_put % qp->attrs.rq_size;
		struct siw_rqe *rqe = &qp->recvq[idx];

		if (rqe->flags) {
			siw_dbg_qp(qp, "RQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.rq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		qp->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	up_read(&qp->state_lock);

	if (rv < 0) {
		siw_dbg_qp(qp, "error %d\n", rv);
		*bad_wr = wr;
	}
	return rv > 0 ? 0 : rv;
}

int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	siw_dbg_cq(cq, "free CQ resources\n");

	siw_cq_flush(cq);

	if (ctx)
		rdma_user_mmap_entry_remove(cq->cq_entry);

	atomic_dec(&sdev->num_cq);

	vfree(cq->queue);
	return 0;
}

/*
 * siw_create_cq()
 *
 * Populate CQ of requested size
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
 * @udata: relates to user context
 */

int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_cq *cq = to_siw_cq(base_cq);
	int rv, size = attr->cqe;

	if (attr->flags)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		siw_dbg(base_cq->device, "too many CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (size < 1 || size > sdev->attrs.max_cqe) {
		siw_dbg(base_cq->device, "CQ size error: %d\n", size);
		rv = -EINVAL;
		goto err_out;
	}
	size = roundup_pow_of_two(size);
	cq->base_cq.cqe = size;
	cq->num_cqe = size;

	if (udata)
		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
					 sizeof(struct siw_cq_ctrl));
	else
		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
				    sizeof(struct siw_cq_ctrl));

	if (cq->queue == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	get_random_bytes(&cq->id, 4);
	siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);
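	/*
	 * cq->queue was sized for 'size' CQEs plus a trailing
	 * struct siw_cq_ctrl, which holds the CQ notification flags
	 * set up below and is mapped to user space together with
	 * the CQE array.
	 */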
1136303ae1cdSBernard Metzler 1137303ae1cdSBernard Metzler spin_lock_init(&cq->lock); 1138303ae1cdSBernard Metzler 11392c8ccb37SBernard Metzler cq->notify = (struct siw_cq_ctrl *)&cq->queue[size]; 1140303ae1cdSBernard Metzler 1141303ae1cdSBernard Metzler if (udata) { 1142303ae1cdSBernard Metzler struct siw_uresp_create_cq uresp = {}; 1143303ae1cdSBernard Metzler struct siw_ucontext *ctx = 1144303ae1cdSBernard Metzler rdma_udata_to_drv_context(udata, struct siw_ucontext, 1145303ae1cdSBernard Metzler base_ucontext); 114611f1a755SMichal Kalderon size_t length = size * sizeof(struct siw_cqe) + 114711f1a755SMichal Kalderon sizeof(struct siw_cq_ctrl); 1148303ae1cdSBernard Metzler 114911f1a755SMichal Kalderon cq->cq_entry = 115011f1a755SMichal Kalderon siw_mmap_entry_insert(ctx, cq->queue, 115111f1a755SMichal Kalderon length, &uresp.cq_key); 115211f1a755SMichal Kalderon if (!cq->cq_entry) { 1153303ae1cdSBernard Metzler rv = -ENOMEM; 1154303ae1cdSBernard Metzler goto err_out; 1155303ae1cdSBernard Metzler } 115611f1a755SMichal Kalderon 1157303ae1cdSBernard Metzler uresp.cq_id = cq->id; 1158303ae1cdSBernard Metzler uresp.num_cqe = size; 1159303ae1cdSBernard Metzler 1160303ae1cdSBernard Metzler if (udata->outlen < sizeof(uresp)) { 1161303ae1cdSBernard Metzler rv = -EINVAL; 1162303ae1cdSBernard Metzler goto err_out; 1163303ae1cdSBernard Metzler } 1164303ae1cdSBernard Metzler rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1165303ae1cdSBernard Metzler if (rv) 1166303ae1cdSBernard Metzler goto err_out; 1167303ae1cdSBernard Metzler } 1168303ae1cdSBernard Metzler return 0; 1169303ae1cdSBernard Metzler 1170303ae1cdSBernard Metzler err_out: 1171303ae1cdSBernard Metzler siw_dbg(base_cq->device, "CQ creation failed: %d", rv); 1172303ae1cdSBernard Metzler 1173aeea6cc0SAndrey Strachuk if (cq->queue) { 1174303ae1cdSBernard Metzler struct siw_ucontext *ctx = 1175303ae1cdSBernard Metzler rdma_udata_to_drv_context(udata, struct siw_ucontext, 1176303ae1cdSBernard Metzler base_ucontext); 117711f1a755SMichal Kalderon if (ctx) 117811f1a755SMichal Kalderon rdma_user_mmap_entry_remove(cq->cq_entry); 1179303ae1cdSBernard Metzler vfree(cq->queue); 1180303ae1cdSBernard Metzler } 1181303ae1cdSBernard Metzler atomic_dec(&sdev->num_cq); 1182303ae1cdSBernard Metzler 1183303ae1cdSBernard Metzler return rv; 1184303ae1cdSBernard Metzler } 1185303ae1cdSBernard Metzler 1186303ae1cdSBernard Metzler /* 1187303ae1cdSBernard Metzler * siw_poll_cq() 1188303ae1cdSBernard Metzler * 1189303ae1cdSBernard Metzler * Reap CQ entries if available and copy work completion status into 1190303ae1cdSBernard Metzler * array of WC's provided by caller. Returns number of reaped CQE's. 1191303ae1cdSBernard Metzler * 1192303ae1cdSBernard Metzler * @base_cq: Base CQ contained in siw CQ. 1193303ae1cdSBernard Metzler * @num_cqe: Maximum number of CQE's to reap. 1194303ae1cdSBernard Metzler * @wc: Array of work completions to be filled by siw. 
1195303ae1cdSBernard Metzler */ 1196303ae1cdSBernard Metzler int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc) 1197303ae1cdSBernard Metzler { 1198303ae1cdSBernard Metzler struct siw_cq *cq = to_siw_cq(base_cq); 1199303ae1cdSBernard Metzler int i; 1200303ae1cdSBernard Metzler 1201303ae1cdSBernard Metzler for (i = 0; i < num_cqe; i++) { 1202303ae1cdSBernard Metzler if (!siw_reap_cqe(cq, wc)) 1203303ae1cdSBernard Metzler break; 1204303ae1cdSBernard Metzler wc++; 1205303ae1cdSBernard Metzler } 1206303ae1cdSBernard Metzler return i; 1207303ae1cdSBernard Metzler } 1208303ae1cdSBernard Metzler 1209303ae1cdSBernard Metzler /* 1210303ae1cdSBernard Metzler * siw_req_notify_cq() 1211303ae1cdSBernard Metzler * 1212303ae1cdSBernard Metzler * Request notification for new CQE's added to that CQ. 1213303ae1cdSBernard Metzler * Defined flags: 1214303ae1cdSBernard Metzler * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification 1215303ae1cdSBernard Metzler * event if a WQE with notification flag set enters the CQ 1216303ae1cdSBernard Metzler * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification 1217303ae1cdSBernard Metzler * event if a WQE enters the CQ. 1218303ae1cdSBernard Metzler * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the 1219303ae1cdSBernard Metzler * number of not reaped CQE's regardless of its notification 1220303ae1cdSBernard Metzler * type and current or new CQ notification settings. 1221303ae1cdSBernard Metzler * 1222303ae1cdSBernard Metzler * @base_cq: Base CQ contained in siw CQ. 1223303ae1cdSBernard Metzler * @flags: Requested notification flags. 1224303ae1cdSBernard Metzler */ 1225303ae1cdSBernard Metzler int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags) 1226303ae1cdSBernard Metzler { 1227303ae1cdSBernard Metzler struct siw_cq *cq = to_siw_cq(base_cq); 1228303ae1cdSBernard Metzler 1229303ae1cdSBernard Metzler siw_dbg_cq(cq, "flags: 0x%02x\n", flags); 1230303ae1cdSBernard Metzler 1231303ae1cdSBernard Metzler if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) 12322c8ccb37SBernard Metzler /* 12332c8ccb37SBernard Metzler * Enable CQ event for next solicited completion. 12342c8ccb37SBernard Metzler * and make it visible to all associated producers. 12352c8ccb37SBernard Metzler */ 12362c8ccb37SBernard Metzler smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED); 1237303ae1cdSBernard Metzler else 12382c8ccb37SBernard Metzler /* 12392c8ccb37SBernard Metzler * Enable CQ event for any signalled completion. 12402c8ccb37SBernard Metzler * and make it visible to all associated producers. 12412c8ccb37SBernard Metzler */ 12422c8ccb37SBernard Metzler smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL); 1243303ae1cdSBernard Metzler 1244303ae1cdSBernard Metzler if (flags & IB_CQ_REPORT_MISSED_EVENTS) 1245303ae1cdSBernard Metzler return cq->cq_put - cq->cq_get; 1246303ae1cdSBernard Metzler 1247303ae1cdSBernard Metzler return 0; 1248303ae1cdSBernard Metzler } 1249303ae1cdSBernard Metzler 1250303ae1cdSBernard Metzler /* 1251303ae1cdSBernard Metzler * siw_dereg_mr() 1252303ae1cdSBernard Metzler * 1253303ae1cdSBernard Metzler * Release Memory Region. 1254303ae1cdSBernard Metzler * 1255303ae1cdSBernard Metzler * @base_mr: Base MR contained in siw MR. 1256303ae1cdSBernard Metzler * @udata: points to user context, unused. 
1257303ae1cdSBernard Metzler */ 1258303ae1cdSBernard Metzler int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata) 1259303ae1cdSBernard Metzler { 1260303ae1cdSBernard Metzler struct siw_mr *mr = to_siw_mr(base_mr); 1261303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(base_mr->device); 1262303ae1cdSBernard Metzler 1263303ae1cdSBernard Metzler siw_dbg_mem(mr->mem, "deregister MR\n"); 1264303ae1cdSBernard Metzler 1265303ae1cdSBernard Metzler atomic_dec(&sdev->num_mr); 1266303ae1cdSBernard Metzler 1267303ae1cdSBernard Metzler siw_mr_drop_mem(mr); 1268303ae1cdSBernard Metzler kfree_rcu(mr, rcu); 1269303ae1cdSBernard Metzler 1270303ae1cdSBernard Metzler return 0; 1271303ae1cdSBernard Metzler } 1272303ae1cdSBernard Metzler 1273303ae1cdSBernard Metzler /* 1274303ae1cdSBernard Metzler * siw_reg_user_mr() 1275303ae1cdSBernard Metzler * 1276303ae1cdSBernard Metzler * Register Memory Region. 1277303ae1cdSBernard Metzler * 1278303ae1cdSBernard Metzler * @pd: Protection Domain 1279303ae1cdSBernard Metzler * @start: starting address of MR (virtual address) 1280303ae1cdSBernard Metzler * @len: len of MR 1281303ae1cdSBernard Metzler * @rnic_va: not used by siw 1282303ae1cdSBernard Metzler * @rights: MR access rights 1283303ae1cdSBernard Metzler * @udata: user buffer to communicate STag and Key. 1284303ae1cdSBernard Metzler */ 1285303ae1cdSBernard Metzler struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, 1286303ae1cdSBernard Metzler u64 rnic_va, int rights, struct ib_udata *udata) 1287303ae1cdSBernard Metzler { 1288303ae1cdSBernard Metzler struct siw_mr *mr = NULL; 1289303ae1cdSBernard Metzler struct siw_umem *umem = NULL; 1290303ae1cdSBernard Metzler struct siw_ureq_reg_mr ureq; 1291303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(pd->device); 1292303ae1cdSBernard Metzler 1293303ae1cdSBernard Metzler unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); 1294303ae1cdSBernard Metzler int rv; 1295303ae1cdSBernard Metzler 1296c536277eSBernard Metzler siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n", 1297c536277eSBernard Metzler (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va, 1298303ae1cdSBernard Metzler (unsigned long long)len); 1299303ae1cdSBernard Metzler 1300303ae1cdSBernard Metzler if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1301303ae1cdSBernard Metzler siw_dbg_pd(pd, "too many mr's\n"); 1302303ae1cdSBernard Metzler rv = -ENOMEM; 1303303ae1cdSBernard Metzler goto err_out; 1304303ae1cdSBernard Metzler } 1305303ae1cdSBernard Metzler if (!len) { 1306303ae1cdSBernard Metzler rv = -EINVAL; 1307303ae1cdSBernard Metzler goto err_out; 1308303ae1cdSBernard Metzler } 1309303ae1cdSBernard Metzler if (mem_limit != RLIM_INFINITY) { 1310303ae1cdSBernard Metzler unsigned long num_pages = 1311303ae1cdSBernard Metzler (PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT; 1312303ae1cdSBernard Metzler mem_limit >>= PAGE_SHIFT; 1313303ae1cdSBernard Metzler 1314303ae1cdSBernard Metzler if (num_pages > mem_limit - current->mm->locked_vm) { 1315303ae1cdSBernard Metzler siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n", 1316303ae1cdSBernard Metzler num_pages, mem_limit, 1317303ae1cdSBernard Metzler current->mm->locked_vm); 1318303ae1cdSBernard Metzler rv = -ENOMEM; 1319303ae1cdSBernard Metzler goto err_out; 1320303ae1cdSBernard Metzler } 1321303ae1cdSBernard Metzler } 1322303ae1cdSBernard Metzler umem = siw_umem_get(start, len, ib_access_writable(rights)); 1323303ae1cdSBernard Metzler if (IS_ERR(umem)) { 1324303ae1cdSBernard Metzler rv = PTR_ERR(umem); 
1325303ae1cdSBernard Metzler siw_dbg_pd(pd, "getting user memory failed: %d\n", rv); 1326303ae1cdSBernard Metzler umem = NULL; 1327303ae1cdSBernard Metzler goto err_out; 1328303ae1cdSBernard Metzler } 1329303ae1cdSBernard Metzler mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1330303ae1cdSBernard Metzler if (!mr) { 1331303ae1cdSBernard Metzler rv = -ENOMEM; 1332303ae1cdSBernard Metzler goto err_out; 1333303ae1cdSBernard Metzler } 1334303ae1cdSBernard Metzler rv = siw_mr_add_mem(mr, pd, umem, start, len, rights); 1335303ae1cdSBernard Metzler if (rv) 1336303ae1cdSBernard Metzler goto err_out; 1337303ae1cdSBernard Metzler 1338303ae1cdSBernard Metzler if (udata) { 1339303ae1cdSBernard Metzler struct siw_uresp_reg_mr uresp = {}; 1340303ae1cdSBernard Metzler struct siw_mem *mem = mr->mem; 1341303ae1cdSBernard Metzler 1342303ae1cdSBernard Metzler if (udata->inlen < sizeof(ureq)) { 1343303ae1cdSBernard Metzler rv = -EINVAL; 1344303ae1cdSBernard Metzler goto err_out; 1345303ae1cdSBernard Metzler } 1346303ae1cdSBernard Metzler rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq)); 1347303ae1cdSBernard Metzler if (rv) 1348303ae1cdSBernard Metzler goto err_out; 1349303ae1cdSBernard Metzler 1350303ae1cdSBernard Metzler mr->base_mr.lkey |= ureq.stag_key; 1351303ae1cdSBernard Metzler mr->base_mr.rkey |= ureq.stag_key; 1352303ae1cdSBernard Metzler mem->stag |= ureq.stag_key; 1353303ae1cdSBernard Metzler uresp.stag = mem->stag; 1354303ae1cdSBernard Metzler 1355303ae1cdSBernard Metzler if (udata->outlen < sizeof(uresp)) { 1356303ae1cdSBernard Metzler rv = -EINVAL; 1357303ae1cdSBernard Metzler goto err_out; 1358303ae1cdSBernard Metzler } 1359303ae1cdSBernard Metzler rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1360303ae1cdSBernard Metzler if (rv) 1361303ae1cdSBernard Metzler goto err_out; 1362303ae1cdSBernard Metzler } 1363303ae1cdSBernard Metzler mr->mem->stag_valid = 1; 1364303ae1cdSBernard Metzler 1365303ae1cdSBernard Metzler return &mr->base_mr; 1366303ae1cdSBernard Metzler 1367303ae1cdSBernard Metzler err_out: 1368303ae1cdSBernard Metzler atomic_dec(&sdev->num_mr); 1369303ae1cdSBernard Metzler if (mr) { 1370303ae1cdSBernard Metzler if (mr->mem) 1371303ae1cdSBernard Metzler siw_mr_drop_mem(mr); 1372303ae1cdSBernard Metzler kfree_rcu(mr, rcu); 1373303ae1cdSBernard Metzler } else { 1374303ae1cdSBernard Metzler if (umem) 1375303ae1cdSBernard Metzler siw_umem_release(umem, false); 1376303ae1cdSBernard Metzler } 1377303ae1cdSBernard Metzler return ERR_PTR(rv); 1378303ae1cdSBernard Metzler } 1379303ae1cdSBernard Metzler 1380303ae1cdSBernard Metzler struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 138142a3b153SGal Pressman u32 max_sge) 1382303ae1cdSBernard Metzler { 1383303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(pd->device); 1384303ae1cdSBernard Metzler struct siw_mr *mr = NULL; 1385303ae1cdSBernard Metzler struct siw_pbl *pbl = NULL; 1386303ae1cdSBernard Metzler int rv; 1387303ae1cdSBernard Metzler 1388303ae1cdSBernard Metzler if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1389303ae1cdSBernard Metzler siw_dbg_pd(pd, "too many mr's\n"); 1390303ae1cdSBernard Metzler rv = -ENOMEM; 1391303ae1cdSBernard Metzler goto err_out; 1392303ae1cdSBernard Metzler } 1393303ae1cdSBernard Metzler if (mr_type != IB_MR_TYPE_MEM_REG) { 1394303ae1cdSBernard Metzler siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type); 1395303ae1cdSBernard Metzler rv = -EOPNOTSUPP; 1396303ae1cdSBernard Metzler goto err_out; 1397303ae1cdSBernard Metzler } 1398303ae1cdSBernard Metzler if (max_sge > 
SIW_MAX_SGE_PBL) { 1399303ae1cdSBernard Metzler siw_dbg_pd(pd, "too many sge's: %d\n", max_sge); 1400303ae1cdSBernard Metzler rv = -ENOMEM; 1401303ae1cdSBernard Metzler goto err_out; 1402303ae1cdSBernard Metzler } 1403303ae1cdSBernard Metzler pbl = siw_pbl_alloc(max_sge); 1404303ae1cdSBernard Metzler if (IS_ERR(pbl)) { 1405303ae1cdSBernard Metzler rv = PTR_ERR(pbl); 1406303ae1cdSBernard Metzler siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv); 1407303ae1cdSBernard Metzler pbl = NULL; 1408303ae1cdSBernard Metzler goto err_out; 1409303ae1cdSBernard Metzler } 1410303ae1cdSBernard Metzler mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1411303ae1cdSBernard Metzler if (!mr) { 1412303ae1cdSBernard Metzler rv = -ENOMEM; 1413303ae1cdSBernard Metzler goto err_out; 1414303ae1cdSBernard Metzler } 1415303ae1cdSBernard Metzler rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0); 1416303ae1cdSBernard Metzler if (rv) 1417303ae1cdSBernard Metzler goto err_out; 1418303ae1cdSBernard Metzler 1419303ae1cdSBernard Metzler mr->mem->is_pbl = 1; 1420303ae1cdSBernard Metzler 1421303ae1cdSBernard Metzler siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag); 1422303ae1cdSBernard Metzler 1423303ae1cdSBernard Metzler return &mr->base_mr; 1424303ae1cdSBernard Metzler 1425303ae1cdSBernard Metzler err_out: 1426303ae1cdSBernard Metzler atomic_dec(&sdev->num_mr); 1427303ae1cdSBernard Metzler 1428303ae1cdSBernard Metzler if (!mr) { 1429303ae1cdSBernard Metzler kfree(pbl); 1430303ae1cdSBernard Metzler } else { 1431303ae1cdSBernard Metzler if (mr->mem) 1432303ae1cdSBernard Metzler siw_mr_drop_mem(mr); 1433303ae1cdSBernard Metzler kfree_rcu(mr, rcu); 1434303ae1cdSBernard Metzler } 1435303ae1cdSBernard Metzler siw_dbg_pd(pd, "failed: %d\n", rv); 1436303ae1cdSBernard Metzler 1437303ae1cdSBernard Metzler return ERR_PTR(rv); 1438303ae1cdSBernard Metzler } 1439303ae1cdSBernard Metzler 1440303ae1cdSBernard Metzler /* Just used to count number of pages being mapped */ 1441303ae1cdSBernard Metzler static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr) 1442303ae1cdSBernard Metzler { 1443303ae1cdSBernard Metzler return 0; 1444303ae1cdSBernard Metzler } 1445303ae1cdSBernard Metzler 1446303ae1cdSBernard Metzler int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, 1447303ae1cdSBernard Metzler unsigned int *sg_off) 1448303ae1cdSBernard Metzler { 1449303ae1cdSBernard Metzler struct scatterlist *slp; 1450303ae1cdSBernard Metzler struct siw_mr *mr = to_siw_mr(base_mr); 1451303ae1cdSBernard Metzler struct siw_mem *mem = mr->mem; 1452303ae1cdSBernard Metzler struct siw_pbl *pbl = mem->pbl; 1453303ae1cdSBernard Metzler struct siw_pble *pble; 1454c536277eSBernard Metzler unsigned long pbl_size; 1455303ae1cdSBernard Metzler int i, rv; 1456303ae1cdSBernard Metzler 1457303ae1cdSBernard Metzler if (!pbl) { 1458303ae1cdSBernard Metzler siw_dbg_mem(mem, "no PBL allocated\n"); 1459303ae1cdSBernard Metzler return -EINVAL; 1460303ae1cdSBernard Metzler } 1461303ae1cdSBernard Metzler pble = pbl->pbe; 1462303ae1cdSBernard Metzler 1463303ae1cdSBernard Metzler if (pbl->max_buf < num_sle) { 1464303ae1cdSBernard Metzler siw_dbg_mem(mem, "too many SGE's: %d > %d\n", 1465303ae1cdSBernard Metzler mem->pbl->max_buf, num_sle); 1466303ae1cdSBernard Metzler return -ENOMEM; 1467303ae1cdSBernard Metzler } 1468303ae1cdSBernard Metzler for_each_sg(sl, slp, num_sle, i) { 1469303ae1cdSBernard Metzler if (sg_dma_len(slp) == 0) { 1470303ae1cdSBernard Metzler siw_dbg_mem(mem, "empty SGE\n"); 1471303ae1cdSBernard Metzler return -EINVAL; 
1472303ae1cdSBernard Metzler } 1473303ae1cdSBernard Metzler if (i == 0) { 1474303ae1cdSBernard Metzler pble->addr = sg_dma_address(slp); 1475303ae1cdSBernard Metzler pble->size = sg_dma_len(slp); 1476303ae1cdSBernard Metzler pble->pbl_off = 0; 1477303ae1cdSBernard Metzler pbl_size = pble->size; 1478303ae1cdSBernard Metzler pbl->num_buf = 1; 1479303ae1cdSBernard Metzler } else { 1480303ae1cdSBernard Metzler /* Merge PBL entries if adjacent */ 1481303ae1cdSBernard Metzler if (pble->addr + pble->size == sg_dma_address(slp)) { 1482303ae1cdSBernard Metzler pble->size += sg_dma_len(slp); 1483303ae1cdSBernard Metzler } else { 1484303ae1cdSBernard Metzler pble++; 1485303ae1cdSBernard Metzler pbl->num_buf++; 1486303ae1cdSBernard Metzler pble->addr = sg_dma_address(slp); 1487303ae1cdSBernard Metzler pble->size = sg_dma_len(slp); 1488303ae1cdSBernard Metzler pble->pbl_off = pbl_size; 1489303ae1cdSBernard Metzler } 1490303ae1cdSBernard Metzler pbl_size += sg_dma_len(slp); 1491303ae1cdSBernard Metzler } 1492303ae1cdSBernard Metzler siw_dbg_mem(mem, 1493c536277eSBernard Metzler "sge[%d], size %u, addr 0x%p, total %lu\n", 1494c536277eSBernard Metzler i, pble->size, (void *)(uintptr_t)pble->addr, 1495c536277eSBernard Metzler pbl_size); 1496303ae1cdSBernard Metzler } 1497303ae1cdSBernard Metzler rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); 1498303ae1cdSBernard Metzler if (rv > 0) { 1499303ae1cdSBernard Metzler mem->len = base_mr->length; 1500303ae1cdSBernard Metzler mem->va = base_mr->iova; 1501303ae1cdSBernard Metzler siw_dbg_mem(mem, 1502c536277eSBernard Metzler "%llu bytes, start 0x%pK, %u SLE to %u entries\n", 1503c536277eSBernard Metzler mem->len, (void *)(uintptr_t)mem->va, num_sle, 1504c536277eSBernard Metzler pbl->num_buf); 1505303ae1cdSBernard Metzler } 1506303ae1cdSBernard Metzler return rv; 1507303ae1cdSBernard Metzler } 1508303ae1cdSBernard Metzler 1509303ae1cdSBernard Metzler /* 1510303ae1cdSBernard Metzler * siw_get_dma_mr() 1511303ae1cdSBernard Metzler * 1512303ae1cdSBernard Metzler * Create a (empty) DMA memory region, where no umem is attached. 
1513303ae1cdSBernard Metzler */ 1514303ae1cdSBernard Metzler struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights) 1515303ae1cdSBernard Metzler { 1516303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(pd->device); 1517303ae1cdSBernard Metzler struct siw_mr *mr = NULL; 1518303ae1cdSBernard Metzler int rv; 1519303ae1cdSBernard Metzler 1520303ae1cdSBernard Metzler if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1521303ae1cdSBernard Metzler siw_dbg_pd(pd, "too many mr's\n"); 1522303ae1cdSBernard Metzler rv = -ENOMEM; 1523303ae1cdSBernard Metzler goto err_out; 1524303ae1cdSBernard Metzler } 1525303ae1cdSBernard Metzler mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1526303ae1cdSBernard Metzler if (!mr) { 1527303ae1cdSBernard Metzler rv = -ENOMEM; 1528303ae1cdSBernard Metzler goto err_out; 1529303ae1cdSBernard Metzler } 1530303ae1cdSBernard Metzler rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights); 1531303ae1cdSBernard Metzler if (rv) 1532303ae1cdSBernard Metzler goto err_out; 1533303ae1cdSBernard Metzler 1534303ae1cdSBernard Metzler mr->mem->stag_valid = 1; 1535303ae1cdSBernard Metzler 1536303ae1cdSBernard Metzler siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag); 1537303ae1cdSBernard Metzler 1538303ae1cdSBernard Metzler return &mr->base_mr; 1539303ae1cdSBernard Metzler 1540303ae1cdSBernard Metzler err_out: 1541303ae1cdSBernard Metzler if (rv) 1542303ae1cdSBernard Metzler kfree(mr); 1543303ae1cdSBernard Metzler 1544303ae1cdSBernard Metzler atomic_dec(&sdev->num_mr); 1545303ae1cdSBernard Metzler 1546303ae1cdSBernard Metzler return ERR_PTR(rv); 1547303ae1cdSBernard Metzler } 1548303ae1cdSBernard Metzler 1549303ae1cdSBernard Metzler /* 1550303ae1cdSBernard Metzler * siw_create_srq() 1551303ae1cdSBernard Metzler * 1552303ae1cdSBernard Metzler * Create Shared Receive Queue of attributes @init_attrs 1553303ae1cdSBernard Metzler * within protection domain given by @pd. 1554303ae1cdSBernard Metzler * 1555303ae1cdSBernard Metzler * @base_srq: Base SRQ contained in siw SRQ. 1556303ae1cdSBernard Metzler * @init_attrs: SRQ init attributes. 
1557303ae1cdSBernard Metzler * @udata: points to user context 1558303ae1cdSBernard Metzler */ 1559303ae1cdSBernard Metzler int siw_create_srq(struct ib_srq *base_srq, 1560303ae1cdSBernard Metzler struct ib_srq_init_attr *init_attrs, struct ib_udata *udata) 1561303ae1cdSBernard Metzler { 1562303ae1cdSBernard Metzler struct siw_srq *srq = to_siw_srq(base_srq); 1563303ae1cdSBernard Metzler struct ib_srq_attr *attrs = &init_attrs->attr; 1564303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(base_srq->device); 1565303ae1cdSBernard Metzler struct siw_ucontext *ctx = 1566303ae1cdSBernard Metzler rdma_udata_to_drv_context(udata, struct siw_ucontext, 1567303ae1cdSBernard Metzler base_ucontext); 1568303ae1cdSBernard Metzler int rv; 1569303ae1cdSBernard Metzler 1570652caba5SJason Gunthorpe if (init_attrs->srq_type != IB_SRQT_BASIC) 1571652caba5SJason Gunthorpe return -EOPNOTSUPP; 1572652caba5SJason Gunthorpe 1573303ae1cdSBernard Metzler if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) { 1574303ae1cdSBernard Metzler siw_dbg_pd(base_srq->pd, "too many SRQ's\n"); 1575303ae1cdSBernard Metzler rv = -ENOMEM; 1576303ae1cdSBernard Metzler goto err_out; 1577303ae1cdSBernard Metzler } 1578303ae1cdSBernard Metzler if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR || 1579303ae1cdSBernard Metzler attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) { 1580303ae1cdSBernard Metzler rv = -EINVAL; 1581303ae1cdSBernard Metzler goto err_out; 1582303ae1cdSBernard Metzler } 1583303ae1cdSBernard Metzler srq->max_sge = attrs->max_sge; 1584303ae1cdSBernard Metzler srq->num_rqe = roundup_pow_of_two(attrs->max_wr); 1585303ae1cdSBernard Metzler srq->limit = attrs->srq_limit; 1586303ae1cdSBernard Metzler if (srq->limit) 158758fb0b56SBernard Metzler srq->armed = true; 1588303ae1cdSBernard Metzler 158958fb0b56SBernard Metzler srq->is_kernel_res = !udata; 1590303ae1cdSBernard Metzler 1591303ae1cdSBernard Metzler if (udata) 1592303ae1cdSBernard Metzler srq->recvq = 1593303ae1cdSBernard Metzler vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe)); 1594303ae1cdSBernard Metzler else 1595303ae1cdSBernard Metzler srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe)); 1596303ae1cdSBernard Metzler 1597303ae1cdSBernard Metzler if (srq->recvq == NULL) { 1598303ae1cdSBernard Metzler rv = -ENOMEM; 1599303ae1cdSBernard Metzler goto err_out; 1600303ae1cdSBernard Metzler } 1601303ae1cdSBernard Metzler if (udata) { 1602303ae1cdSBernard Metzler struct siw_uresp_create_srq uresp = {}; 160311f1a755SMichal Kalderon size_t length = srq->num_rqe * sizeof(struct siw_rqe); 1604303ae1cdSBernard Metzler 160511f1a755SMichal Kalderon srq->srq_entry = 160611f1a755SMichal Kalderon siw_mmap_entry_insert(ctx, srq->recvq, 160711f1a755SMichal Kalderon length, &uresp.srq_key); 160811f1a755SMichal Kalderon if (!srq->srq_entry) { 1609303ae1cdSBernard Metzler rv = -ENOMEM; 1610303ae1cdSBernard Metzler goto err_out; 1611303ae1cdSBernard Metzler } 161211f1a755SMichal Kalderon 1613303ae1cdSBernard Metzler uresp.num_rqe = srq->num_rqe; 1614303ae1cdSBernard Metzler 1615303ae1cdSBernard Metzler if (udata->outlen < sizeof(uresp)) { 1616303ae1cdSBernard Metzler rv = -EINVAL; 1617303ae1cdSBernard Metzler goto err_out; 1618303ae1cdSBernard Metzler } 1619303ae1cdSBernard Metzler rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1620303ae1cdSBernard Metzler if (rv) 1621303ae1cdSBernard Metzler goto err_out; 1622303ae1cdSBernard Metzler } 1623303ae1cdSBernard Metzler spin_lock_init(&srq->lock); 1624303ae1cdSBernard Metzler 
1625c536277eSBernard Metzler siw_dbg_pd(base_srq->pd, "[SRQ]: success\n"); 1626303ae1cdSBernard Metzler 1627303ae1cdSBernard Metzler return 0; 1628303ae1cdSBernard Metzler 1629303ae1cdSBernard Metzler err_out: 1630303ae1cdSBernard Metzler if (srq->recvq) { 163111f1a755SMichal Kalderon if (ctx) 163211f1a755SMichal Kalderon rdma_user_mmap_entry_remove(srq->srq_entry); 1633303ae1cdSBernard Metzler vfree(srq->recvq); 1634303ae1cdSBernard Metzler } 1635303ae1cdSBernard Metzler atomic_dec(&sdev->num_srq); 1636303ae1cdSBernard Metzler 1637303ae1cdSBernard Metzler return rv; 1638303ae1cdSBernard Metzler } 1639303ae1cdSBernard Metzler 1640303ae1cdSBernard Metzler /* 1641303ae1cdSBernard Metzler * siw_modify_srq() 1642303ae1cdSBernard Metzler * 1643303ae1cdSBernard Metzler * Modify SRQ. The caller may resize SRQ and/or set/reset notification 1644303ae1cdSBernard Metzler * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification. 1645303ae1cdSBernard Metzler * 1646303ae1cdSBernard Metzler * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE 1647303ae1cdSBernard Metzler * parameter. siw_modify_srq() does not check the attrs->max_sge param. 1648303ae1cdSBernard Metzler */ 1649303ae1cdSBernard Metzler int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs, 1650303ae1cdSBernard Metzler enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) 1651303ae1cdSBernard Metzler { 1652303ae1cdSBernard Metzler struct siw_srq *srq = to_siw_srq(base_srq); 1653303ae1cdSBernard Metzler unsigned long flags; 1654303ae1cdSBernard Metzler int rv = 0; 1655303ae1cdSBernard Metzler 1656303ae1cdSBernard Metzler spin_lock_irqsave(&srq->lock, flags); 1657303ae1cdSBernard Metzler 1658303ae1cdSBernard Metzler if (attr_mask & IB_SRQ_MAX_WR) { 1659303ae1cdSBernard Metzler /* resize request not yet supported */ 1660303ae1cdSBernard Metzler rv = -EOPNOTSUPP; 1661303ae1cdSBernard Metzler goto out; 1662303ae1cdSBernard Metzler } 1663303ae1cdSBernard Metzler if (attr_mask & IB_SRQ_LIMIT) { 1664303ae1cdSBernard Metzler if (attrs->srq_limit) { 1665303ae1cdSBernard Metzler if (unlikely(attrs->srq_limit > srq->num_rqe)) { 1666303ae1cdSBernard Metzler rv = -EINVAL; 1667303ae1cdSBernard Metzler goto out; 1668303ae1cdSBernard Metzler } 166958fb0b56SBernard Metzler srq->armed = true; 1670303ae1cdSBernard Metzler } else { 167158fb0b56SBernard Metzler srq->armed = false; 1672303ae1cdSBernard Metzler } 1673303ae1cdSBernard Metzler srq->limit = attrs->srq_limit; 1674303ae1cdSBernard Metzler } 1675303ae1cdSBernard Metzler out: 1676303ae1cdSBernard Metzler spin_unlock_irqrestore(&srq->lock, flags); 1677303ae1cdSBernard Metzler 1678303ae1cdSBernard Metzler return rv; 1679303ae1cdSBernard Metzler } 1680303ae1cdSBernard Metzler 1681303ae1cdSBernard Metzler /* 1682303ae1cdSBernard Metzler * siw_query_srq() 1683303ae1cdSBernard Metzler * 1684303ae1cdSBernard Metzler * Query SRQ attributes. 
1685303ae1cdSBernard Metzler */ 1686303ae1cdSBernard Metzler int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs) 1687303ae1cdSBernard Metzler { 1688303ae1cdSBernard Metzler struct siw_srq *srq = to_siw_srq(base_srq); 1689303ae1cdSBernard Metzler unsigned long flags; 1690303ae1cdSBernard Metzler 1691303ae1cdSBernard Metzler spin_lock_irqsave(&srq->lock, flags); 1692303ae1cdSBernard Metzler 1693303ae1cdSBernard Metzler attrs->max_wr = srq->num_rqe; 1694303ae1cdSBernard Metzler attrs->max_sge = srq->max_sge; 1695303ae1cdSBernard Metzler attrs->srq_limit = srq->limit; 1696303ae1cdSBernard Metzler 1697303ae1cdSBernard Metzler spin_unlock_irqrestore(&srq->lock, flags); 1698303ae1cdSBernard Metzler 1699303ae1cdSBernard Metzler return 0; 1700303ae1cdSBernard Metzler } 1701303ae1cdSBernard Metzler 1702303ae1cdSBernard Metzler /* 1703303ae1cdSBernard Metzler * siw_destroy_srq() 1704303ae1cdSBernard Metzler * 1705303ae1cdSBernard Metzler * Destroy SRQ. 1706303ae1cdSBernard Metzler * It is assumed that the SRQ is not referenced by any 1707303ae1cdSBernard Metzler * QP anymore - the code trusts the RDMA core environment to keep track 1708303ae1cdSBernard Metzler * of QP references. 1709303ae1cdSBernard Metzler */ 1710119181d1SLeon Romanovsky int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata) 1711303ae1cdSBernard Metzler { 1712303ae1cdSBernard Metzler struct siw_srq *srq = to_siw_srq(base_srq); 1713303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(base_srq->device); 1714303ae1cdSBernard Metzler struct siw_ucontext *ctx = 1715303ae1cdSBernard Metzler rdma_udata_to_drv_context(udata, struct siw_ucontext, 1716303ae1cdSBernard Metzler base_ucontext); 1717303ae1cdSBernard Metzler 171811f1a755SMichal Kalderon if (ctx) 171911f1a755SMichal Kalderon rdma_user_mmap_entry_remove(srq->srq_entry); 1720303ae1cdSBernard Metzler vfree(srq->recvq); 1721303ae1cdSBernard Metzler atomic_dec(&sdev->num_srq); 1722119181d1SLeon Romanovsky return 0; 1723303ae1cdSBernard Metzler } 1724303ae1cdSBernard Metzler 1725303ae1cdSBernard Metzler /* 1726303ae1cdSBernard Metzler * siw_post_srq_recv() 1727303ae1cdSBernard Metzler * 1728303ae1cdSBernard Metzler * Post a list of receive queue elements to SRQ. 1729303ae1cdSBernard Metzler * NOTE: The function does not check or lock a certain SRQ state 1730303ae1cdSBernard Metzler * during the post operation. The code simply trusts the 1731303ae1cdSBernard Metzler * RDMA core environment. 1732303ae1cdSBernard Metzler * 1733303ae1cdSBernard Metzler * @base_srq: Base SRQ contained in siw SRQ 1734303ae1cdSBernard Metzler * @wr: List of R-WR's 1735303ae1cdSBernard Metzler * @bad_wr: Updated to failing WR if posting fails. 1736303ae1cdSBernard Metzler */ 1737303ae1cdSBernard Metzler int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, 1738303ae1cdSBernard Metzler const struct ib_recv_wr **bad_wr) 1739303ae1cdSBernard Metzler { 1740303ae1cdSBernard Metzler struct siw_srq *srq = to_siw_srq(base_srq); 1741303ae1cdSBernard Metzler unsigned long flags; 1742303ae1cdSBernard Metzler int rv = 0; 1743303ae1cdSBernard Metzler 174458fb0b56SBernard Metzler if (unlikely(!srq->is_kernel_res)) { 1745303ae1cdSBernard Metzler siw_dbg_pd(base_srq->pd, 1746c536277eSBernard Metzler "[SRQ]: no kernel post_recv for mapped srq\n"); 1747303ae1cdSBernard Metzler rv = -EINVAL; 1748303ae1cdSBernard Metzler goto out; 1749303ae1cdSBernard Metzler } 1750303ae1cdSBernard Metzler /* 1751303ae1cdSBernard Metzler * Serialize potentially multiple producers. 
1752303ae1cdSBernard Metzler * Also needed to serialize potentially multiple 1753303ae1cdSBernard Metzler * consumers. 1754303ae1cdSBernard Metzler */ 1755303ae1cdSBernard Metzler spin_lock_irqsave(&srq->lock, flags); 1756303ae1cdSBernard Metzler 1757303ae1cdSBernard Metzler while (wr) { 1758303ae1cdSBernard Metzler u32 idx = srq->rq_put % srq->num_rqe; 1759303ae1cdSBernard Metzler struct siw_rqe *rqe = &srq->recvq[idx]; 1760303ae1cdSBernard Metzler 1761303ae1cdSBernard Metzler if (rqe->flags) { 1762303ae1cdSBernard Metzler siw_dbg_pd(base_srq->pd, "SRQ full\n"); 1763303ae1cdSBernard Metzler rv = -ENOMEM; 1764303ae1cdSBernard Metzler break; 1765303ae1cdSBernard Metzler } 1766303ae1cdSBernard Metzler if (unlikely(wr->num_sge > srq->max_sge)) { 1767303ae1cdSBernard Metzler siw_dbg_pd(base_srq->pd, 1768c536277eSBernard Metzler "[SRQ]: too many sge's: %d\n", wr->num_sge); 1769303ae1cdSBernard Metzler rv = -EINVAL; 1770303ae1cdSBernard Metzler break; 1771303ae1cdSBernard Metzler } 1772303ae1cdSBernard Metzler rqe->id = wr->wr_id; 1773303ae1cdSBernard Metzler rqe->num_sge = wr->num_sge; 1774303ae1cdSBernard Metzler siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); 1775303ae1cdSBernard Metzler 1776303ae1cdSBernard Metzler /* Make sure S-RQE is completely written before valid */ 1777303ae1cdSBernard Metzler smp_wmb(); 1778303ae1cdSBernard Metzler 1779303ae1cdSBernard Metzler rqe->flags = SIW_WQE_VALID; 1780303ae1cdSBernard Metzler 1781303ae1cdSBernard Metzler srq->rq_put++; 1782303ae1cdSBernard Metzler wr = wr->next; 1783303ae1cdSBernard Metzler } 1784303ae1cdSBernard Metzler spin_unlock_irqrestore(&srq->lock, flags); 1785303ae1cdSBernard Metzler out: 1786303ae1cdSBernard Metzler if (unlikely(rv < 0)) { 1787c536277eSBernard Metzler siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv); 1788303ae1cdSBernard Metzler *bad_wr = wr; 1789303ae1cdSBernard Metzler } 1790303ae1cdSBernard Metzler return rv; 1791303ae1cdSBernard Metzler } 1792303ae1cdSBernard Metzler 1793303ae1cdSBernard Metzler void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype) 1794303ae1cdSBernard Metzler { 1795303ae1cdSBernard Metzler struct ib_event event; 179658fb0b56SBernard Metzler struct ib_qp *base_qp = &qp->base_qp; 1797303ae1cdSBernard Metzler 1798303ae1cdSBernard Metzler /* 1799303ae1cdSBernard Metzler * Do not report asynchronous errors on QP which gets 1800303ae1cdSBernard Metzler * destroyed via verbs interface (siw_destroy_qp()) 1801303ae1cdSBernard Metzler */ 1802303ae1cdSBernard Metzler if (qp->attrs.flags & SIW_QP_IN_DESTROY) 1803303ae1cdSBernard Metzler return; 1804303ae1cdSBernard Metzler 1805303ae1cdSBernard Metzler event.event = etype; 1806303ae1cdSBernard Metzler event.device = base_qp->device; 1807303ae1cdSBernard Metzler event.element.qp = base_qp; 1808303ae1cdSBernard Metzler 1809303ae1cdSBernard Metzler if (base_qp->event_handler) { 1810303ae1cdSBernard Metzler siw_dbg_qp(qp, "reporting event %d\n", etype); 1811303ae1cdSBernard Metzler base_qp->event_handler(&event, base_qp->qp_context); 1812303ae1cdSBernard Metzler } 1813303ae1cdSBernard Metzler } 1814303ae1cdSBernard Metzler 1815303ae1cdSBernard Metzler void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype) 1816303ae1cdSBernard Metzler { 1817303ae1cdSBernard Metzler struct ib_event event; 1818303ae1cdSBernard Metzler struct ib_cq *base_cq = &cq->base_cq; 1819303ae1cdSBernard Metzler 1820303ae1cdSBernard Metzler event.event = etype; 1821303ae1cdSBernard Metzler event.device = base_cq->device; 1822303ae1cdSBernard Metzler event.element.cq = 
base_cq; 1823303ae1cdSBernard Metzler 1824303ae1cdSBernard Metzler if (base_cq->event_handler) { 1825303ae1cdSBernard Metzler siw_dbg_cq(cq, "reporting CQ event %d\n", etype); 1826303ae1cdSBernard Metzler base_cq->event_handler(&event, base_cq->cq_context); 1827303ae1cdSBernard Metzler } 1828303ae1cdSBernard Metzler } 1829303ae1cdSBernard Metzler 1830303ae1cdSBernard Metzler void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype) 1831303ae1cdSBernard Metzler { 1832303ae1cdSBernard Metzler struct ib_event event; 1833303ae1cdSBernard Metzler struct ib_srq *base_srq = &srq->base_srq; 1834303ae1cdSBernard Metzler 1835303ae1cdSBernard Metzler event.event = etype; 1836303ae1cdSBernard Metzler event.device = base_srq->device; 1837303ae1cdSBernard Metzler event.element.srq = base_srq; 1838303ae1cdSBernard Metzler 1839303ae1cdSBernard Metzler if (base_srq->event_handler) { 1840303ae1cdSBernard Metzler siw_dbg_pd(srq->base_srq.pd, 1841303ae1cdSBernard Metzler "reporting SRQ event %d\n", etype); 1842303ae1cdSBernard Metzler base_srq->event_handler(&event, base_srq->srq_context); 1843303ae1cdSBernard Metzler } 1844303ae1cdSBernard Metzler } 1845303ae1cdSBernard Metzler 18461fb7f897SMark Bloch void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype) 1847303ae1cdSBernard Metzler { 1848303ae1cdSBernard Metzler struct ib_event event; 1849303ae1cdSBernard Metzler 1850303ae1cdSBernard Metzler event.event = etype; 1851303ae1cdSBernard Metzler event.device = &sdev->base_dev; 1852303ae1cdSBernard Metzler event.element.port_num = port; 1853303ae1cdSBernard Metzler 1854303ae1cdSBernard Metzler siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype); 1855303ae1cdSBernard Metzler 1856303ae1cdSBernard Metzler ib_dispatch_event(&event); 1857303ae1cdSBernard Metzler } 1858
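/*
 * Illustrative sketch (not part of the siw driver): how a kernel ULP
 * typically drives the siw_poll_cq()/siw_req_notify_cq() entry points
 * above through the ib_verbs midlayer (<rdma/ib_verbs.h>, already
 * included by this file). The handler name and the drain-and-rearm
 * loop are assumptions for illustration only; they follow the common
 * "poll until empty, rearm, poll again if IB_CQ_REPORT_MISSED_EVENTS
 * signals outstanding CQEs" pattern that matches the return-value
 * semantics documented for siw_req_notify_cq().
 */
static void example_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	do {
		/* Reap completions until the CQ is empty. */
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			if (wc.status != IB_WC_SUCCESS)
				pr_warn("wr_id %llu failed: %d\n",
					wc.wr_id, wc.status);
		}
		/*
		 * Rearm the CQ. With IB_CQ_REPORT_MISSED_EVENTS a
		 * return value > 0 means completions may have arrived
		 * between the last poll and the rearm (siw reports
		 * cq_put - cq_get), so poll once more instead of
		 * waiting for the next completion event.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}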
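/*
 * Illustrative sketch (not part of the siw driver): posting a single
 * receive work request through ib_post_srq_recv(), the midlayer call
 * that lands in siw_post_srq_recv() above. The dma_addr, length and
 * lkey parameters are placeholders; a real consumer would pass a
 * DMA-mapped buffer belonging to a memory region registered with the
 * device.
 */
static int example_post_one_srq_recv(struct ib_srq *srq, u64 dma_addr,
				     u32 length, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= dma_addr,	/* DMA address of receive buffer */
		.length	= length,	/* buffer size in bytes */
		.lkey	= lkey,		/* local key of the registered MR */
	};
	struct ib_recv_wr wr = {
		.wr_id	 = wr_id,	/* echoed back in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	/*
	 * For siw this fails with -ENOMEM if the SRQ ring is full and
	 * -EINVAL if num_sge exceeds the SRQ's max_sge, as implemented
	 * in siw_post_srq_recv() above.
	 */
	return ib_post_srq_recv(srq, &wr, &bad_wr);
}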