// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <net/addrconf.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

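/* "RESET" is the longest state name; sizeof("RESET") == 6 covers every
 * entry below including its terminating NUL.
 */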
static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};

void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}

int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv)
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);

	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}

int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;

	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));

	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));

	return rv;
}

void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);

	atomic_dec(&uctx->sdev->num_ctx);
}

int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
			    sdev->raw_gid);

	return 0;
}

int siw_query_port(struct ib_device *base_dev, u32 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);
	int rv;

	memset(attr, 0, sizeof(*attr));

	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			      &attr->active_width);
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return rv;
}

int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0; */
	memset(gid, 0, sizeof(*gid));
	memcpy(gid->raw, sdev->raw_gid, ETH_ALEN);

	return 0;
}

int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}

int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);
	return 0;
}

void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}

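/*
 * Wrap @address of @length into an mmap entry and register it with
 * the RDMA core. On success, the mmap key to be handed back to user
 * space is returned in @offset; on failure, @offset is set to
 * SIW_INVAL_UOBJ_KEY and NULL is returned.
 */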
static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;

	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry,
					 length);
	if (rv) {
		kfree(entry);
		return NULL;
	}

	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @qp: Queue pair
 * @attrs: Initial QP attributes.
 * @udata: used to provide QP ID, SQ and RQ size back to user.
 */

int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		  struct ib_udata *udata)
{
	struct ib_pd *pd = ibqp->pd;
	struct siw_qp *qp = to_siw_qp(ibqp);
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;
	size_t length;

	siw_dbg(base_dev, "create new QP\n");

	if (attrs->create_flags)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_atomic;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EOPNOTSUPP;
		goto err_atomic;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_atomic;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_atomic;
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_atomic;
	}

	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_atomic;
	}

	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_atomic;

	num_sqe = attrs->cap.max_send_wr;
	num_rqe = attrs->cap.max_recv_wr;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
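	/* Example: with sq_size = 8 and a free running u32 'put' counter,
	 * put = 0xfffffffe maps to index 6, 0xffffffff to index 7, and
	 * the wrapped value 0 back to index 0, so indexing stays
	 * contiguous across counter overflow since 8 divides 2^32.
	 */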
	if (num_sqe)
		num_sqe = roundup_pow_of_two(num_sqe);
	else {
		/* Zero sized SQ is not supported */
		rv = -EINVAL;
		goto err_out_xa;
	}
	if (num_rqe)
		num_rqe = roundup_pow_of_two(num_rqe);

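	/* vmalloc_user() returns zeroed memory that can later be mapped
	 * into user space; kernel-only QPs get plain vcalloc() memory.
	 */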
	if (udata)
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->pd = pd;
	qp->scq = to_siw_cq(attrs->send_cq);
	qp->rcq = to_siw_cq(attrs->recv_cq);

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
			qp->base_qp.qp_num);
	} else if (num_rqe) {
		if (udata)
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq = vcalloc(num_rqe, sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq) {
			length = num_sqe * sizeof(struct siw_sqe);
			qp->sq_entry =
				siw_mmap_entry_insert(uctx, qp->sendq,
						      length, &uresp.sq_key);
			if (!qp->sq_entry) {
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (qp->recvq) {
			length = num_rqe * sizeof(struct siw_rqe);
			qp->rq_entry =
				siw_mmap_entry_insert(uctx, qp->recvq,
						      length, &uresp.rq_key);
			if (!qp->rq_entry) {
				uresp.sq_key = SIW_INVAL_UOBJ_KEY;
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

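	/* qp_free is completed when the last QP reference is dropped
	 * (from siw_free_qp()); siw_destroy_qp() waits on it before
	 * final teardown.
	 */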
	init_completion(&qp->qp_free);

	return 0;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}
	vfree(qp->sendq);
	vfree(qp->recvq);

err_atomic:
	atomic_dec(&sdev->num_qp);
	return rv;
}

/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided
 */
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}

int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);
	wait_for_completion(&qp->qp_free);

	return 0;
}

/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers,
 * the function checks if given buffer addresses and lengths are
 * within process context bounds.
 * Data from all provided sge's are copied together into the wqe,
 * referenced by a single sge.
 */
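/*
 * Example: two SGEs of 16 and 32 bytes are copied back-to-back into
 * the WQE memory at &sqe->sge[1] and reported as a single inline SGE
 * of 48 bytes; copying is aborted once the running total would exceed
 * SIW_MAX_INLINE.
 */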
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = max(bytes, 0);
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}

/* Complete SQ WR's without processing */
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
			   const struct ib_send_wr **bad_wr)
{
	int rv = 0;

	while (wr) {
		struct siw_sqe sqe = {};

		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE:
			sqe.opcode = SIW_OP_WRITE;
			break;
		case IB_WR_RDMA_READ:
			sqe.opcode = SIW_OP_READ;
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			sqe.opcode = SIW_OP_READ_LOCAL_INV;
			break;
		case IB_WR_SEND:
			sqe.opcode = SIW_OP_SEND;
			break;
		case IB_WR_SEND_WITH_IMM:
			sqe.opcode = SIW_OP_SEND_WITH_IMM;
			break;
		case IB_WR_SEND_WITH_INV:
			sqe.opcode = SIW_OP_SEND_REMOTE_INV;
			break;
		case IB_WR_LOCAL_INV:
			sqe.opcode = SIW_OP_INVAL_STAG;
			break;
		case IB_WR_REG_MR:
			sqe.opcode = SIW_OP_REG_MR;
			break;
		default:
			rv = -EINVAL;
			break;
		}
		if (!rv) {
			sqe.id = wr->wr_id;
			rv = siw_sqe_complete(qp, &sqe, 0,
					      SIW_WC_WR_FLUSH_ERR);
		}
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/* Complete RQ WR's without processing */
static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct siw_rqe rqe = {};
	int rv = 0;

	while (wr) {
		rqe.id = wr->wr_id;
		rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp: Base QP contained in siw QP
 * @wr: Null terminated list of user WR's
 * @bad_wr: Points to failing WR in case of synchronous failure.
 */
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients' needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_sq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. we could relax to SGL with multiple
			 * elements referring the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;

		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
	 * processing, if new work is already pending. But rv must be passed
	 * to caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (rdma_is_kernel_res(&qp->base_qp.res)) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);

	*bad_wr = wr;
	return rv;
}

/*
 * siw_post_receive()
 *
 * Post a list of R-WR's to a RQ.
 *
 * @base_qp: Base QP contained in siw QP
 * @wr: Null terminated list of user WR's
 * @bad_wr: Points to failing WR in case of synchronous failure.
 */
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	unsigned long flags;
	int rv = 0;

	if (qp->srq || qp->attrs.rq_size == 0) {
		*bad_wr = wr;
		return -EINVAL;
	}
	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients' needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_rq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (qp->attrs.state > SIW_QP_STATE_RTS) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. RQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_rq().
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Not needed for single threaded consumer side.
	 */
	spin_lock_irqsave(&qp->rq_lock, flags);

	while (wr) {
		u32 idx = qp->rq_put % qp->attrs.rq_size;
		struct siw_rqe *rqe = &qp->recvq[idx];

		if (rqe->flags) {
			siw_dbg_qp(qp, "RQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.rq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		qp->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	up_read(&qp->state_lock);

	if (rv < 0) {
		siw_dbg_qp(qp, "error %d\n", rv);
		*bad_wr = wr;
	}
	return rv > 0 ? 0 : rv;
}

siw_destroy_cq(struct ib_cq * base_cq,struct ib_udata * udata)110043d781b9SLeon Romanovsky int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
1101303ae1cdSBernard Metzler {
1102303ae1cdSBernard Metzler struct siw_cq *cq = to_siw_cq(base_cq);
1103303ae1cdSBernard Metzler struct siw_device *sdev = to_siw_dev(base_cq->device);
1104303ae1cdSBernard Metzler struct siw_ucontext *ctx =
1105303ae1cdSBernard Metzler rdma_udata_to_drv_context(udata, struct siw_ucontext,
1106303ae1cdSBernard Metzler base_ucontext);
1107303ae1cdSBernard Metzler
1108303ae1cdSBernard Metzler siw_dbg_cq(cq, "free CQ resources\n");
1109303ae1cdSBernard Metzler
1110303ae1cdSBernard Metzler siw_cq_flush(cq);
1111303ae1cdSBernard Metzler
111211f1a755SMichal Kalderon if (ctx)
111311f1a755SMichal Kalderon rdma_user_mmap_entry_remove(cq->cq_entry);
1114303ae1cdSBernard Metzler
1115303ae1cdSBernard Metzler atomic_dec(&sdev->num_cq);
1116303ae1cdSBernard Metzler
1117303ae1cdSBernard Metzler vfree(cq->queue);
111843d781b9SLeon Romanovsky return 0;
1119303ae1cdSBernard Metzler }

/*
 * siw_create_cq()
 *
 * Populate CQ of requested size
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
 * @udata: relates to user context
 */

int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_cq *cq = to_siw_cq(base_cq);
	int rv, size = attr->cqe;

	if (attr->flags)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		siw_dbg(base_cq->device, "too many CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (size < 1 || size > sdev->attrs.max_cqe) {
		siw_dbg(base_cq->device, "CQ size error: %d\n", size);
		rv = -EINVAL;
		goto err_out;
	}
	size = roundup_pow_of_two(size);
	cq->base_cq.cqe = size;
	cq->num_cqe = size;

	if (udata)
		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
					 sizeof(struct siw_cq_ctrl));
	else
		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
				    sizeof(struct siw_cq_ctrl));

	if (cq->queue == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	get_random_bytes(&cq->id, 4);
	siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);

	spin_lock_init(&cq->lock);

	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

	if (udata) {
		struct siw_uresp_create_cq uresp = {};
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		size_t length = size * sizeof(struct siw_cqe) +
			sizeof(struct siw_cq_ctrl);

		cq->cq_entry =
			siw_mmap_entry_insert(ctx, cq->queue,
					      length, &uresp.cq_key);
		if (!cq->cq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.cq_id = cq->id;
		uresp.num_cqe = size;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	return 0;

err_out:
	siw_dbg(base_cq->device, "CQ creation failed: %d", rv);

	if (cq->queue) {
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		if (ctx)
			rdma_user_mmap_entry_remove(cq->cq_entry);
		vfree(cq->queue);
	}
	atomic_dec(&sdev->num_cq);

	return rv;
}
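
/*
 * Editorial sketch (not part of the siw sources): in-kernel consumers
 * usually do not invoke the create_cq verb directly but go through
 * ib_alloc_cq(), which allocates the ib_cq and then calls into the
 * driver (for siw, siw_create_cq() with udata == NULL). The CQE count,
 * vector and poll context below are arbitrary example values.
 */
#if 0	/* illustrative only */
static struct ib_cq *example_alloc_cq(struct ib_device *dev, void *priv)
{
	/* 256 CQEs, completion vector 0, polled from soft-IRQ context */
	return ib_alloc_cq(dev, priv, 256, 0, IB_POLL_SOFTIRQ);
}
#endif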

/*
 * siw_poll_cq()
 *
 * Reap CQ entries if available and copy work completion status into
 * array of WC's provided by caller. Returns number of reaped CQE's.
 *
 * @base_cq: Base CQ contained in siw CQ.
 * @num_cqe: Maximum number of CQE's to reap.
 * @wc: Array of work completions to be filled by siw.
 */
int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		if (!siw_reap_cqe(cq, wc))
			break;
		wc++;
	}
	return i;
}
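
/*
 * Editorial sketch (not part of the siw sources): a typical
 * consumer-side polling loop over the verb above, reached through
 * ib_poll_cq(). Batch size and error handling are illustrative.
 */
#if 0	/* illustrative only */
static void example_drain_completions(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_debug("wr_id %llu failed: %d\n",
					 wc[i].wr_id, wc[i].status);
		}
	}
}
#endif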

/*
 * siw_req_notify_cq()
 *
 * Request notification for new CQE's added to that CQ.
 * Defined flags:
 * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification
 *   event if a WQE with notification flag set enters the CQ
 * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification
 *   event if a WQE enters the CQ.
 * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the
 *   number of not reaped CQE's regardless of its notification
 *   type and current or new CQ notification settings.
 *
 * @base_cq: Base CQ contained in siw CQ.
 * @flags: Requested notification flags.
 */
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
{
	struct siw_cq *cq = to_siw_cq(base_cq);

	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);

	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		/*
		 * Enable CQ event for next solicited completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
	else
		/*
		 * Enable CQ event for any signalled completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		return cq->cq_put - cq->cq_get;

	return 0;
}
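
/*
 * Editorial sketch (not part of the siw sources): the classic
 * "poll, re-arm, poll again" pattern built on the
 * IB_CQ_REPORT_MISSED_EVENTS semantics documented above. A positive
 * return from ib_req_notify_cq() means completions may have arrived
 * between the last poll and the re-arm, so the CQ must be polled once
 * more before the caller goes to sleep.
 */
#if 0	/* illustrative only */
static void example_rearm_and_repoll(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			;	/* consume the completion */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				       IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif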

/*
 * siw_dereg_mr()
 *
 * Release Memory Region.
 *
 * @base_mr: Base MR contained in siw MR.
 * @udata: points to user context, unused.
 */
int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
{
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_device *sdev = to_siw_dev(base_mr->device);

	siw_dbg_mem(mr->mem, "deregister MR\n");

	atomic_dec(&sdev->num_mr);

	siw_mr_drop_mem(mr);
	kfree_rcu(mr, rcu);

	return 0;
}

/*
 * siw_reg_user_mr()
 *
 * Register Memory Region.
 *
 * @pd: Protection Domain
 * @start: starting address of MR (virtual address)
 * @len: length of MR
 * @rnic_va: not used by siw
 * @rights: MR access rights
 * @udata: user buffer to communicate STag and Key.
 */
struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata)
{
	struct siw_mr *mr = NULL;
	struct siw_umem *umem = NULL;
	struct siw_ureq_reg_mr ureq;
	struct siw_device *sdev = to_siw_dev(pd->device);

	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
	int rv;

	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
		   (unsigned long long)len);

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (!len) {
		rv = -EINVAL;
		goto err_out;
	}
	if (mem_limit != RLIM_INFINITY) {
		unsigned long num_pages =
			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
		mem_limit >>= PAGE_SHIFT;

		if (num_pages > mem_limit - current->mm->locked_vm) {
			siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
				   num_pages, mem_limit,
				   current->mm->locked_vm);
			rv = -ENOMEM;
			goto err_out;
		}
	}
	umem = siw_umem_get(start, len, ib_access_writable(rights));
	if (IS_ERR(umem)) {
		rv = PTR_ERR(umem);
		siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
		umem = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
	if (rv)
		goto err_out;

	if (udata) {
		struct siw_uresp_reg_mr uresp = {};
		struct siw_mem *mem = mr->mem;

		if (udata->inlen < sizeof(ureq)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out;

		mr->base_mr.lkey |= ureq.stag_key;
		mr->base_mr.rkey |= ureq.stag_key;
		mem->stag |= ureq.stag_key;
		uresp.stag = mem->stag;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	mr->mem->stag_valid = 1;

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);
	if (mr) {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	} else {
		if (umem)
			siw_umem_release(umem, false);
	}
	return ERR_PTR(rv);
}
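
/*
 * Editorial sketch (not part of the siw sources): siw_reg_user_mr() is
 * reached from user space through libibverbs. A minimal registration
 * on the user side might look as follows (user-space C, shown here for
 * context only; buffer size and access rights are arbitrary).
 */
#if 0	/* user-space illustration, not kernel code */
#include <stdlib.h>
#include <infiniband/verbs.h>

static struct ibv_mr *example_reg_mr(struct ibv_pd *pd, size_t size)
{
	void *buf = malloc(size);

	if (!buf)
		return NULL;
	/* traps into the kernel and ends up in siw_reg_user_mr() */
	return ibv_reg_mr(pd, buf, size,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
}
#endif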

struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_sge)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	struct siw_pbl *pbl = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (mr_type != IB_MR_TYPE_MEM_REG) {
		siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if (max_sge > SIW_MAX_SGE_PBL) {
		siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
		rv = -ENOMEM;
		goto err_out;
	}
	pbl = siw_pbl_alloc(max_sge);
	if (IS_ERR(pbl)) {
		rv = PTR_ERR(pbl);
		siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
		pbl = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
	if (rv)
		goto err_out;

	mr->mem->is_pbl = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);

	if (!mr) {
		kfree(pbl);
	} else {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	}
	siw_dbg_pd(pd, "failed: %d\n", rv);

	return ERR_PTR(rv);
}

/* Just used to count number of pages being mapped */
static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
{
	return 0;
}

int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
		  unsigned int *sg_off)
{
	struct scatterlist *slp;
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_mem *mem = mr->mem;
	struct siw_pbl *pbl = mem->pbl;
	struct siw_pble *pble;
	unsigned long pbl_size;
	int i, rv;

	if (!pbl) {
		siw_dbg_mem(mem, "no PBL allocated\n");
		return -EINVAL;
	}
	pble = pbl->pbe;

	if (pbl->max_buf < num_sle) {
		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
			    num_sle, pbl->max_buf);
		return -ENOMEM;
	}
	for_each_sg(sl, slp, num_sle, i) {
		if (sg_dma_len(slp) == 0) {
			siw_dbg_mem(mem, "empty SGE\n");
			return -EINVAL;
		}
		if (i == 0) {
			pble->addr = sg_dma_address(slp);
			pble->size = sg_dma_len(slp);
			pble->pbl_off = 0;
			pbl_size = pble->size;
			pbl->num_buf = 1;
		} else {
			/* Merge PBL entries if adjacent */
			if (pble->addr + pble->size == sg_dma_address(slp)) {
				pble->size += sg_dma_len(slp);
			} else {
				pble++;
				pbl->num_buf++;
				pble->addr = sg_dma_address(slp);
				pble->size = sg_dma_len(slp);
				pble->pbl_off = pbl_size;
			}
			pbl_size += sg_dma_len(slp);
		}
		siw_dbg_mem(mem,
			    "sge[%d], size %u, addr 0x%p, total %lu\n",
			    i, pble->size, ib_virt_dma_to_ptr(pble->addr),
			    pbl_size);
	}
	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
	if (rv > 0) {
		mem->len = base_mr->length;
		mem->va = base_mr->iova;
		siw_dbg_mem(mem,
			    "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
			    mem->len, (void *)(uintptr_t)mem->va, num_sle,
			    pbl->num_buf);
	}
	return rv;
}
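
/*
 * Editorial sketch (not part of the siw sources): how a kernel ULP
 * typically drives the two verbs above. ib_alloc_mr() ends up in
 * siw_alloc_mr(), and ib_map_mr_sg() calls into siw_map_mr_sg(). The
 * SGE budget of 32 is an arbitrary example; "sgl" is assumed to be an
 * already DMA-mapped scatterlist.
 */
#if 0	/* illustrative only */
static struct ib_mr *example_fast_reg(struct ib_pd *pd,
				      struct scatterlist *sgl, int nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
	if (IS_ERR(mr))
		return mr;

	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n < nents) {
		ib_dereg_mr(mr);
		return ERR_PTR(n < 0 ? n : -EINVAL);
	}
	/* mr->lkey/mr->rkey become usable after posting an IB_WR_REG_MR */
	return mr;
}
#endif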

/*
 * siw_get_dma_mr()
 *
 * Create an (empty) DMA memory region, where no umem is attached.
 */
struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
	if (rv)
		goto err_out;

	mr->mem->stag_valid = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	if (rv)
		kfree(mr);

	atomic_dec(&sdev->num_mr);

	return ERR_PTR(rv);
}
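
/*
 * Editorial sketch (not part of the siw sources): kernel ULPs normally
 * reach this verb indirectly. For a device without a device-wide DMA
 * lkey, ib_alloc_pd() uses the get_dma_mr op to back the PD's
 * local_dma_lkey, which ULPs then place into their SGEs. Names below
 * are illustrative.
 */
#if 0	/* illustrative only */
static int example_build_dma_sge(struct ib_device *device, u64 dma_addr,
				 u32 len, struct ib_sge *sge)
{
	struct ib_pd *pd = ib_alloc_pd(device, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);

	sge->addr = dma_addr;		/* e.g. from ib_dma_map_single() */
	sge->length = len;
	sge->lkey = pd->local_dma_lkey;	/* backed by the DMA MR above */

	return 0;	/* real code keeps "pd" and frees it on teardown */
}
#endif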

/*
 * siw_create_srq()
 *
 * Create Shared Receive Queue of attributes @init_attrs
 * within protection domain given by @pd.
 *
 * @base_srq: Base SRQ contained in siw SRQ.
 * @init_attrs: SRQ init attributes.
 * @udata: points to user context
 */
int siw_create_srq(struct ib_srq *base_srq,
		   struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct ib_srq_attr *attrs = &init_attrs->attr;
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	int rv;

	if (init_attrs->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
		siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
	    attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
		rv = -EINVAL;
		goto err_out;
	}
	srq->max_sge = attrs->max_sge;
	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
	srq->limit = attrs->srq_limit;
	if (srq->limit)
		srq->armed = true;

	srq->is_kernel_res = !udata;

	if (udata)
		srq->recvq =
			vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
	else
		srq->recvq = vcalloc(srq->num_rqe, sizeof(struct siw_rqe));

	if (srq->recvq == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	if (udata) {
		struct siw_uresp_create_srq uresp = {};
		size_t length = srq->num_rqe * sizeof(struct siw_rqe);

		srq->srq_entry =
			siw_mmap_entry_insert(ctx, srq->recvq,
					      length, &uresp.srq_key);
		if (!srq->srq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.num_rqe = srq->num_rqe;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	spin_lock_init(&srq->lock);

	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");

	return 0;

err_out:
	if (srq->recvq) {
		if (ctx)
			rdma_user_mmap_entry_remove(srq->srq_entry);
		vfree(srq->recvq);
	}
	atomic_dec(&sdev->num_srq);

	return rv;
}
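
/*
 * Editorial sketch (not part of the siw sources): kernel-side SRQ
 * creation reaching siw_create_srq() above, via ib_create_srq(). The
 * attribute values are arbitrary examples; a non-zero srq_limit arms
 * the IB_EVENT_SRQ_LIMIT_REACHED notification.
 */
#if 0	/* illustrative only */
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.attr = {
			.max_wr	   = 1024,	/* RQEs in the SRQ ring */
			.max_sge   = 4,
			.srq_limit = 64,	/* arm low-watermark event */
		},
		.srq_type = IB_SRQT_BASIC,
	};

	return ib_create_srq(pd, &init_attr);
}
#endif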

/*
 * siw_modify_srq()
 *
 * Modify SRQ. The caller may resize SRQ and/or set/reset notification
 * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
 *
 * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE
 * parameter. siw_modify_srq() does not check the attrs->max_sge param.
 */
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
		   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&srq->lock, flags);

	if (attr_mask & IB_SRQ_MAX_WR) {
		/* resize request not yet supported */
		rv = -EOPNOTSUPP;
		goto out;
	}
	if (attr_mask & IB_SRQ_LIMIT) {
		if (attrs->srq_limit) {
			if (unlikely(attrs->srq_limit > srq->num_rqe)) {
				rv = -EINVAL;
				goto out;
			}
			srq->armed = true;
		} else {
			srq->armed = false;
		}
		srq->limit = attrs->srq_limit;
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return rv;
}
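
/*
 * Editorial sketch (not part of the siw sources): re-arming the SRQ
 * limit event after it fired, via ib_modify_srq() with IB_SRQ_LIMIT as
 * handled above; the threshold of 64 is an arbitrary example.
 */
#if 0	/* illustrative only */
static int example_rearm_srq_limit(struct ib_srq *srq)
{
	struct ib_srq_attr attr = { .srq_limit = 64 };

	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}
#endif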

/*
 * siw_query_srq()
 *
 * Query SRQ attributes.
 */
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;

	spin_lock_irqsave(&srq->lock, flags);

	attrs->max_wr = srq->num_rqe;
	attrs->max_sge = srq->max_sge;
	attrs->srq_limit = srq->limit;

	spin_unlock_irqrestore(&srq->lock, flags);

	return 0;
}

/*
 * siw_destroy_srq()
 *
 * Destroy SRQ.
 * It is assumed that the SRQ is not referenced by any
 * QP anymore - the code trusts the RDMA core environment to keep track
 * of QP references.
 */
int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	if (ctx)
		rdma_user_mmap_entry_remove(srq->srq_entry);
	vfree(srq->recvq);
	atomic_dec(&sdev->num_srq);
	return 0;
}

/*
 * siw_post_srq_recv()
 *
 * Post a list of receive queue elements to SRQ.
 * NOTE: The function does not check or lock a certain SRQ state
 *       during the post operation. The code simply trusts the
 *       RDMA core environment.
 *
 * @base_srq: Base SRQ contained in siw SRQ
 * @wr: List of R-WR's
 * @bad_wr: Updated to failing WR if posting fails.
 */
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	if (unlikely(!srq->is_kernel_res)) {
		siw_dbg_pd(base_srq->pd,
			   "[SRQ]: no kernel post_recv for mapped srq\n");
		rv = -EINVAL;
		goto out;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Also needed to serialize potentially multiple
	 * consumers.
	 */
	spin_lock_irqsave(&srq->lock, flags);

	while (wr) {
		u32 idx = srq->rq_put % srq->num_rqe;
		struct siw_rqe *rqe = &srq->recvq[idx];

		if (rqe->flags) {
			siw_dbg_pd(base_srq->pd, "SRQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (unlikely(wr->num_sge > srq->max_sge)) {
			siw_dbg_pd(base_srq->pd,
				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* Make sure S-RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		srq->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);
out:
	if (unlikely(rv < 0)) {
		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
		*bad_wr = wr;
	}
	return rv;
}
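
/*
 * Editorial sketch (not part of the siw sources): posting a receive to
 * the SRQ serviced above, analogous to the per-QP example near
 * siw_post_recv(); "sge" is assumed to describe a DMA-mapped buffer.
 */
#if 0	/* illustrative only */
static int example_post_srq_recv(struct ib_srq *srq, struct ib_sge *sge,
				 u64 wr_id)
{
	struct ib_recv_wr wr = {
		.wr_id = wr_id,
		.sg_list = sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
#endif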

void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_qp *base_qp = &qp->base_qp;

	/*
	 * Do not report asynchronous errors on QP which gets
	 * destroyed via verbs interface (siw_destroy_qp())
	 */
	if (qp->attrs.flags & SIW_QP_IN_DESTROY)
		return;

	event.event = etype;
	event.device = base_qp->device;
	event.element.qp = base_qp;

	if (base_qp->event_handler) {
		siw_dbg_qp(qp, "reporting event %d\n", etype);
		base_qp->event_handler(&event, base_qp->qp_context);
	}
}
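
/*
 * Editorial sketch (not part of the siw sources): the handler invoked
 * by siw_qp_event() above is installed at QP creation time through
 * struct ib_qp_init_attr. All names below are illustrative; remaining
 * QP attributes are assumed to be filled in by the caller.
 */
#if 0	/* illustrative only */
static void example_qp_event_handler(struct ib_event *ev, void *ctx)
{
	pr_debug("QP event %d on QP %u\n", ev->event,
		 ev->element.qp->qp_num);
}

static struct ib_qp *example_create_qp(struct ib_pd *pd,
				       struct ib_qp_init_attr *attr,
				       void *my_ctx)
{
	attr->event_handler = example_qp_event_handler;
	attr->qp_context = my_ctx;	/* passed back as "ctx" above */

	return ib_create_qp(pd, attr);
}
#endif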

void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_cq *base_cq = &cq->base_cq;

	event.event = etype;
	event.device = base_cq->device;
	event.element.cq = base_cq;

	if (base_cq->event_handler) {
		siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
		base_cq->event_handler(&event, base_cq->cq_context);
	}
}

void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_srq *base_srq = &srq->base_srq;

	event.event = etype;
	event.device = base_srq->device;
	event.element.srq = base_srq;

	if (base_srq->event_handler) {
		siw_dbg_pd(srq->base_srq.pd,
			   "reporting SRQ event %d\n", etype);
		base_srq->event_handler(&event, base_srq->srq_context);
	}
}

void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
{
	struct ib_event event;

	event.event = etype;
	event.device = &sdev->base_dev;
	event.element.port_num = port;

	siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);

	ib_dispatch_event(&event);
}