// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"
#include "qp.h"
/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
	spin_lock_init(&rdi->n_srqs_lock);
	rdi->n_srqs_allocated = 0;
}
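
/*
 * n_srqs_allocated counts live SRQs against the driver-advertised
 * dparms.props.max_srq limit; see the check in rvt_create_srq() below.
 */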

/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: 0 on success
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
		   struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	u32 sz;
	int ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
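	/*
	 * Size the ring with one extra slot: head == tail means empty, so
	 * a spare entry is needed to tell a full ring from an empty one
	 * (rvt_query_srq() reports max_wr as size - 1 accordingly).
	 */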
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
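	/*
	 * Per-entry size: one struct rvt_rwqe, whose trailing sg_list[]
	 * holds up to max_sge struct ib_sge entries.
	 */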
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
	if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
			 dev->dparms.node, udata)) {
		ret = -ENOMEM;
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
		if (IS_ERR(srq->ip)) {
			ret = PTR_ERR(srq->ip);
			goto bail_wq;
		}

		ret = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (ret)
			goto bail_ip;
	}

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
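	/*
	 * A nonzero srq_limit arms the SRQ limit asynchronous event
	 * (IB_EVENT_SRQ_LIMIT_REACHED), raised when the number of posted
	 * receives drains below this watermark.
	 */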
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = -ENOMEM;
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return 0;

bail_ip:
	kfree(srq->ip);
bail_wq:
	rvt_free_rq(&srq->rq);
bail_srq:
	return ret;
}

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rq tmp_rq = {};
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_krwq *okwq = NULL;
		struct rvt_rwq *owq = NULL;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;
		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
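		/*
		 * Build the resized ring in a temporary rvt_rq; the live
		 * queue is swapped in only after the copy below succeeds,
		 * so a failure leaves the SRQ untouched.
		 */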
		if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
				 udata))
			return -ENOMEM;
		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.kwq->c_lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		if (udata) {
			owq = srq->rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(owq->head);
			tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
		} else {
			okwq = srq->rq.kwq;
			head = okwq->head;
			tail = okwq->tail;
		}
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
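		/*
		 * Occupancy of the old ring is n = (head - tail) mod size.
		 * The new ring must hold all n entries plus the spare slot,
		 * hence the size <= n rejection below.
		 */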
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = tmp_rq.kwq->curr_wq;
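		/*
		 * Copy the outstanding WQEs from the old ring into the new
		 * one, compacting them to start at slot 0.
		 */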
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
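		/*
		 * Publish the new ring: n entries were copied in starting
		 * at slot 0, so head = n and tail = 0.
		 */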
		srq->rq.kwq = tmp_rq.kwq;
		if (udata) {
			srq->rq.wq = tmp_rq.wq;
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
		} else {
			tmp_rq.kwq->head = n;
			tmp_rq.kwq->tail = 0;
		}
		srq->rq.size = size;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);

		vfree(owq);
		kvfree(okwq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.kwq->c_lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
	rvt_free_rq(&tmp_rq);
	return ret;
}

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 * @udata: user data for libibverbs.so
 *
 * Return: always 0
 */
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
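	/*
	 * Drop this SRQ's reference on the mmap info;
	 * rvt_release_mmap_info() frees it once the last reference
	 * (possibly a still-live userspace mapping) goes away.
	 */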
	if (srq->ip)
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
	kvfree(srq->rq.kwq);
	return 0;
}