/*
 * Copyright (c) 2016-2017 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	/* No support for kernel clients. */
	return -EOPNOTSUPP;
}
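
/*
 * Kernel consumers cannot post receives to a pvrdma SRQ; user-space
 * clients are expected to write work requests directly into the mapped
 * SRQ ring via the user-level provider, so this verb is never used on
 * the kernel path. A minimal user-space sketch of posting through
 * libibverbs (not part of this driver; srq, mr and buf are
 * placeholders):
 *
 *	struct ibv_sge sge = {
 *		.addr   = (uintptr_t)buf,
 *		.length = buf_len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ibv_recv_wr wr = {
 *		.wr_id   = 1,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ibv_recv_wr *bad_wr;
 *
 *	if (ibv_post_srq_recv(srq, &wr, &bad_wr))
 *		perror("ibv_post_srq_recv");
 */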

/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}
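
/*
 * A user-space caller reaches the query verb above through
 * ibv_query_srq(); a minimal sketch (not part of this driver; srq is a
 * placeholder for an existing SRQ):
 *
 *	struct ibv_srq_attr attr;
 *
 *	if (!ibv_query_srq(srq, &attr))
 *		printf("max_wr %u max_sge %u srq_limit %u\n",
 *		       attr.max_wr, attr.max_sge, attr.srq_limit);
 */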

/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!(pd->uobject && udata)) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->attr.max_wr  > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_waitqueue_head(&srq->wait);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(pd->uobject->context,
				ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}

	return &srq->ibsrq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	kfree(srq);
	atomic_dec(&dev->num_srqs);

	return ERR_PTR(ret);
}
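
/*
 * The create verb above is reached from user space via ibv_create_srq();
 * the user-level provider allocates the ring buffer whose address and
 * size arrive in struct pvrdma_create_srq (ucmd above). A minimal
 * user-space sketch, assuming an already allocated pd (not part of this
 * driver):
 *
 *	struct ibv_srq_init_attr init_attr = {
 *		.attr = {
 *			.max_wr  = 64,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &init_attr);
 *
 *	if (!srq)
 *		perror("ibv_create_srq");
 */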

static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	/* Index with the same modulo used when the handle was inserted. */
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = NULL;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/*
	 * Drop our reference with refcount_dec_and_test() rather than
	 * refcount_dec(), which WARNs when the count reaches zero. If
	 * another user (e.g. the SRQ event path) still holds a reference,
	 * wait for it to be dropped.
	 */
	if (!refcount_dec_and_test(&srq->refcnt))
		wait_event(srq->wait, !refcount_read(&srq->refcnt));

	/* There is no support for kernel clients, so this is safe. */
	ib_umem_release(srq->umem);

	pvrdma_page_dir_cleanup(dev, &srq->pdir);

	kfree(srq);

	atomic_dec(&dev->num_srqs);
}

/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);

	return 0;
}

/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only the SRQ limit is supported. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);

		return -EINVAL;
	}

	return ret;
}
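
/*
 * User space arms the SRQ limit through ibv_modify_srq() with
 * IBV_SRQ_LIMIT, which lands in the modify verb above; once fewer than
 * srq_limit receives remain posted, the device raises an SRQ-limit
 * async event (IBV_EVENT_SRQ_LIMIT_REACHED in user space). A minimal
 * sketch (not part of this driver; srq is a placeholder):
 *
 *	struct ibv_srq_attr attr = { .srq_limit = 16 };
 *
 *	if (ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT))
 *		perror("ibv_modify_srq");
 */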