1 /*
2  * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of EITHER the GNU General Public License
6  * version 2 as published by the Free Software Foundation or the BSD
7  * 2-Clause License. This program is distributed in the hope that it
8  * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
9  * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
10  * See the GNU General Public License version 2 for more details at
11  * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program available in the file COPYING in the main
15  * directory of this source tree.
16  *
17  * The BSD 2-Clause License
18  *
19  *     Redistribution and use in source and binary forms, with or
20  *     without modification, are permitted provided that the following
21  *     conditions are met:
22  *
23  *      - Redistributions of source code must retain the above
24  *        copyright notice, this list of conditions and the following
25  *        disclaimer.
26  *
27  *      - Redistributions in binary form must reproduce the above
28  *        copyright notice, this list of conditions and the following
29  *        disclaimer in the documentation and/or other materials
30  *        provided with the distribution.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
37  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
38  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
39  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43  * OF THE POSSIBILITY OF SUCH DAMAGE.
44  */
45 
46 #include <asm/page.h>
47 #include <linux/io.h>
48 #include <linux/wait.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_smi.h>
51 #include <rdma/ib_user_verbs.h>
52 
53 #include "pvrdma.h"
54 
55 /**
56  * pvrdma_req_notify_cq - request notification for a completion queue
57  * @ibcq: the completion queue
58  * @notify_flags: notification flags
59  *
 * @return: 0 for success, or a positive value when
 *          IB_CQ_REPORT_MISSED_EVENTS is set and completions are already
 *          pending on the CQ.
61  */
62 int pvrdma_req_notify_cq(struct ib_cq *ibcq,
63 			 enum ib_cq_notify_flags notify_flags)
64 {
65 	struct pvrdma_dev *dev = to_vdev(ibcq->device);
66 	struct pvrdma_cq *cq = to_vcq(ibcq);
67 	u32 val = cq->cq_handle;
68 	unsigned long flags;
69 	int has_data = 0;
70 
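	/*
	 * The UAR doorbell value packs the CQ handle together with an arm
	 * opcode: notify on solicited completions only, or on any completion.
	 */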
71 	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
72 		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
73 
74 	spin_lock_irqsave(&cq->cq_lock, flags);
75 
76 	pvrdma_write_uar_cq(dev, val);
77 
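	/*
	 * If the caller asked for missed-event reporting, peek at the ring so
	 * it can tell whether completions arrived before the CQ was re-armed
	 * and poll again instead of waiting for an event.
	 */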
78 	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
79 		unsigned int head;
80 
81 		has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
82 						    cq->ibcq.cqe, &head);
83 		if (unlikely(has_data == PVRDMA_INVALID_IDX))
84 			dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
85 	}
86 
87 	spin_unlock_irqrestore(&cq->cq_lock, flags);
88 
89 	return has_data;
90 }
91 
92 /**
93  * pvrdma_create_cq - create completion queue
94  * @ibdev: the device
95  * @attr: completion queue attributes
96  * @context: user context
97  * @udata: user data
98  *
 * @return: ib_cq completion queue pointer on success,
 *          otherwise an ERR_PTR encoding a negative errno.
101  */
102 struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
103 			       const struct ib_cq_init_attr *attr,
104 			       struct ib_ucontext *context,
105 			       struct ib_udata *udata)
106 {
107 	int entries = attr->cqe;
108 	struct pvrdma_dev *dev = to_vdev(ibdev);
109 	struct pvrdma_cq *cq;
110 	int ret;
111 	int npages;
112 	unsigned long flags;
113 	union pvrdma_cmd_req req;
114 	union pvrdma_cmd_resp rsp;
115 	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
116 	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
117 	struct pvrdma_create_cq ucmd;
118 
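	/* The device ABI fixes the CQE size at 64 bytes. */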
119 	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
120 
	/* roundup_pow_of_two(0) is undefined, so reject empty CQs first. */
	if (entries < 1)
		return ERR_PTR(-EINVAL);

	entries = roundup_pow_of_two(entries);
	if (entries > dev->dsr->caps.max_cqe)
		return ERR_PTR(-EINVAL);
124 
125 	if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
126 		return ERR_PTR(-ENOMEM);
127 
128 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
129 	if (!cq) {
130 		atomic_dec(&dev->num_cqs);
131 		return ERR_PTR(-ENOMEM);
132 	}
133 
134 	cq->ibcq.cqe = entries;
135 
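	/*
	 * A user CQ is backed by userspace memory pinned via ib_umem_get();
	 * a kernel CQ is backed by pages from the device page directory.
	 */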
136 	if (context) {
137 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
138 			ret = -EFAULT;
139 			goto err_cq;
140 		}
141 
142 		cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
143 				       IB_ACCESS_LOCAL_WRITE, 1);
144 		if (IS_ERR(cq->umem)) {
145 			ret = PTR_ERR(cq->umem);
146 			goto err_cq;
147 		}
148 
149 		npages = ib_umem_page_count(cq->umem);
150 	} else {
151 		cq->is_kernel = true;
152 
153 		/* One extra page for shared ring state */
154 		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
155 			      PAGE_SIZE - 1) / PAGE_SIZE;
156 
157 		/* Skip header page. */
158 		cq->offset = PAGE_SIZE;
159 	}
160 
161 	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
162 		dev_warn(&dev->pdev->dev,
163 			 "overflow pages in completion queue\n");
164 		ret = -EINVAL;
165 		goto err_umem;
166 	}
167 
168 	ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
169 	if (ret) {
170 		dev_warn(&dev->pdev->dev,
171 			 "could not allocate page directory\n");
172 		goto err_umem;
173 	}
174 
175 	/* Ring state is always the first page. Set in library for user cq. */
176 	if (cq->is_kernel)
177 		cq->ring_state = cq->pdir.pages[0];
178 	else
179 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
180 
181 	atomic_set(&cq->refcnt, 1);
182 	init_waitqueue_head(&cq->wait);
183 	spin_lock_init(&cq->cq_lock);
184 
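	/*
	 * Post the create command, handing the device the DMA address of the
	 * page directory that backs the CQ ring.
	 */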
185 	memset(cmd, 0, sizeof(*cmd));
186 	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
187 	cmd->nchunks = npages;
188 	cmd->ctx_handle = (context) ?
189 		(u64)to_vucontext(context)->ctx_handle : 0;
190 	cmd->cqe = entries;
191 	cmd->pdir_dma = cq->pdir.dir_dma;
192 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
193 	if (ret < 0) {
194 		dev_warn(&dev->pdev->dev,
195 			 "could not create completion queue, error: %d\n", ret);
196 		goto err_page_dir;
197 	}
198 
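	/* Adopt the CQE count and handle the device actually granted. */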
199 	cq->ibcq.cqe = resp->cqe;
200 	cq->cq_handle = resp->cq_handle;
201 	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
202 	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
203 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
204 
205 	if (context) {
206 		cq->uar = &(to_vucontext(context)->uar);
207 
208 		/* Copy udata back. */
209 		if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
210 			dev_warn(&dev->pdev->dev,
211 				 "failed to copy back udata\n");
212 			pvrdma_destroy_cq(&cq->ibcq);
213 			return ERR_PTR(-EINVAL);
214 		}
215 	}
216 
217 	return &cq->ibcq;
218 
219 err_page_dir:
220 	pvrdma_page_dir_cleanup(dev, &cq->pdir);
221 err_umem:
222 	if (context)
223 		ib_umem_release(cq->umem);
224 err_cq:
225 	atomic_dec(&dev->num_cqs);
226 	kfree(cq);
227 
228 	return ERR_PTR(ret);
229 }
230 
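/*
 * Drop the caller's reference and wait until every other holder has
 * released theirs before tearing down the CQ's memory.
 */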
231 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
232 {
233 	atomic_dec(&cq->refcnt);
234 	wait_event(cq->wait, !atomic_read(&cq->refcnt));
235 
236 	if (!cq->is_kernel)
237 		ib_umem_release(cq->umem);
238 
239 	pvrdma_page_dir_cleanup(dev, &cq->pdir);
240 	kfree(cq);
241 }
242 
243 /**
244  * pvrdma_destroy_cq - destroy completion queue
245  * @cq: the completion queue to destroy.
246  *
 * @return: 0 for success, or a negative errno if the destroy command failed.
248  */
249 int pvrdma_destroy_cq(struct ib_cq *cq)
250 {
251 	struct pvrdma_cq *vcq = to_vcq(cq);
252 	union pvrdma_cmd_req req;
253 	struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
254 	struct pvrdma_dev *dev = to_vdev(cq->device);
255 	unsigned long flags;
256 	int ret;
257 
258 	memset(cmd, 0, sizeof(*cmd));
259 	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
260 	cmd->cq_handle = vcq->cq_handle;
261 
262 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
263 	if (ret < 0)
264 		dev_warn(&dev->pdev->dev,
265 			 "could not destroy completion queue, error: %d\n",
266 			 ret);
267 
268 	/* free cq's resources */
269 	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	/* Use the same modulo indexing as when the CQ was inserted. */
	dev->cq_tbl[vcq->cq_handle % dev->dsr->caps.max_cq] = NULL;
271 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
272 
273 	pvrdma_free_cq(dev, vcq);
274 	atomic_dec(&dev->num_cqs);
275 
276 	return ret;
277 }
278 
279 /**
280  * pvrdma_modify_cq - modify the CQ moderation parameters
 * @cq: the CQ to modify
282  * @cq_count: number of CQEs that will trigger an event
283  * @cq_period: max period of time in usec before triggering an event
284  *
 * @return: -EOPNOTSUPP as CQ moderation is not supported.
286  */
287 int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
288 {
289 	return -EOPNOTSUPP;
290 }
291 
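/* Return a pointer to the i-th CQE in the CQ's ring buffer. */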
292 static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
293 {
294 	return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
295 					&cq->pdir,
296 					cq->offset +
297 					sizeof(struct pvrdma_cqe) * i);
298 }
299 
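/*
 * Remove all CQEs belonging to @qp from @cq, e.g. when the QP is destroyed
 * while completions are still outstanding. CQEs for other QPs are compacted
 * toward the producer tail; CQEs for @qp are dropped by advancing the ring's
 * consumer head. The caller must hold cq->cq_lock.
 */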
300 void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
301 {
302 	unsigned int head;
303 	int has_data;
304 
305 	if (!cq->is_kernel)
306 		return;
307 
308 	/* Lock held */
309 	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
310 					    cq->ibcq.cqe, &head);
311 	if (unlikely(has_data > 0)) {
312 		int items;
313 		int curr;
314 		int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
315 				      cq->ibcq.cqe);
316 		struct pvrdma_cqe *cqe;
317 		struct pvrdma_cqe *curr_cqe;
318 
319 		items = (tail > head) ? (tail - head) :
320 			(cq->ibcq.cqe - head + tail);
321 		curr = --tail;
322 		while (items-- > 0) {
323 			if (curr < 0)
324 				curr = cq->ibcq.cqe - 1;
325 			if (tail < 0)
326 				tail = cq->ibcq.cqe - 1;
327 			curr_cqe = get_cqe(cq, curr);
328 			if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
329 				if (curr != tail) {
330 					cqe = get_cqe(cq, tail);
331 					*cqe = *curr_cqe;
332 				}
333 				tail--;
334 			} else {
335 				pvrdma_idx_ring_inc(
336 					&cq->ring_state->rx.cons_head,
337 					cq->ibcq.cqe);
338 			}
339 			curr--;
340 		}
341 	}
342 }
343 
344 static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
345 			   struct ib_wc *wc)
346 {
347 	struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
348 	int has_data;
349 	unsigned int head;
350 	bool tried = false;
351 	struct pvrdma_cqe *cqe;
352 
353 retry:
354 	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
355 					    cq->ibcq.cqe, &head);
356 	if (has_data == 0) {
357 		if (tried)
358 			return -EAGAIN;
359 
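		/*
		 * The ring looks empty; ring the poll doorbell once so the
		 * device flushes out any completions it is still holding,
		 * then check the ring again before giving up.
		 */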
360 		pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);
361 
362 		tried = true;
363 		goto retry;
364 	} else if (has_data == PVRDMA_INVALID_IDX) {
365 		dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
366 		return -EAGAIN;
367 	}
368 
369 	cqe = get_cqe(cq, head);
370 
	/* Make sure the CQE is read only after the producer index check. */
372 	rmb();
	/* Look up the QP this CQE belongs to; it may already be destroyed. */
	if (!dev->qp_tbl[cqe->qp & 0xffff])
		return -EAGAIN;

	*cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
377 
378 	wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
379 	wc->status = pvrdma_wc_status_to_ib(cqe->status);
380 	wc->wr_id = cqe->wr_id;
381 	wc->qp = &(*cur_qp)->ibqp;
382 	wc->byte_len = cqe->byte_len;
383 	wc->ex.imm_data = cqe->imm_data;
384 	wc->src_qp = cqe->src_qp;
385 	wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
386 	wc->pkey_index = cqe->pkey_index;
387 	wc->slid = cqe->slid;
388 	wc->sl = cqe->sl;
389 	wc->dlid_path_bits = cqe->dlid_path_bits;
390 	wc->port_num = cqe->port_num;
391 	wc->vendor_err = cqe->vendor_err;
392 	wc->network_hdr_type = cqe->network_hdr_type;
393 
394 	/* Update shared ring state */
395 	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
396 
397 	return 0;
398 }
399 
400 /**
401  * pvrdma_poll_cq - poll for work completion queue entries
402  * @ibcq: completion queue
403  * @num_entries: the maximum number of entries
 * @wc: pointer to work completion array
405  *
406  * @return: number of polled completion entries
407  */
408 int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
409 {
410 	struct pvrdma_cq *cq = to_vcq(ibcq);
411 	struct pvrdma_qp *cur_qp = NULL;
412 	unsigned long flags;
413 	int npolled;
414 
415 	if (num_entries < 1 || wc == NULL)
416 		return 0;
417 
418 	spin_lock_irqsave(&cq->cq_lock, flags);
419 	for (npolled = 0; npolled < num_entries; ++npolled) {
420 		if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
421 			break;
422 	}
423 
424 	spin_unlock_irqrestore(&cq->cq_lock, flags);
425 
426 	/* Ensure we do not return errors from poll_cq */
427 	return npolled;
428 }
429 
430 /**
431  * pvrdma_resize_cq - resize CQ
432  * @ibcq: the completion queue
 * @entries: the requested new number of CQEs
434  * @udata: user data
435  *
436  * @return: -EOPNOTSUPP as CQ resize is not supported.
437  */
438 int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
439 {
440 	return -EOPNOTSUPP;
441 }
442