xref: /openbmc/linux/drivers/infiniband/hw/mlx4/cq.c (revision 461ba3e7)
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

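/* Completion callback registered with the mlx4 core: hand the completion
 * notification straight to the consumer's comp_handler.
 */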
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

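/* Asynchronous event callback from the mlx4 core.  Only CQ error events are
 * expected here; they are translated into IB_EVENT_CQ_ERR and delivered to
 * the consumer's event_handler, if one is registered.
 */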
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

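/* Return the CQE at index n if software owns it, or NULL if it still belongs
 * to hardware.  Ownership toggles on every pass around the ring, so the owner
 * bit is compared against the wrap bit of n.  With 64-byte CQEs the 32-byte
 * CQE contents (including the ownership byte) live in the second half of the
 * entry, hence the tcqe adjustment.
 */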
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

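/* Allocate a kernel-space CQ buffer of nent entries (each of the device's
 * CQE stride) and set up the MTT that gives the HCA access to it.
 */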
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

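/* Pin the user-space CQ buffer described by buf_addr, pick an MTT page size
 * for it, and write the MTT so the HCA can DMA into the umem.
 */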
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
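/* Create a CQ.  For user CQs the ring buffer and doorbell record are supplied
 * by the caller through udata and only mapped here; for kernel CQs they are
 * allocated by the driver.  The completion vector is translated through the
 * device's EQ table when one has been set up.
 */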
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return -EINVAL;

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return -EINVAL;

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (udata) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &context->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
			    &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!udata);
	if (err)
		goto err_dbmap;

	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return 0;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (udata)
		mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	ib_umem_release(cq->umem);
	if (!udata)
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &cq->db);
err_cq:
	return err;
}

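/* Allocate the kernel-side destination buffer for a CQ resize; the new CQE
 * count is recorded in resize_buf so the poll path can switch over once the
 * hardware signals that the resize has completed.
 */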
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

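/* Same as mlx4_alloc_resize_buf(), but for a user CQ: the destination ring is
 * a user buffer whose address arrives through udata and is pinned here.
 */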
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, &cq->resize_buf->buf, &cq->resize_umem,
				  ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

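/* Copy the CQEs that are still outstanding in the old ring into the resize
 * buffer, stopping at the RESIZE marker CQE written by hardware, and set each
 * copied entry's ownership bit according to its position in the new ring.
 */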
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

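/* Resize a CQ.  A new buffer is allocated (or a user buffer pinned) and the
 * firmware is asked to migrate to it.  Kernel CQs copy their outstanding CQEs
 * over under the CQ lock; user CQs adopt the new buffer right away and the
 * old umem is released.
 */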
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	ib_umem_release(cq->resize_umem);
	cq->resize_umem = NULL;
out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&mcq->db);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}
	ib_umem_release(mcq->umem);
	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

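/* Translate the syndrome of an error CQE into the corresponding ib_wc status
 * code, dumping the raw CQE for local QP operation errors to aid debugging.
 */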
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, u8 badfcs_enc, __be16 checksum)
{
	return ((badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) ||
		((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		 (status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
				       MLX4_CQE_STATUS_UDP)) &&
		 (checksum == cpu_to_be16(0xffff))));
}

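/* For completions on SR-IOV proxy QPs, the interesting completion fields
 * (pkey index, source QP, GRH flag, SLID/SMAC, ...) arrive in a tunnel header
 * placed ahead of the payload in the receive buffer; unpack them into the
 * work completion.
 */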
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof(struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index	= be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp	= be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}

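/* Generate software FLUSH_ERR completions for every WQE still posted on the
 * send or receive queue of a QP.  Used instead of polling hardware when the
 * device has entered internal error and will never return real CQEs.
 */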
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}

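/* Poll a single CQE.  Returns 0 if a work completion was filled in, -EAGAIN
 * if the CQ is empty.  Handles in-flight resizes (RESIZE marker CQEs), looks
 * up the QP/SRQ the completion belongs to, and decodes send and receive
 * opcodes into ib_wc fields.
 */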
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num       = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq	  = &(*cur_qp)->rq;
		tail	  = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			fallthrough;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			fallthrough;
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode    = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode    = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
					cqe->badfcs_enc,
					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->slid = 0;
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
					MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

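/* Poll up to num_entries completions off the CQ.  If the device is in
 * internal error state, return simulated flush completions instead of
 * touching the hardware CQ.
 */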
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

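/* Request a completion notification: ring the CQ doorbell so that the next
 * CQE (or the next solicited CQE, depending on flags) raises an interrupt.
 */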
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

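/* Remove all CQEs belonging to QP qpn from the CQ by compacting the remaining
 * entries over them; completions destined for an SRQ also have their SRQ WQEs
 * freed.  mlx4_ib_cq_clean() below is the locked wrapper.
 */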
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof(*cqe));
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}
974