/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
		       "on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

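/*
 * CQE ownership: the HCA toggles the owner bit in each CQE every time
 * it wraps around the CQ buffer, so an entry belongs to software only
 * when its owner bit matches the parity of the current pass.  The pass
 * parity for the (unmasked) index n is n & (cqe + 1), since cqe + 1 is
 * the power-of-two number of entries.  For example, with 32 entries,
 * index 37 refers to slot 37 & 31 = 5 on an odd pass (37 & 32 != 0),
 * so that slot is software-owned only if its owner bit is set.
 */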
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

/*
 * Tune CQ event moderation: up to cq_count completions or cq_period
 * microseconds may be coalesced into a single completion event.
 */
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);

	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
		      &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

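/*
 * mlx4_ib_create_cq() below uses one of the two helpers above: for a
 * userspace CQ the buffer and doorbell record live in user memory
 * (mlx4_ib_get_cq_umem() pins the buffer and mlx4_ib_db_map_user()
 * maps the doorbell), while for a kernel CQ the driver allocates its
 * own buffer and doorbell (mlx4_ib_alloc_cq_buf() and mlx4_db_alloc()).
 * Either way the buffer's MTT is handed to the HCA via mlx4_cq_alloc().
 */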
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	/* The HW CQ was already allocated, so free it on this path too. */
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
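
/*
 * Illustrative sketch (not part of the driver): kernel consumers do not
 * call mlx4_ib_create_cq() directly; the verbs midlayer dispatches here
 * through ib_device->create_cq when a ULP calls ib_create_cq().  A
 * minimal caller might look like this (my_comp_handler and my_ctx are
 * hypothetical):
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, NULL, my_ctx, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * Note that the requested size is rounded up above so that the number
 * of entries is a power of two; ibcq->cqe reports that size minus one.
 */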

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	/* Pass the unmasked index: get_sw_cqe() masks it itself and needs
	 * the full value to compute the ownership-bit parity. */
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

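/*
 * Copy the CQEs that are still outstanding in the old buffer into the
 * resize buffer, stopping at the special RESIZE CQE the hardware wrote
 * when it switched over; completions after that point were written
 * directly into the new buffer.
 */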
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
					(i + 1) & cq->resize_buf->cqe),
			get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);
	}

	goto out;

err_buf:
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		printk(KERN_DEBUG "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
		       cqe->vendor_err_syndrome,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

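/*
 * A receive completion is flagged checksum-OK only when the CQE status
 * bits show a non-fragmented IPv4 packet without options whose IP
 * checksum passed, the payload is TCP or UDP, and the hardware-computed
 * checksum folds to the all-ones value 0xffff.
 */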
static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
{
	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4	|
				      MLX4_CQE_IPOIB_STATUS_IPV4F	|
				      MLX4_CQE_IPOIB_STATUS_IPV4OPT	|
				      MLX4_CQE_IPOIB_STATUS_IPV6	|
				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4	|
			    MLX4_CQE_IPOIB_STATUS_IPOK))		&&
		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP	|
				      MLX4_CQE_IPOIB_STATUS_TCP))	&&
		checksum == cpu_to_be16(0xffff);
}

static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->my_qpn));
		if (unlikely(!mqp)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
			       cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq	  = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = be16_to_cpu(cqe->rlid);
		wc->sl		   = cqe->sl >> 4;
		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
							   cqe->checksum);
	}

	return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
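
/*
 * Illustrative sketch (not part of the driver): ULPs drive the two
 * entry points above through the verbs layer using the usual
 * drain / arm / drain-again pattern (handle_completions() is
 * hypothetical):
 *
 *	struct ib_wc wc[8];
 *	int n;
 *
 *	while ((n = ib_poll_cq(cq, 8, wc)) > 0)
 *		handle_completions(wc, n);
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *
 *	while ((n = ib_poll_cq(cq, 8, wc)) > 0)
 *		handle_completions(wc, n);
 *
 * The second drain closes the window where a completion arrives after
 * the first drain but before the arm doorbell is rung, which might
 * otherwise never generate a completion event.
 */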

void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}
800