xref: /openbmc/linux/drivers/infiniband/hw/mlx4/cq.c (revision f42b3800)
1 /*
2  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/mlx4/cq.h>
34 #include <linux/mlx4/qp.h>
35 
36 #include "mlx4_ib.h"
37 #include "user.h"
38 
39 static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
40 {
41 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
42 	ibcq->comp_handler(ibcq, ibcq->cq_context);
43 }
44 
45 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
46 {
47 	struct ib_event event;
48 	struct ib_cq *ibcq;
49 
50 	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
51 		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
52 		       "on CQ %06x\n", type, cq->cqn);
53 		return;
54 	}
55 
56 	ibcq = &to_mibcq(cq)->ibcq;
57 	if (ibcq->event_handler) {
58 		event.device     = ibcq->device;
59 		event.event      = IB_EVENT_CQ_ERR;
60 		event.element.cq = ibcq;
61 		ibcq->event_handler(&event, ibcq->cq_context);
62 	}
63 }
64 
65 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
66 {
67 	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
68 }
69 
70 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
71 {
72 	return get_cqe_from_buf(&cq->buf, n);
73 }
74 
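/*
 * A CQE at index n belongs to software only when its ownership bit
 * matches the ring-wrap parity of n (the bit just above the index
 * mask, i.e. n & (cqe + 1)); hardware flips the ownership bit it
 * writes on every pass around the ring.  Return NULL while the
 * entry is still owned by hardware.
 */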
75 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
76 {
77 	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
78 
79 	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
80 		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
81 }
82 
83 static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
84 {
85 	return get_sw_cqe(cq, cq->mcq.cons_index);
86 }
87 
88 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
89 {
90 	struct mlx4_ib_cq *mcq = to_mcq(cq);
91 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
92 
93 	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
94 }
95 
96 static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
97 {
98 	int err;
99 
100 	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
101 			     PAGE_SIZE * 2, &buf->buf);
102 
103 	if (err)
104 		goto out;
105 
106 	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
107 				    &buf->mtt);
108 	if (err)
109 		goto err_buf;
110 
111 	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
112 	if (err)
113 		goto err_mtt;
114 
115 	return 0;
116 
117 err_mtt:
118 	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
119 
120 err_buf:
121 	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
122 			      &buf->buf);
123 
124 out:
125 	return err;
126 }
127 
128 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
129 {
130 	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
131 }
132 
133 static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
134 			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
135 			       u64 buf_addr, int cqe)
136 {
137 	int err;
138 
139 	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
140 			    IB_ACCESS_LOCAL_WRITE);
141 	if (IS_ERR(*umem))
142 		return PTR_ERR(*umem);
143 
144 	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
145 			    ilog2((*umem)->page_size), &buf->mtt);
146 	if (err)
147 		goto err_buf;
148 
149 	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
150 	if (err)
151 		goto err_mtt;
152 
153 	return 0;
154 
155 err_mtt:
156 	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
157 
158 err_buf:
159 	ib_umem_release(*umem);
160 
161 	return err;
162 }
163 
164 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
165 				struct ib_ucontext *context,
166 				struct ib_udata *udata)
167 {
168 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
169 	struct mlx4_ib_cq *cq;
170 	struct mlx4_uar *uar;
171 	int err;
172 
173 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
174 		return ERR_PTR(-EINVAL);
175 
176 	cq = kmalloc(sizeof *cq, GFP_KERNEL);
177 	if (!cq)
178 		return ERR_PTR(-ENOMEM);
179 
180 	entries      = roundup_pow_of_two(entries + 1);
181 	cq->ibcq.cqe = entries - 1;
182 	mutex_init(&cq->resize_mutex);
183 	spin_lock_init(&cq->lock);
184 	cq->resize_buf = NULL;
185 	cq->resize_umem = NULL;
186 
187 	if (context) {
188 		struct mlx4_ib_create_cq ucmd;
189 
190 		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
191 			err = -EFAULT;
192 			goto err_cq;
193 		}
194 
195 		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
196 					  ucmd.buf_addr, entries);
197 		if (err)
198 			goto err_cq;
199 
200 		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
201 					  &cq->db);
202 		if (err)
203 			goto err_mtt;
204 
205 		uar = &to_mucontext(context)->uar;
206 	} else {
207 		err = mlx4_ib_db_alloc(dev, &cq->db, 1);
208 		if (err)
209 			goto err_cq;
210 
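		/*
		 * The kernel doorbell record is two consecutive 32-bit
		 * words: the consumer index ("set CI") followed by the
		 * arm sequence number; both start out cleared.
		 */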
211 		cq->mcq.set_ci_db  = cq->db.db;
212 		cq->mcq.arm_db     = cq->db.db + 1;
213 		*cq->mcq.set_ci_db = 0;
214 		*cq->mcq.arm_db    = 0;
215 
216 		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
217 		if (err)
218 			goto err_db;
219 
220 		uar = &dev->priv_uar;
221 	}
222 
223 	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
224 			    cq->db.dma, &cq->mcq);
225 	if (err)
226 		goto err_dbmap;
227 
228 	cq->mcq.comp  = mlx4_ib_cq_comp;
229 	cq->mcq.event = mlx4_ib_cq_event;
230 
231 	if (context)
232 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
233 			err = -EFAULT;
234 			goto err_dbmap;
235 		}
236 
237 	return &cq->ibcq;
238 
239 err_dbmap:
240 	if (context)
241 		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
242 
243 err_mtt:
244 	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
245 
246 	if (context)
247 		ib_umem_release(cq->umem);
248 	else
249 		mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
250 
251 err_db:
252 	if (!context)
253 		mlx4_ib_db_free(dev, &cq->db);
254 
255 err_cq:
256 	kfree(cq);
257 
258 	return ERR_PTR(err);
259 }
260 
261 static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
262 				  int entries)
263 {
264 	int err;
265 
266 	if (cq->resize_buf)
267 		return -EBUSY;
268 
269 	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
270 	if (!cq->resize_buf)
271 		return -ENOMEM;
272 
273 	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
274 	if (err) {
275 		kfree(cq->resize_buf);
276 		cq->resize_buf = NULL;
277 		return err;
278 	}
279 
280 	cq->resize_buf->cqe = entries - 1;
281 
282 	return 0;
283 }
284 
285 static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
286 				   int entries, struct ib_udata *udata)
287 {
288 	struct mlx4_ib_resize_cq ucmd;
289 	int err;
290 
291 	if (cq->resize_umem)
292 		return -EBUSY;
293 
294 	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
295 		return -EFAULT;
296 
297 	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
298 	if (!cq->resize_buf)
299 		return -ENOMEM;
300 
301 	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
302 				  &cq->resize_umem, ucmd.buf_addr, entries);
303 	if (err) {
304 		kfree(cq->resize_buf);
305 		cq->resize_buf = NULL;
306 		return err;
307 	}
308 
309 	cq->resize_buf->cqe = entries - 1;
310 
311 	return 0;
312 }
313 
314 static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
315 {
316 	u32 i;
317 
318 	i = cq->mcq.cons_index;
319 	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
320 		++i;
321 
322 	return i - cq->mcq.cons_index;
323 }
324 
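/*
 * Move the completions still outstanding in the old CQ buffer into
 * the resize buffer, stopping when the special MLX4_CQE_OPCODE_RESIZE
 * entry is reached.  Called with cq->lock held.
 */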
325 static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
326 {
327 	struct mlx4_cqe *cqe;
328 	int i;
329 
330 	i = cq->mcq.cons_index;
331 	cqe = get_cqe(cq, i & cq->ibcq.cqe);
332 	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
333 		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
334 					(i + 1) & cq->resize_buf->cqe),
335 			get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
336 		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
337 	}
338 	++cq->mcq.cons_index;
339 }
340 
341 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
342 {
343 	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
344 	struct mlx4_ib_cq *cq = to_mcq(ibcq);
345 	int outst_cqe;
346 	int err;
347 
348 	mutex_lock(&cq->resize_mutex);
349 
350 	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
351 		err = -EINVAL;
352 		goto out;
353 	}
354 
355 	entries = roundup_pow_of_two(entries + 1);
356 	if (entries == ibcq->cqe + 1) {
357 		err = 0;
358 		goto out;
359 	}
360 
361 	if (ibcq->uobject) {
362 		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
363 		if (err)
364 			goto out;
365 	} else {
366 		/* Can't be smaller than the number of outstanding CQEs */
367 		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
368 		if (entries < outst_cqe + 1) {
369 			err = 0;
370 			goto out;
371 		}
372 
373 		err = mlx4_alloc_resize_buf(dev, cq, entries);
374 		if (err)
375 			goto out;
376 	}
377 
378 	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
379 	if (err)
380 		goto err_buf;
381 
382 	if (ibcq->uobject) {
383 		cq->buf      = cq->resize_buf->buf;
384 		cq->ibcq.cqe = cq->resize_buf->cqe;
385 		ib_umem_release(cq->umem);
386 		cq->umem     = cq->resize_umem;
387 
388 		kfree(cq->resize_buf);
389 		cq->resize_buf = NULL;
390 		cq->resize_umem = NULL;
391 	} else {
392 		spin_lock_irq(&cq->lock);
393 		if (cq->resize_buf) {
394 			mlx4_ib_cq_resize_copy_cqes(cq);
395 			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
396 			cq->buf      = cq->resize_buf->buf;
397 			cq->ibcq.cqe = cq->resize_buf->cqe;
398 
399 			kfree(cq->resize_buf);
400 			cq->resize_buf = NULL;
401 		}
402 		spin_unlock_irq(&cq->lock);
403 	}
404 
405 	goto out;
406 
407 err_buf:
408 	if (!ibcq->uobject)
409 		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
410 				    cq->resize_buf->cqe);
411 
412 	kfree(cq->resize_buf);
413 	cq->resize_buf = NULL;
414 
415 	if (cq->resize_umem) {
416 		ib_umem_release(cq->resize_umem);
417 		cq->resize_umem = NULL;
418 	}
419 
420 out:
421 	mutex_unlock(&cq->resize_mutex);
422 	return err;
423 }
424 
425 int mlx4_ib_destroy_cq(struct ib_cq *cq)
426 {
427 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
428 	struct mlx4_ib_cq *mcq = to_mcq(cq);
429 
430 	mlx4_cq_free(dev->dev, &mcq->mcq);
431 	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);
432 
433 	if (cq->uobject) {
434 		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
435 		ib_umem_release(mcq->umem);
436 	} else {
437 		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
438 		mlx4_ib_db_free(dev, &mcq->db);
439 	}
440 
441 	kfree(mcq);
442 
443 	return 0;
444 }
445 
446 static void dump_cqe(void *cqe)
447 {
448 	__be32 *buf = cqe;
449 
450 	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
451 	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
452 	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
453 	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
454 }
455 
456 static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
457 				     struct ib_wc *wc)
458 {
459 	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
460 		printk(KERN_DEBUG "local QP operation err "
461 		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
462 		       "opcode = %02x)\n",
463 		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
464 		       cqe->vendor_err_syndrome,
465 		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
466 		dump_cqe(cqe);
467 	}
468 
469 	switch (cqe->syndrome) {
470 	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
471 		wc->status = IB_WC_LOC_LEN_ERR;
472 		break;
473 	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
474 		wc->status = IB_WC_LOC_QP_OP_ERR;
475 		break;
476 	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
477 		wc->status = IB_WC_LOC_PROT_ERR;
478 		break;
479 	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
480 		wc->status = IB_WC_WR_FLUSH_ERR;
481 		break;
482 	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
483 		wc->status = IB_WC_MW_BIND_ERR;
484 		break;
485 	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
486 		wc->status = IB_WC_BAD_RESP_ERR;
487 		break;
488 	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
489 		wc->status = IB_WC_LOC_ACCESS_ERR;
490 		break;
491 	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
492 		wc->status = IB_WC_REM_INV_REQ_ERR;
493 		break;
494 	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
495 		wc->status = IB_WC_REM_ACCESS_ERR;
496 		break;
497 	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
498 		wc->status = IB_WC_REM_OP_ERR;
499 		break;
500 	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
501 		wc->status = IB_WC_RETRY_EXC_ERR;
502 		break;
503 	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
504 		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
505 		break;
506 	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
507 		wc->status = IB_WC_REM_ABORT_ERR;
508 		break;
509 	default:
510 		wc->status = IB_WC_GENERAL_ERR;
511 		break;
512 	}
513 
514 	wc->vendor_err = cqe->vendor_err_syndrome;
515 }
516 
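/*
 * The receive checksum is trusted only when the CQE reports a
 * non-fragmented IPv4 packet without IP options whose IP header
 * checksum passed (IPOK), the payload is TCP or UDP, and the
 * hardware-computed checksum is the all-ones value 0xffff.
 */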
517 static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
518 {
519 	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4	|
520 				      MLX4_CQE_IPOIB_STATUS_IPV4F	|
521 				      MLX4_CQE_IPOIB_STATUS_IPV4OPT	|
522 				      MLX4_CQE_IPOIB_STATUS_IPV6	|
523 				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
524 		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4	|
525 			    MLX4_CQE_IPOIB_STATUS_IPOK))		&&
526 		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP	|
527 				      MLX4_CQE_IPOIB_STATUS_TCP))	&&
528 		checksum == cpu_to_be16(0xffff);
529 }
530 
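/*
 * Consume a single CQE: advance the consumer index, map the entry
 * back to its QP (and SRQ WQE where relevant) and translate the
 * hardware opcode or error syndrome into an ib_wc.  Returns -EAGAIN
 * when the next entry is still owned by hardware.
 */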
531 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
532 			    struct mlx4_ib_qp **cur_qp,
533 			    struct ib_wc *wc)
534 {
535 	struct mlx4_cqe *cqe;
536 	struct mlx4_qp *mqp;
537 	struct mlx4_ib_wq *wq;
538 	struct mlx4_ib_srq *srq;
539 	int is_send;
540 	int is_error;
541 	u32 g_mlpath_rqpn;
542 	u16 wqe_ctr;
543 
544 repoll:
545 	cqe = next_cqe_sw(cq);
546 	if (!cqe)
547 		return -EAGAIN;
548 
549 	++cq->mcq.cons_index;
550 
551 	/*
552 	 * Make sure we read CQ entry contents after we've checked the
553 	 * ownership bit.
554 	 */
555 	rmb();
556 
557 	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
558 	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
559 		MLX4_CQE_OPCODE_ERROR;
560 
561 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
562 		     is_send)) {
563 		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
564 		return -EINVAL;
565 	}
566 
567 	/* Resize CQ in progress */
568 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
569 		if (cq->resize_buf) {
570 			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
571 
572 			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
573 			cq->buf      = cq->resize_buf->buf;
574 			cq->ibcq.cqe = cq->resize_buf->cqe;
575 
576 			kfree(cq->resize_buf);
577 			cq->resize_buf = NULL;
578 		}
579 
580 		goto repoll;
581 	}
582 
583 	if (!*cur_qp ||
584 	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
585 		/*
586 		 * We do not have to take the QP table lock here,
587 		 * because CQs will be locked while QPs are removed
588 		 * from the table.
589 		 */
590 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
591 				       be32_to_cpu(cqe->my_qpn));
592 		if (unlikely(!mqp)) {
593 			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
594 			       cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
595 			return -EINVAL;
596 		}
597 
598 		*cur_qp = to_mibqp(mqp);
599 	}
600 
601 	wc->qp = &(*cur_qp)->ibqp;
602 
603 	if (is_send) {
604 		wq = &(*cur_qp)->sq;
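		/*
		 * When not every send WQE is signaled, one CQE can
		 * complete several WQEs; advance the tail to the index
		 * reported by hardware before looking up the wr_id.
		 */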
605 		if (!(*cur_qp)->sq_signal_bits) {
606 			wqe_ctr = be16_to_cpu(cqe->wqe_index);
607 			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
608 		}
609 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
610 		++wq->tail;
611 	} else if ((*cur_qp)->ibqp.srq) {
612 		srq = to_msrq((*cur_qp)->ibqp.srq);
613 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
614 		wc->wr_id = srq->wrid[wqe_ctr];
615 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
616 	} else {
617 		wq	  = &(*cur_qp)->rq;
618 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
619 		++wq->tail;
620 	}
621 
622 	if (unlikely(is_error)) {
623 		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
624 		return 0;
625 	}
626 
627 	wc->status = IB_WC_SUCCESS;
628 
629 	if (is_send) {
630 		wc->wc_flags = 0;
631 		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
632 		case MLX4_OPCODE_RDMA_WRITE_IMM:
633 			wc->wc_flags |= IB_WC_WITH_IMM;	/* fall through */
634 		case MLX4_OPCODE_RDMA_WRITE:
635 			wc->opcode    = IB_WC_RDMA_WRITE;
636 			break;
637 		case MLX4_OPCODE_SEND_IMM:
638 			wc->wc_flags |= IB_WC_WITH_IMM;	/* fall through */
639 		case MLX4_OPCODE_SEND:
640 			wc->opcode    = IB_WC_SEND;
641 			break;
642 		case MLX4_OPCODE_RDMA_READ:
643 			wc->opcode    = IB_WC_RDMA_READ;
644 			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
645 			break;
646 		case MLX4_OPCODE_ATOMIC_CS:
647 			wc->opcode    = IB_WC_COMP_SWAP;
648 			wc->byte_len  = 8;
649 			break;
650 		case MLX4_OPCODE_ATOMIC_FA:
651 			wc->opcode    = IB_WC_FETCH_ADD;
652 			wc->byte_len  = 8;
653 			break;
654 		case MLX4_OPCODE_BIND_MW:
655 			wc->opcode    = IB_WC_BIND_MW;
656 			break;
657 		case MLX4_OPCODE_LSO:
658 			wc->opcode    = IB_WC_LSO;
659 			break;
660 		}
661 	} else {
662 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
663 
664 		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
665 		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
666 			wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
667 			wc->wc_flags = IB_WC_WITH_IMM;
668 			wc->imm_data = cqe->immed_rss_invalid;
669 			break;
670 		case MLX4_RECV_OPCODE_SEND:
671 			wc->opcode   = IB_WC_RECV;
672 			wc->wc_flags = 0;
673 			break;
674 		case MLX4_RECV_OPCODE_SEND_IMM:
675 			wc->opcode   = IB_WC_RECV;
676 			wc->wc_flags = IB_WC_WITH_IMM;
677 			wc->imm_data = cqe->immed_rss_invalid;
678 			break;
679 		}
680 
681 		wc->slid	   = be16_to_cpu(cqe->rlid);
682 		wc->sl		   = cqe->sl >> 4;
683 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
684 		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
685 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
686 		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
687 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
688 		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
689 							   cqe->checksum);
690 	}
691 
692 	return 0;
693 }
694 
695 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
696 {
697 	struct mlx4_ib_cq *cq = to_mcq(ibcq);
698 	struct mlx4_ib_qp *cur_qp = NULL;
699 	unsigned long flags;
700 	int npolled;
701 	int err = 0;
702 
703 	spin_lock_irqsave(&cq->lock, flags);
704 
705 	for (npolled = 0; npolled < num_entries; ++npolled) {
706 		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
707 		if (err)
708 			break;
709 	}
710 
711 	if (npolled)
712 		mlx4_cq_set_ci(&cq->mcq);
713 
714 	spin_unlock_irqrestore(&cq->lock, flags);
715 
716 	if (err == 0 || err == -EAGAIN)
717 		return npolled;
718 	else
719 		return err;
720 }
721 
722 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
723 {
724 	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
725 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
726 		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
727 		    to_mdev(ibcq->device)->uar_map,
728 		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));
729 
730 	return 0;
731 }
732 
733 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
734 {
735 	u32 prod_index;
736 	int nfreed = 0;
737 	struct mlx4_cqe *cqe, *dest;
738 	u8 owner_bit;
739 
740 	/*
741 	 * First we need to find the current producer index, so we
742 	 * know where to start cleaning from.  It doesn't matter if HW
743 	 * adds new entries after this loop -- the QP we're worried
744 	 * about is already in RESET, so the new entries won't come
745 	 * from our QP and therefore don't need to be checked.
746 	 */
747 	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
748 		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
749 			break;
750 
751 	/*
752 	 * Now sweep backwards through the CQ, removing CQ entries
753 	 * that match our QP by copying older entries on top of them.
754 	 */
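	/*
	 * Entries that do not belong to the QP are copied nfreed slots
	 * toward the producer index; the destination keeps its original
	 * ownership bit so each slot's hardware/software polarity is
	 * left intact.
	 */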
755 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
756 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
757 		if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
758 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
759 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
760 			++nfreed;
761 		} else if (nfreed) {
762 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
763 			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
764 			memcpy(dest, cqe, sizeof *cqe);
765 			dest->owner_sr_opcode = owner_bit |
766 				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
767 		}
768 	}
769 
770 	if (nfreed) {
771 		cq->mcq.cons_index += nfreed;
772 		/*
773 		 * Make sure update of buffer contents is done before
774 		 * updating consumer index.
775 		 */
776 		wmb();
777 		mlx4_cq_set_ci(&cq->mcq);
778 	}
779 }
780 
781 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
782 {
783 	spin_lock_irq(&cq->lock);
784 	__mlx4_ib_cq_clean(cq, qpn, srq);
785 	spin_unlock_irq(&cq->lock);
786 }
787