xref: /openbmc/linux/drivers/infiniband/hw/cxgb4/cq.c (revision 94c7b6fc)
1 /*
2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *	  copyright notice, this list of conditions and the following
16  *	  disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *	  copyright notice, this list of conditions and the following
20  *	  disclaimer in the documentation and/or other materials
21  *	  provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include "iw_cxgb4.h"
34 
35 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
36 		      struct c4iw_dev_ucontext *uctx)
37 {
38 	struct fw_ri_res_wr *res_wr;
39 	struct fw_ri_res *res;
40 	int wr_len;
41 	struct c4iw_wr_wait wr_wait;
42 	struct sk_buff *skb;
43 	int ret;
44 
45 	wr_len = sizeof *res_wr + sizeof *res;
46 	skb = alloc_skb(wr_len, GFP_KERNEL);
47 	if (!skb)
48 		return -ENOMEM;
49 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
50 
51 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
52 	memset(res_wr, 0, wr_len);
53 	res_wr->op_nres = cpu_to_be32(
54 			FW_WR_OP(FW_RI_RES_WR) |
55 			V_FW_RI_RES_WR_NRES(1) |
56 			FW_WR_COMPL(1));
57 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
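	/*
	 * The firmware reply handler finds wr_wait through this cookie and
	 * wakes up c4iw_wait_for_reply() below.
	 */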
58 	res_wr->cookie = (unsigned long) &wr_wait;
59 	res = res_wr->res;
60 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
61 	res->u.cq.op = FW_RI_RES_OP_RESET;
62 	res->u.cq.iqid = cpu_to_be32(cq->cqid);
63 
64 	c4iw_init_wr_wait(&wr_wait);
65 	ret = c4iw_ofld_send(rdev, skb);
66 	if (!ret) {
67 		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
68 	}
69 
70 	kfree(cq->sw_queue);
71 	dma_free_coherent(&(rdev->lldi.pdev->dev),
72 			  cq->memsize, cq->queue,
73 			  dma_unmap_addr(cq, mapping));
74 	c4iw_put_cqid(rdev, cq->cqid, uctx);
75 	return ret;
76 }
77 
78 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
79 		     struct c4iw_dev_ucontext *uctx)
80 {
81 	struct fw_ri_res_wr *res_wr;
82 	struct fw_ri_res *res;
83 	int wr_len;
84 	int user = (uctx != &rdev->uctx);
85 	struct c4iw_wr_wait wr_wait;
86 	int ret;
87 	struct sk_buff *skb;
88 
89 	cq->cqid = c4iw_get_cqid(rdev, uctx);
90 	if (!cq->cqid) {
91 		ret = -ENOMEM;
92 		goto err1;
93 	}
94 
95 	if (!user) {
96 		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
97 		if (!cq->sw_queue) {
98 			ret = -ENOMEM;
99 			goto err2;
100 		}
101 	}
102 	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
103 				       &cq->dma_addr, GFP_KERNEL);
104 	if (!cq->queue) {
105 		ret = -ENOMEM;
106 		goto err3;
107 	}
108 	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
109 	memset(cq->queue, 0, cq->memsize);
110 
111 	/* build fw_ri_res_wr */
112 	wr_len = sizeof *res_wr + sizeof *res;
113 
114 	skb = alloc_skb(wr_len, GFP_KERNEL);
115 	if (!skb) {
116 		ret = -ENOMEM;
117 		goto err4;
118 	}
119 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
120 
121 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
122 	memset(res_wr, 0, wr_len);
123 	res_wr->op_nres = cpu_to_be32(
124 			FW_WR_OP(FW_RI_RES_WR) |
125 			V_FW_RI_RES_WR_NRES(1) |
126 			FW_WR_COMPL(1));
127 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
128 	res_wr->cookie = (unsigned long) &wr_wait;
129 	res = res_wr->res;
130 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
131 	res->u.cq.op = FW_RI_RES_OP_WRITE;
132 	res->u.cq.iqid = cpu_to_be32(cq->cqid);
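	/*
	 * Interrupts for this CQ are steered to the concentrator IQ
	 * selected by the user-supplied completion vector.
	 */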
133 	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
134 			V_FW_RI_RES_WR_IQANUS(0) |
135 			V_FW_RI_RES_WR_IQANUD(1) |
136 			F_FW_RI_RES_WR_IQANDST |
137 			V_FW_RI_RES_WR_IQANDSTINDEX(
138 				rdev->lldi.ciq_ids[cq->vector]));
139 	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
140 			F_FW_RI_RES_WR_IQDROPRSS |
141 			V_FW_RI_RES_WR_IQPCIECH(2) |
142 			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
143 			F_FW_RI_RES_WR_IQO |
144 			V_FW_RI_RES_WR_IQESIZE(1));
145 	res->u.cq.iqsize = cpu_to_be16(cq->size);
146 	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
147 
148 	c4iw_init_wr_wait(&wr_wait);
149 
150 	ret = c4iw_ofld_send(rdev, skb);
151 	if (ret)
152 		goto err4;
153 	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
154 	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
155 	if (ret)
156 		goto err4;
157 
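	/*
	 * CQEs are valid when their generation bit matches cq->gen;
	 * gen starts at 1 and flips each time the CQ wraps.
	 */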
158 	cq->gen = 1;
159 	cq->gts = rdev->lldi.gts_reg;
160 	cq->rdev = rdev;
161 	if (user) {
162 		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
163 					(cq->cqid << rdev->cqshift);
164 		cq->ugts &= PAGE_MASK;
165 	}
166 	return 0;
167 err4:
168 	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
169 			  dma_unmap_addr(cq, mapping));
170 err3:
171 	kfree(cq->sw_queue);
172 err2:
173 	c4iw_put_cqid(rdev, cq->cqid, uctx);
174 err1:
175 	return ret;
176 }
177 
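/*
 * Insert a software-generated FLUSH completion for one unprocessed
 * RQ WR into the SW CQ.
 */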
178 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
179 {
180 	struct t4_cqe cqe;
181 
182 	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
183 	     wq, cq, cq->sw_cidx, cq->sw_pidx);
184 	memset(&cqe, 0, sizeof(cqe));
185 	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
186 				 V_CQE_OPCODE(FW_RI_SEND) |
187 				 V_CQE_TYPE(0) |
188 				 V_CQE_SWCQE(1) |
189 				 V_CQE_QPID(wq->sq.qid));
190 	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
191 	cq->sw_queue[cq->sw_pidx] = cqe;
192 	t4_swcq_produce(cq);
193 }
194 
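/*
 * Insert flush CQEs for the RQ WRs still in use, less the 'count'
 * entries the caller has already accounted for.
 */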
195 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
196 {
197 	int flushed = 0;
198 	int in_use = wq->rq.in_use - count;
199 
200 	BUG_ON(in_use < 0);
201 	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
202 	     wq, cq, wq->rq.in_use, count);
203 	while (in_use--) {
204 		insert_recv_cqe(wq, cq);
205 		flushed++;
206 	}
207 	return flushed;
208 }
209 
210 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
211 			  struct t4_swsqe *swcqe)
212 {
213 	struct t4_cqe cqe;
214 
215 	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
216 	     wq, cq, cq->sw_cidx, cq->sw_pidx);
217 	memset(&cqe, 0, sizeof(cqe));
218 	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
219 				 V_CQE_OPCODE(swcqe->opcode) |
220 				 V_CQE_TYPE(1) |
221 				 V_CQE_SWCQE(1) |
222 				 V_CQE_QPID(wq->sq.qid));
223 	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
224 	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
225 	cq->sw_queue[cq->sw_pidx] = cqe;
226 	t4_swcq_produce(cq);
227 }
228 
229 static void advance_oldest_read(struct t4_wq *wq);
230 
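/*
 * Insert flush CQEs for every SQ WR posted between the flush point
 * (flush_cidx) and the producer index.
 */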
231 int c4iw_flush_sq(struct c4iw_qp *qhp)
232 {
233 	int flushed = 0;
234 	struct t4_wq *wq = &qhp->wq;
235 	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
236 	struct t4_cq *cq = &chp->cq;
237 	int idx;
238 	struct t4_swsqe *swsqe;
239 
240 	if (wq->sq.flush_cidx == -1)
241 		wq->sq.flush_cidx = wq->sq.cidx;
242 	idx = wq->sq.flush_cidx;
243 	BUG_ON(idx >= wq->sq.size);
244 	while (idx != wq->sq.pidx) {
245 		swsqe = &wq->sq.sw_sq[idx];
246 		BUG_ON(swsqe->flushed);
247 		swsqe->flushed = 1;
248 		insert_sq_cqe(wq, cq, swsqe);
249 		if (wq->sq.oldest_read == swsqe) {
250 			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
251 			advance_oldest_read(wq);
252 		}
253 		flushed++;
254 		if (++idx == wq->sq.size)
255 			idx = 0;
256 	}
257 	wq->sq.flush_cidx += flushed;
258 	if (wq->sq.flush_cidx >= wq->sq.size)
259 		wq->sq.flush_cidx -= wq->sq.size;
260 	return flushed;
261 }
262 
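/*
 * Walk the SW SQ from the flush point and move any signaled WRs whose
 * completions have already arrived, and are now in order, into the SW CQ.
 */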
263 static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
264 {
265 	struct t4_swsqe *swsqe;
266 	int cidx;
267 
268 	if (wq->sq.flush_cidx == -1)
269 		wq->sq.flush_cidx = wq->sq.cidx;
270 	cidx = wq->sq.flush_cidx;
271 	BUG_ON(cidx > wq->sq.size);
272 
273 	while (cidx != wq->sq.pidx) {
274 		swsqe = &wq->sq.sw_sq[cidx];
275 		if (!swsqe->signaled) {
276 			if (++cidx == wq->sq.size)
277 				cidx = 0;
278 		} else if (swsqe->complete) {
279 
280 			BUG_ON(swsqe->flushed);
281 
282 			/*
283 			 * Insert this completed cqe into the swcq.
284 			 */
285 			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
286 					__func__, cidx, cq->sw_pidx);
287 			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
288 			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
289 			t4_swcq_produce(cq);
290 			swsqe->flushed = 1;
291 			if (++cidx == wq->sq.size)
292 				cidx = 0;
293 			wq->sq.flush_cidx = cidx;
294 		} else
295 			break;
296 	}
297 }
298 
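/*
 * Build, in local memory, the SQ READ_REQ completion implied by this
 * READ_RESP hw CQE, using state saved in the oldest outstanding read WR.
 */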
299 static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
300 		struct t4_cqe *read_cqe)
301 {
302 	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
303 	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
304 	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
305 			V_CQE_SWCQE(SW_CQE(hw_cqe)) |
306 			V_CQE_OPCODE(FW_RI_READ_REQ) |
307 			V_CQE_TYPE(1));
308 	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
309 }
310 
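/*
 * Advance oldest_read to the next outstanding READ_REQ in the SW SQ,
 * or NULL if no outstanding reads remain.
 */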
311 static void advance_oldest_read(struct t4_wq *wq)
312 {
313 
314 	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
315 
316 	if (rptr == wq->sq.size)
317 		rptr = 0;
318 	while (rptr != wq->sq.pidx) {
319 		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
320 
321 		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
322 			return;
323 		if (++rptr == wq->sq.size)
324 			rptr = 0;
325 	}
326 	wq->sq.oldest_read = NULL;
327 }
328 
329 /*
330  * Move all CQEs from the HWCQ into the SWCQ.
331  * Deal with out-of-order CQEs and/or completions that complete
332  * prior unsignaled WRs.
333  */
334 void c4iw_flush_hw_cq(struct c4iw_cq *chp)
335 {
336 	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
337 	struct c4iw_qp *qhp;
338 	struct t4_swsqe *swsqe;
339 	int ret;
340 
341 	PDBG("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
342 	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
343 
344 	/*
345 	 * This logic is similar to poll_cq(), but not quite the same
346 	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
347 	 * also do any translation magic that poll_cq() normally does.
348 	 */
349 	while (!ret) {
350 		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
351 
352 		/*
353 		 * drop CQEs with no associated QP
354 		 */
355 		if (qhp == NULL)
356 			goto next_cqe;
357 
358 		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
359 			goto next_cqe;
360 
361 		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
362 
363 			/* If we have reached here because of an async
364 			 * event or other error, and have an egress error,
365 			 * then drop the CQE.
366 			 */
367 			if (CQE_TYPE(hw_cqe) == 1)
368 				goto next_cqe;
369 
370 			/* drop peer2peer RTR reads.
371 			 */
372 			if (CQE_WRID_STAG(hw_cqe) == 1)
373 				goto next_cqe;
374 
375 			/*
376 			 * Eat completions for unsignaled read WRs.
377 			 */
378 			if (!qhp->wq.sq.oldest_read->signaled) {
379 				advance_oldest_read(&qhp->wq);
380 				goto next_cqe;
381 			}
382 
383 			/*
384 			 * Don't write to the HWCQ, create a new read req CQE
385 			 * in local memory and move it into the swcq.
386 			 */
387 			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
388 			hw_cqe = &read_cqe;
389 			advance_oldest_read(&qhp->wq);
390 		}
391 
392 		/* If it's an SQ completion, then do the magic to move all the
393 		 * unsignaled and now in-order completions into the swcq.
394 		 */
395 		if (SQ_TYPE(hw_cqe)) {
396 			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
397 			swsqe->cqe = *hw_cqe;
398 			swsqe->complete = 1;
399 			flush_completed_wrs(&qhp->wq, &chp->cq);
400 		} else {
401 			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
402 			*swcqe = *hw_cqe;
403 			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
404 			t4_swcq_produce(&chp->cq);
405 		}
406 next_cqe:
407 		t4_hwcq_consume(&chp->cq);
408 		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
409 	}
410 }
411 
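/*
 * Return 1 if this CQE completes an outstanding WR; return 0 for CQEs
 * that should be ignored (TERMINATEs, peer2peer artifacts, and SENDs
 * that arrive with no RQE posted).
 */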
412 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
413 {
414 	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
415 		return 0;
416 
417 	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
418 		return 0;
419 
420 	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
421 		return 0;
422 
423 	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
424 		return 0;
425 	return 1;
426 }
427 
428 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
429 {
430 	struct t4_cqe *cqe;
431 	u32 ptr;
432 
433 	*count = 0;
434 	PDBG("%s count zero %d\n", __func__, *count);
435 	ptr = cq->sw_cidx;
436 	while (ptr != cq->sw_pidx) {
437 		cqe = &cq->sw_queue[ptr];
438 		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
439 		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
440 			(*count)++;
441 		if (++ptr == cq->size)
442 			ptr = 0;
443 	}
444 	PDBG("%s cq %p count %d\n", __func__, cq, *count);
445 }
446 
447 /*
448  * poll_cq
449  *
450  * Caller must:
451  *     check the validity of the first CQE,
452  *     supply the wq associated with the qpid.
453  *
454  * credit: cq credit to return to sge.
455  * cqe_flushed: 1 iff the CQE is flushed.
456  * cqe: copy of the polled CQE.
457  *
458  * return value:
459  *    0		    CQE returned ok.
460  *    -EAGAIN       CQE skipped, try again.
461  *    -EOVERFLOW    CQ overflow detected.
462  */
463 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
464 		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
465 {
466 	int ret = 0;
467 	struct t4_cqe *hw_cqe, read_cqe;
468 
469 	*cqe_flushed = 0;
470 	*credit = 0;
471 	ret = t4_next_cqe(cq, &hw_cqe);
472 	if (ret)
473 		return ret;
474 
475 	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
476 	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
477 	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
478 	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
479 	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
480 	     CQE_WRID_LOW(hw_cqe));
481 
482 	/*
483 	 * skip CQEs not affiliated with a QP.
484 	 */
485 	if (wq == NULL) {
486 		ret = -EAGAIN;
487 		goto skip_cqe;
488 	}
489 
490 	/*
491 	 * skip hw CQEs if the wq is flushed.
492 	 */
493 	if (wq->flushed && !SW_CQE(hw_cqe)) {
494 		ret = -EAGAIN;
495 		goto skip_cqe;
496 	}
497 
498 	/*
499 	 * skip TERMINATE cqes...
500 	 */
501 	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
502 		ret = -EAGAIN;
503 		goto skip_cqe;
504 	}
505 
506 	/*
507 	 * Gotta tweak READ completions:
508 	 *	1) the cqe doesn't contain the sq_wptr from the wr.
509 	 *	2) opcode not reflected from the wr.
510 	 *	3) read_len not reflected from the wr.
511 	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
512 	 */
513 	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
514 
515 		/* If we have reached here because of an async
516 		 * event or other error, and have an egress error,
517 		 * then drop the CQE.
518 		 */
519 		if (CQE_TYPE(hw_cqe) == 1) {
520 			if (CQE_STATUS(hw_cqe))
521 				t4_set_wq_in_error(wq);
522 			ret = -EAGAIN;
523 			goto skip_cqe;
524 		}
525 
526 		/* If this is an unsolicited read response, then the read
527 		 * was generated by the kernel driver as part of peer-2-peer
528 		 * connection setup.  So ignore the completion.
529 		 */
530 		if (CQE_WRID_STAG(hw_cqe) == 1) {
531 			if (CQE_STATUS(hw_cqe))
532 				t4_set_wq_in_error(wq);
533 			ret = -EAGAIN;
534 			goto skip_cqe;
535 		}
536 
537 		/*
538 		 * Eat completions for unsignaled read WRs.
539 		 */
540 		if (!wq->sq.oldest_read->signaled) {
541 			advance_oldest_read(wq);
542 			ret = -EAGAIN;
543 			goto skip_cqe;
544 		}
545 
546 		/*
547 		 * Don't write to the HWCQ, so create a new read req CQE
548 		 * in local memory.
549 		 */
550 		create_read_req_cqe(wq, hw_cqe, &read_cqe);
551 		hw_cqe = &read_cqe;
552 		advance_oldest_read(wq);
553 	}
554 
555 	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
556 		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
557 		t4_set_wq_in_error(wq);
558 	}
559 
560 	/*
561 	 * RECV completion.
562 	 */
563 	if (RQ_TYPE(hw_cqe)) {
564 
565 		/*
566 		 * HW only validates 4 bits of MSN.  So we must validate that
567 	 * the MSN in the SEND is the next expected MSN.  If it's not,
568 		 * then we complete this with T4_ERR_MSN and mark the wq in
569 		 * error.
570 		 */
571 
572 		if (t4_rq_empty(wq)) {
573 			t4_set_wq_in_error(wq);
574 			ret = -EAGAIN;
575 			goto skip_cqe;
576 		}
577 		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
578 			t4_set_wq_in_error(wq);
579 			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
580 			goto proc_cqe;
581 		}
582 		goto proc_cqe;
583 	}
584 
585 	/*
586 	 * If we get here it's a send completion.
587 	 *
588 	 * Handle out of order completion. These get stuffed
589 	 * in the SW SQ. Then the SW SQ is walked to move any
590 	 * now in-order completions into the SW CQ.  This handles
591 	 * 2 cases:
592 	 *	1) reaping unsignaled WRs when the first subsequent
593 	 *	   signaled WR is completed.
594 	 *	2) out of order read completions.
595 	 */
596 	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
597 		struct t4_swsqe *swsqe;
598 
599 		PDBG("%s out of order completion going in sw_sq at idx %u\n",
600 		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
601 		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
602 		swsqe->cqe = *hw_cqe;
603 		swsqe->complete = 1;
604 		ret = -EAGAIN;
605 		goto flush_wq;
606 	}
607 
608 proc_cqe:
609 	*cqe = *hw_cqe;
610 
611 	/*
612 	 * Reap the associated WR(s) that are freed up with this
613 	 * completion.
614 	 */
615 	if (SQ_TYPE(hw_cqe)) {
616 		int idx = CQE_WRID_SQ_IDX(hw_cqe);
617 		BUG_ON(idx >= wq->sq.size);
618 
619 		/*
620 		 * Account for any unsignaled completions completed by
621 		 * this signaled completion.  In this case, cidx points
622 		 * to the first unsignaled one, and idx points to the
623 		 * signaled one.  So adjust in_use based on this delta.
624 		 * If this is not completing any unsignaled WRs, then the
625 		 * delta will be 0. Handle wrapping also!
626 		 */
627 		if (idx < wq->sq.cidx)
628 			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
629 		else
630 			wq->sq.in_use -= idx - wq->sq.cidx;
631 		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
632 
633 		wq->sq.cidx = (uint16_t)idx;
634 		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
635 		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
636 		t4_sq_consume(wq);
637 	} else {
638 		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
639 		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
640 		BUG_ON(t4_rq_empty(wq));
641 		t4_rq_consume(wq);
642 		goto skip_cqe;
643 	}
644 
645 flush_wq:
646 	/*
647 	 * Flush any completed cqes that are now in-order.
648 	 */
649 	flush_completed_wrs(wq, cq);
650 
651 skip_cqe:
652 	if (SW_CQE(hw_cqe)) {
653 		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
654 		     __func__, cq, cq->cqid, cq->sw_cidx);
655 		t4_swcq_consume(cq);
656 	} else {
657 		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
658 		     __func__, cq, cq->cqid, cq->cidx);
659 		t4_hwcq_consume(cq);
660 	}
661 	return ret;
662 }
663 
664 /*
665  * Get one cq entry from c4iw and map it to openib.
666  *
667  * Returns:
668  *	0			cqe returned
669  *	-ENODATA		the CQ is empty
670  *	-EAGAIN			caller must try again
671  *	any other -errno	fatal error
672  */
673 static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
674 {
675 	struct c4iw_qp *qhp = NULL;
676 	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
677 	struct t4_wq *wq;
678 	u32 credit = 0;
679 	u8 cqe_flushed;
680 	u64 cookie = 0;
681 	int ret;
682 
683 	ret = t4_next_cqe(&chp->cq, &rd_cqe);
684 
685 	if (ret)
686 		return ret;
687 
688 	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
689 	if (!qhp)
690 		wq = NULL;
691 	else {
692 		spin_lock(&qhp->lock);
693 		wq = &(qhp->wq);
694 	}
695 	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
696 	if (ret)
697 		goto out;
698 
699 	wc->wr_id = cookie;
700 	wc->qp = &qhp->ibqp;
701 	wc->vendor_err = CQE_STATUS(&cqe);
702 	wc->wc_flags = 0;
703 
704 	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
705 	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
706 	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
707 	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
708 
709 	if (CQE_TYPE(&cqe) == 0) {
710 		if (!CQE_STATUS(&cqe))
711 			wc->byte_len = CQE_LEN(&cqe);
712 		else
713 			wc->byte_len = 0;
714 		wc->opcode = IB_WC_RECV;
715 		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
716 		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
717 			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
718 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
719 		}
720 	} else {
721 		switch (CQE_OPCODE(&cqe)) {
722 		case FW_RI_RDMA_WRITE:
723 			wc->opcode = IB_WC_RDMA_WRITE;
724 			break;
725 		case FW_RI_READ_REQ:
726 			wc->opcode = IB_WC_RDMA_READ;
727 			wc->byte_len = CQE_LEN(&cqe);
728 			break;
729 		case FW_RI_SEND_WITH_INV:
730 		case FW_RI_SEND_WITH_SE_INV:
731 			wc->opcode = IB_WC_SEND;
732 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
733 			break;
734 		case FW_RI_SEND:
735 		case FW_RI_SEND_WITH_SE:
736 			wc->opcode = IB_WC_SEND;
737 			break;
738 		case FW_RI_BIND_MW:
739 			wc->opcode = IB_WC_BIND_MW;
740 			break;
741 
742 		case FW_RI_LOCAL_INV:
743 			wc->opcode = IB_WC_LOCAL_INV;
744 			break;
745 		case FW_RI_FAST_REGISTER:
746 			wc->opcode = IB_WC_FAST_REG_MR;
747 			break;
748 		default:
749 			printk(KERN_ERR MOD "Unexpected opcode %d "
750 			       "in the CQE received for QPID=0x%0x\n",
751 			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
752 			ret = -EINVAL;
753 			goto out;
754 		}
755 	}
756 
757 	if (cqe_flushed)
758 		wc->status = IB_WC_WR_FLUSH_ERR;
759 	else {
760 
761 		switch (CQE_STATUS(&cqe)) {
762 		case T4_ERR_SUCCESS:
763 			wc->status = IB_WC_SUCCESS;
764 			break;
765 		case T4_ERR_STAG:
766 			wc->status = IB_WC_LOC_ACCESS_ERR;
767 			break;
768 		case T4_ERR_PDID:
769 			wc->status = IB_WC_LOC_PROT_ERR;
770 			break;
771 		case T4_ERR_QPID:
772 		case T4_ERR_ACCESS:
773 			wc->status = IB_WC_LOC_ACCESS_ERR;
774 			break;
775 		case T4_ERR_WRAP:
776 			wc->status = IB_WC_GENERAL_ERR;
777 			break;
778 		case T4_ERR_BOUND:
779 			wc->status = IB_WC_LOC_LEN_ERR;
780 			break;
781 		case T4_ERR_INVALIDATE_SHARED_MR:
782 		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
783 			wc->status = IB_WC_MW_BIND_ERR;
784 			break;
785 		case T4_ERR_CRC:
786 		case T4_ERR_MARKER:
787 		case T4_ERR_PDU_LEN_ERR:
788 		case T4_ERR_OUT_OF_RQE:
789 		case T4_ERR_DDP_VERSION:
790 		case T4_ERR_RDMA_VERSION:
791 		case T4_ERR_DDP_QUEUE_NUM:
792 		case T4_ERR_MSN:
793 		case T4_ERR_TBIT:
794 		case T4_ERR_MO:
795 		case T4_ERR_MSN_RANGE:
796 		case T4_ERR_IRD_OVERFLOW:
797 		case T4_ERR_OPCODE:
798 		case T4_ERR_INTERNAL_ERR:
799 			wc->status = IB_WC_FATAL_ERR;
800 			break;
801 		case T4_ERR_SWFLUSH:
802 			wc->status = IB_WC_WR_FLUSH_ERR;
803 			break;
804 		default:
805 			printk(KERN_ERR MOD
806 			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
807 			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
808 			ret = -EINVAL;
809 		}
810 	}
811 out:
812 	if (wq)
813 		spin_unlock(&qhp->lock);
814 	return ret;
815 }
816 
817 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
818 {
819 	struct c4iw_cq *chp;
820 	unsigned long flags;
821 	int npolled;
822 	int err = 0;
823 
824 	chp = to_c4iw_cq(ibcq);
825 
826 	spin_lock_irqsave(&chp->lock, flags);
827 	for (npolled = 0; npolled < num_entries; ++npolled) {
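		/*
		 * c4iw_poll_cq_one() returns -EAGAIN for CQEs that must be
		 * skipped; retry until a real completion or an empty CQ.
		 */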
828 		do {
829 			err = c4iw_poll_cq_one(chp, wc + npolled);
830 		} while (err == -EAGAIN);
831 		if (err)
832 			break;
833 	}
834 	spin_unlock_irqrestore(&chp->lock, flags);
835 	return !err || err == -ENODATA ? npolled : err;
836 }
837 
838 int c4iw_destroy_cq(struct ib_cq *ib_cq)
839 {
840 	struct c4iw_cq *chp;
841 	struct c4iw_ucontext *ucontext;
842 
843 	PDBG("%s ib_cq %p\n", __func__, ib_cq);
844 	chp = to_c4iw_cq(ib_cq);
845 
846 	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
847 	atomic_dec(&chp->refcnt);
848 	wait_event(chp->wait, !atomic_read(&chp->refcnt));
849 
850 	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
851 				  : NULL;
852 	destroy_cq(&chp->rhp->rdev, &chp->cq,
853 		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
854 	kfree(chp);
855 	return 0;
856 }
857 
858 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
859 			     int vector, struct ib_ucontext *ib_context,
860 			     struct ib_udata *udata)
861 {
862 	struct c4iw_dev *rhp;
863 	struct c4iw_cq *chp;
864 	struct c4iw_create_cq_resp uresp;
865 	struct c4iw_ucontext *ucontext = NULL;
866 	int ret;
867 	size_t memsize, hwentries;
868 	struct c4iw_mm_entry *mm, *mm2;
869 
870 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
871 
872 	rhp = to_c4iw_dev(ibdev);
873 
874 	if (vector >= rhp->rdev.lldi.nciq)
875 		return ERR_PTR(-EINVAL);
876 
877 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
878 	if (!chp)
879 		return ERR_PTR(-ENOMEM);
880 
881 	if (ib_context)
882 		ucontext = to_c4iw_ucontext(ib_context);
883 
884 	/* account for the status page. */
885 	entries++;
886 
887 	/* IQ needs one extra entry to differentiate full vs empty. */
888 	entries++;
889 
890 	/*
891 	 * entries must be a multiple of 16 for HW.
892 	 */
893 	entries = roundup(entries, 16);
894 
895 	/*
896 	 * Make the actual HW queue 2x to avoid cidx_inc overflows.
897 	 */
898 	hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
899 
900 	/*
901 	 * Make HW queue at least 64 entries so GTS updates aren't too
902 	 * frequent.
903 	 */
904 	if (hwentries < 64)
905 		hwentries = 64;
906 
907 	memsize = hwentries * sizeof *chp->cq.queue;
908 
909 	/*
910 	 * memsize must be a multiple of the page size if it's a user cq.
911 	 */
912 	if (ucontext) {
913 		memsize = roundup(memsize, PAGE_SIZE);
914 		hwentries = memsize / sizeof *chp->cq.queue;
915 		while (hwentries > T4_MAX_IQ_SIZE) {
916 			memsize -= PAGE_SIZE;
917 			hwentries = memsize / sizeof *chp->cq.queue;
918 		}
919 	}
920 	chp->cq.size = hwentries;
921 	chp->cq.memsize = memsize;
922 	chp->cq.vector = vector;
923 
924 	ret = create_cq(&rhp->rdev, &chp->cq,
925 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
926 	if (ret)
927 		goto err1;
928 
929 	chp->rhp = rhp;
930 	chp->cq.size--;				/* status page */
931 	chp->ibcq.cqe = entries - 2;
932 	spin_lock_init(&chp->lock);
933 	spin_lock_init(&chp->comp_handler_lock);
934 	atomic_set(&chp->refcnt, 1);
935 	init_waitqueue_head(&chp->wait);
936 	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
937 	if (ret)
938 		goto err2;
939 
940 	if (ucontext) {
941 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
942 		if (!mm)
943 			goto err3;
944 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
945 		if (!mm2)
946 			goto err4;
947 
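		/*
		 * Return the CQ geometry and two mmap keys to userspace: the
		 * library mmap()s 'key' to map the CQ memory and 'gts_key'
		 * to map the GTS doorbell page.
		 */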
948 		uresp.qid_mask = rhp->rdev.cqmask;
949 		uresp.cqid = chp->cq.cqid;
950 		uresp.size = chp->cq.size;
951 		uresp.memsize = chp->cq.memsize;
952 		spin_lock(&ucontext->mmap_lock);
953 		uresp.key = ucontext->key;
954 		ucontext->key += PAGE_SIZE;
955 		uresp.gts_key = ucontext->key;
956 		ucontext->key += PAGE_SIZE;
957 		spin_unlock(&ucontext->mmap_lock);
958 		ret = ib_copy_to_udata(udata, &uresp,
959 				       sizeof(uresp) - sizeof(uresp.reserved));
960 		if (ret)
961 			goto err5;
962 
963 		mm->key = uresp.key;
964 		mm->addr = virt_to_phys(chp->cq.queue);
965 		mm->len = chp->cq.memsize;
966 		insert_mmap(ucontext, mm);
967 
968 		mm2->key = uresp.gts_key;
969 		mm2->addr = chp->cq.ugts;
970 		mm2->len = PAGE_SIZE;
971 		insert_mmap(ucontext, mm2);
972 	}
973 	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
974 	     __func__, chp->cq.cqid, chp, chp->cq.size,
975 	     chp->cq.memsize,
976 	     (unsigned long long) chp->cq.dma_addr);
977 	return &chp->ibcq;
978 err5:
979 	kfree(mm2);
980 err4:
981 	kfree(mm);
982 err3:
983 	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
984 err2:
985 	destroy_cq(&chp->rhp->rdev, &chp->cq,
986 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
987 err1:
988 	kfree(chp);
989 	return ERR_PTR(ret);
990 }
991 
992 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
993 {
994 	return -ENOSYS;
995 }
996 
997 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
998 {
999 	struct c4iw_cq *chp;
1000 	int ret;
1001 	unsigned long flag;
1002 
1003 	chp = to_c4iw_cq(ibcq);
1004 	spin_lock_irqsave(&chp->lock, flag);
1005 	ret = t4_arm_cq(&chp->cq,
1006 			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
1007 	spin_unlock_irqrestore(&chp->lock, flag);
1008 	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
1009 		ret = 0;
1010 	return ret;
1011 }
1012