xref: /openbmc/linux/drivers/infiniband/hw/cxgb4/qp.c (revision f5005f78)
1 /*
2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/module.h>
34 
35 #include "iw_cxgb4.h"
36 
37 static int db_delay_usecs = 1;
38 module_param(db_delay_usecs, int, 0644);
39 MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay while waiting for the db fifo to drain");
40 
41 static int ocqp_support = 1;
42 module_param(ocqp_support, int, 0644);
43 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
44 
45 int db_fc_threshold = 1000;
46 module_param(db_fc_threshold, int, 0644);
47 MODULE_PARM_DESC(db_fc_threshold,
48 		 "QP count/threshold that triggers"
49 		 " automatic db flow control mode (default = 1000)");
50 
51 int db_coalescing_threshold;
52 module_param(db_coalescing_threshold, int, 0644);
53 MODULE_PARM_DESC(db_coalescing_threshold,
54 		 "QP count/threshold that triggers"
55 		 " disabling db coalescing (default = 0)");
56 
57 static int max_fr_immd = T4_MAX_FR_IMMD;
58 module_param(max_fr_immd, int, 0644);
59 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
60 
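/*
 * Per-device IRD accounting: alloc_ird() reserves @ird entries from
 * dev->avail_ird under dev->lock, warning and returning -ENOMEM when the
 * device's IRD resources are exhausted; free_ird() returns them.
 */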
61 static int alloc_ird(struct c4iw_dev *dev, u32 ird)
62 {
63 	int ret = 0;
64 
65 	spin_lock_irq(&dev->lock);
66 	if (ird <= dev->avail_ird)
67 		dev->avail_ird -= ird;
68 	else
69 		ret = -ENOMEM;
70 	spin_unlock_irq(&dev->lock);
71 
72 	if (ret)
73 		dev_warn(&dev->rdev.lldi.pdev->dev,
74 			 "device IRD resources exhausted\n");
75 
76 	return ret;
77 }
78 
79 static void free_ird(struct c4iw_dev *dev, int ird)
80 {
81 	spin_lock_irq(&dev->lock);
82 	dev->avail_ird += ird;
83 	spin_unlock_irq(&dev->lock);
84 }
85 
86 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
87 {
88 	unsigned long flag;
89 	spin_lock_irqsave(&qhp->lock, flag);
90 	qhp->attr.state = state;
91 	spin_unlock_irqrestore(&qhp->lock, flag);
92 }
93 
94 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
95 {
96 	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
97 }
98 
99 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
100 {
101 	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
102 			  pci_unmap_addr(sq, mapping));
103 }
104 
105 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
106 {
107 	if (t4_sq_onchip(sq))
108 		dealloc_oc_sq(rdev, sq);
109 	else
110 		dealloc_host_sq(rdev, sq);
111 }
112 
113 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
114 {
115 	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
116 		return -ENOSYS;
117 	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
118 	if (!sq->dma_addr)
119 		return -ENOMEM;
120 	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
121 			rdev->lldi.vr->ocq.start;
122 	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
123 					    rdev->lldi.vr->ocq.start);
124 	sq->flags |= T4_SQ_ONCHIP;
125 	return 0;
126 }
127 
128 static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
129 {
130 	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
131 				       &(sq->dma_addr), GFP_KERNEL);
132 	if (!sq->queue)
133 		return -ENOMEM;
134 	sq->phys_addr = virt_to_phys(sq->queue);
135 	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
136 	return 0;
137 }
138 
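/*
 * For user QPs, try to place the SQ in on-chip queue memory first and
 * fall back to host DMA memory if that fails or is not supported.
 */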
139 static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
140 {
141 	int ret = -ENOSYS;
142 	if (user)
143 		ret = alloc_oc_sq(rdev, sq);
144 	if (ret)
145 		ret = alloc_host_sq(rdev, sq);
146 	return ret;
147 }
148 
149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
150 		      struct c4iw_dev_ucontext *uctx)
151 {
152 	/*
153 	 * uP clears EQ contexts when the connection exits rdma mode,
154 	 * so no need to post a RESET WR for these EQs.
155 	 */
156 	dma_free_coherent(&(rdev->lldi.pdev->dev),
157 			  wq->rq.memsize, wq->rq.queue,
158 			  dma_unmap_addr(&wq->rq, mapping));
159 	dealloc_sq(rdev, &wq->sq);
160 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
161 	kfree(wq->rq.sw_rq);
162 	kfree(wq->sq.sw_sq);
163 	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
164 	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
165 	return 0;
166 }
167 
168 /*
169  * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
170  * then this is a user mapping so compute the page-aligned physical address
171  * for mapping.
172  */
173 void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
174 			      enum cxgb4_bar2_qtype qtype,
175 			      unsigned int *pbar2_qid, u64 *pbar2_pa)
176 {
177 	u64 bar2_qoffset;
178 	int ret;
179 
180 	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
181 				   pbar2_pa ? 1 : 0,
182 				   &bar2_qoffset, pbar2_qid);
183 	if (ret)
184 		return NULL;
185 
186 	if (pbar2_pa)
187 		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
188 	return rdev->bar2_kva + bar2_qoffset;
189 }
190 
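/*
 * Allocate the SQ/RQ qids, queue memory and RQT, compute the BAR2
 * doorbell/GTS addresses, then issue a single FW_RI_RES_WR that writes
 * both egress queue contexts in hardware and wait for the firmware reply.
 */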
191 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
192 		     struct t4_cq *rcq, struct t4_cq *scq,
193 		     struct c4iw_dev_ucontext *uctx)
194 {
195 	int user = (uctx != &rdev->uctx);
196 	struct fw_ri_res_wr *res_wr;
197 	struct fw_ri_res *res;
198 	int wr_len;
199 	struct c4iw_wr_wait wr_wait;
200 	struct sk_buff *skb;
201 	int ret = 0;
202 	int eqsize;
203 
204 	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
205 	if (!wq->sq.qid)
206 		return -ENOMEM;
207 
208 	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
209 	if (!wq->rq.qid) {
210 		ret = -ENOMEM;
211 		goto free_sq_qid;
212 	}
213 
214 	if (!user) {
215 		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
216 				 GFP_KERNEL);
217 		if (!wq->sq.sw_sq) {
218 			ret = -ENOMEM;
219 			goto free_rq_qid;
220 		}
221 
222 		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
223 				 GFP_KERNEL);
224 		if (!wq->rq.sw_rq) {
225 			ret = -ENOMEM;
226 			goto free_sw_sq;
227 		}
228 	}
229 
230 	/*
231 	 * RQT must be a power of 2 and at least 16 deep.
232 	 */
233 	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
234 	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
235 	if (!wq->rq.rqt_hwaddr) {
236 		ret = -ENOMEM;
237 		goto free_sw_rq;
238 	}
239 
240 	ret = alloc_sq(rdev, &wq->sq, user);
241 	if (ret)
242 		goto free_hwaddr;
243 	memset(wq->sq.queue, 0, wq->sq.memsize);
244 	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
245 
246 	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
247 					  wq->rq.memsize, &(wq->rq.dma_addr),
248 					  GFP_KERNEL);
249 	if (!wq->rq.queue) {
250 		ret = -ENOMEM;
251 		goto free_sq;
252 	}
253 	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
254 		__func__, wq->sq.queue,
255 		(unsigned long long)virt_to_phys(wq->sq.queue),
256 		wq->rq.queue,
257 		(unsigned long long)virt_to_phys(wq->rq.queue));
258 	memset(wq->rq.queue, 0, wq->rq.memsize);
259 	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
260 
261 	wq->db = rdev->lldi.db_reg;
262 
263 	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
264 					 &wq->sq.bar2_qid,
265 					 user ? &wq->sq.bar2_pa : NULL);
266 	wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
267 					 &wq->rq.bar2_qid,
268 					 user ? &wq->rq.bar2_pa : NULL);
269 
270 	/*
271 	 * User mode must have bar2 access.
272 	 */
273 	if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
274 		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
275 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		/* Don't return success (ret is still 0) on this error path. */
		ret = -EINVAL;
276 		goto free_dma;
277 	}
278 
279 	wq->rdev = rdev;
280 	wq->rq.msn = 1;
281 
282 	/* build fw_ri_res_wr */
283 	wr_len = sizeof *res_wr + 2 * sizeof *res;
284 
285 	skb = alloc_skb(wr_len, GFP_KERNEL);
286 	if (!skb) {
287 		ret = -ENOMEM;
288 		goto free_dma;
289 	}
290 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
291 
292 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
293 	memset(res_wr, 0, wr_len);
294 	res_wr->op_nres = cpu_to_be32(
295 			FW_WR_OP_V(FW_RI_RES_WR) |
296 			FW_RI_RES_WR_NRES_V(2) |
297 			FW_WR_COMPL_F);
298 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
299 	res_wr->cookie = (uintptr_t)&wr_wait;
300 	res = res_wr->res;
301 	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
302 	res->u.sqrq.op = FW_RI_RES_OP_WRITE;
303 
304 	/*
305 	 * eqsize is the number of 64B entries plus the status page size.
306 	 */
307 	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
308 		rdev->hw_queue.t4_eq_status_entries;
309 
310 	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
311 		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
312 		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
313 		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
314 		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
315 		FW_RI_RES_WR_IQID_V(scq->cqid));
316 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
317 		FW_RI_RES_WR_DCAEN_V(0) |
318 		FW_RI_RES_WR_DCACPU_V(0) |
319 		FW_RI_RES_WR_FBMIN_V(2) |
320 		FW_RI_RES_WR_FBMAX_V(2) |
321 		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
322 		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
323 		FW_RI_RES_WR_EQSIZE_V(eqsize));
324 	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
325 	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
326 	res++;
327 	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
328 	res->u.sqrq.op = FW_RI_RES_OP_WRITE;
329 
330 	/*
331 	 * eqsize is the number of 64B entries plus the status page size.
332 	 */
333 	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
334 		rdev->hw_queue.t4_eq_status_entries;
335 	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
336 		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
337 		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
338 		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
339 		FW_RI_RES_WR_IQID_V(rcq->cqid));
340 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
341 		FW_RI_RES_WR_DCAEN_V(0) |
342 		FW_RI_RES_WR_DCACPU_V(0) |
343 		FW_RI_RES_WR_FBMIN_V(2) |
344 		FW_RI_RES_WR_FBMAX_V(2) |
345 		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
346 		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
347 		FW_RI_RES_WR_EQSIZE_V(eqsize));
348 	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
349 	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
350 
351 	c4iw_init_wr_wait(&wr_wait);
352 
353 	ret = c4iw_ofld_send(rdev, skb);
354 	if (ret)
355 		goto free_dma;
356 	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
357 	if (ret)
358 		goto free_dma;
359 
360 	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
361 	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
362 	     wq->sq.bar2_va, wq->rq.bar2_va);
363 
364 	return 0;
365 free_dma:
366 	dma_free_coherent(&(rdev->lldi.pdev->dev),
367 			  wq->rq.memsize, wq->rq.queue,
368 			  dma_unmap_addr(&wq->rq, mapping));
369 free_sq:
370 	dealloc_sq(rdev, &wq->sq);
371 free_hwaddr:
372 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
373 free_sw_rq:
374 	kfree(wq->rq.sw_rq);
375 free_sw_sq:
376 	kfree(wq->sq.sw_sq);
377 free_rq_qid:
378 	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
379 free_sq_qid:
380 	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
381 	return ret;
382 }
383 
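/*
 * Copy the WR's SGE data inline into the WQE immediately after the
 * fw_ri_immd header, wrapping at the end of the SQ, and zero-pad the
 * result so header plus data ends on a 16-byte boundary.
 */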
384 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
385 		      struct ib_send_wr *wr, int max, u32 *plenp)
386 {
387 	u8 *dstp, *srcp;
388 	u32 plen = 0;
389 	int i;
390 	int rem, len;
391 
392 	dstp = (u8 *)immdp->data;
393 	for (i = 0; i < wr->num_sge; i++) {
394 		if ((plen + wr->sg_list[i].length) > max)
395 			return -EMSGSIZE;
396 		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
397 		plen += wr->sg_list[i].length;
398 		rem = wr->sg_list[i].length;
399 		while (rem) {
400 			if (dstp == (u8 *)&sq->queue[sq->size])
401 				dstp = (u8 *)sq->queue;
402 			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
403 				len = rem;
404 			else
405 				len = (u8 *)&sq->queue[sq->size] - dstp;
406 			memcpy(dstp, srcp, len);
407 			dstp += len;
408 			srcp += len;
409 			rem -= len;
410 		}
411 	}
412 	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
413 	if (len)
414 		memset(dstp, 0, len);
415 	immdp->op = FW_RI_DATA_IMMD;
416 	immdp->r1 = 0;
417 	immdp->r2 = 0;
418 	immdp->immdlen = cpu_to_be32(plen);
419 	*plenp = plen;
420 	return 0;
421 }
422 
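/*
 * Build an ISGL in place in the queue: one lkey/length flit and one
 * address flit per SGE, wrapping at queue_end.  Optionally returns the
 * total data length via plenp.
 */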
423 static int build_isgl(__be64 *queue_start, __be64 *queue_end,
424 		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
425 		      int num_sge, u32 *plenp)
426 
427 {
428 	int i;
429 	u32 plen = 0;
430 	__be64 *flitp = (__be64 *)isglp->sge;
431 
432 	for (i = 0; i < num_sge; i++) {
433 		if ((plen + sg_list[i].length) < plen)
434 			return -EMSGSIZE;
435 		plen += sg_list[i].length;
436 		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
437 				     sg_list[i].length);
438 		if (++flitp == queue_end)
439 			flitp = queue_start;
440 		*flitp = cpu_to_be64(sg_list[i].addr);
441 		if (++flitp == queue_end)
442 			flitp = queue_start;
443 	}
444 	*flitp = (__force __be64)0;
445 	isglp->op = FW_RI_DATA_ISGL;
446 	isglp->r1 = 0;
447 	isglp->nsge = cpu_to_be16(num_sge);
448 	isglp->r2 = 0;
449 	if (plenp)
450 		*plenp = plen;
451 	return 0;
452 }
453 
454 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
455 			   struct ib_send_wr *wr, u8 *len16)
456 {
457 	u32 plen;
458 	int size;
459 	int ret;
460 
461 	if (wr->num_sge > T4_MAX_SEND_SGE)
462 		return -EINVAL;
463 	switch (wr->opcode) {
464 	case IB_WR_SEND:
465 		if (wr->send_flags & IB_SEND_SOLICITED)
466 			wqe->send.sendop_pkd = cpu_to_be32(
467 				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
468 		else
469 			wqe->send.sendop_pkd = cpu_to_be32(
470 				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
471 		wqe->send.stag_inv = 0;
472 		break;
473 	case IB_WR_SEND_WITH_INV:
474 		if (wr->send_flags & IB_SEND_SOLICITED)
475 			wqe->send.sendop_pkd = cpu_to_be32(
476 				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
477 		else
478 			wqe->send.sendop_pkd = cpu_to_be32(
479 				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
480 		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
481 		break;
482 
483 	default:
484 		return -EINVAL;
485 	}
486 	wqe->send.r3 = 0;
487 	wqe->send.r4 = 0;
488 
489 	plen = 0;
490 	if (wr->num_sge) {
491 		if (wr->send_flags & IB_SEND_INLINE) {
492 			ret = build_immd(sq, wqe->send.u.immd_src, wr,
493 					 T4_MAX_SEND_INLINE, &plen);
494 			if (ret)
495 				return ret;
496 			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
497 			       plen;
498 		} else {
499 			ret = build_isgl((__be64 *)sq->queue,
500 					 (__be64 *)&sq->queue[sq->size],
501 					 wqe->send.u.isgl_src,
502 					 wr->sg_list, wr->num_sge, &plen);
503 			if (ret)
504 				return ret;
505 			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
506 			       wr->num_sge * sizeof(struct fw_ri_sge);
507 		}
508 	} else {
509 		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
510 		wqe->send.u.immd_src[0].r1 = 0;
511 		wqe->send.u.immd_src[0].r2 = 0;
512 		wqe->send.u.immd_src[0].immdlen = 0;
513 		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
514 		plen = 0;
515 	}
516 	*len16 = DIV_ROUND_UP(size, 16);
517 	wqe->send.plen = cpu_to_be32(plen);
518 	return 0;
519 }
520 
521 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
522 			    struct ib_send_wr *wr, u8 *len16)
523 {
524 	u32 plen;
525 	int size;
526 	int ret;
527 
528 	if (wr->num_sge > T4_MAX_SEND_SGE)
529 		return -EINVAL;
530 	wqe->write.r2 = 0;
531 	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
532 	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
533 	if (wr->num_sge) {
534 		if (wr->send_flags & IB_SEND_INLINE) {
535 			ret = build_immd(sq, wqe->write.u.immd_src, wr,
536 					 T4_MAX_WRITE_INLINE, &plen);
537 			if (ret)
538 				return ret;
539 			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
540 			       plen;
541 		} else {
542 			ret = build_isgl((__be64 *)sq->queue,
543 					 (__be64 *)&sq->queue[sq->size],
544 					 wqe->write.u.isgl_src,
545 					 wr->sg_list, wr->num_sge, &plen);
546 			if (ret)
547 				return ret;
548 			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
549 			       wr->num_sge * sizeof(struct fw_ri_sge);
550 		}
551 	} else {
552 		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
553 		wqe->write.u.immd_src[0].r1 = 0;
554 		wqe->write.u.immd_src[0].r2 = 0;
555 		wqe->write.u.immd_src[0].immdlen = 0;
556 		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
557 		plen = 0;
558 	}
559 	*len16 = DIV_ROUND_UP(size, 16);
560 	wqe->write.plen = cpu_to_be32(plen);
561 	return 0;
562 }
563 
564 static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
565 {
566 	if (wr->num_sge > 1)
567 		return -EINVAL;
568 	if (wr->num_sge) {
569 		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
570 		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
571 							>> 32));
572 		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
573 		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
574 		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
575 		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
576 							 >> 32));
577 		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
578 	} else {
579 		wqe->read.stag_src = cpu_to_be32(2);
580 		wqe->read.to_src_hi = 0;
581 		wqe->read.to_src_lo = 0;
582 		wqe->read.stag_sink = cpu_to_be32(2);
583 		wqe->read.plen = 0;
584 		wqe->read.to_sink_hi = 0;
585 		wqe->read.to_sink_lo = 0;
586 	}
587 	wqe->read.r2 = 0;
588 	wqe->read.r5 = 0;
589 	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
590 	return 0;
591 }
592 
593 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
594 			   struct ib_recv_wr *wr, u8 *len16)
595 {
596 	int ret;
597 
598 	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
599 			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
600 			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
601 	if (ret)
602 		return ret;
603 	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
604 			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
605 	return 0;
606 }
607 
608 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
609 			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
610 {
611 
612 	struct fw_ri_immd *imdp;
613 	__be64 *p;
614 	int i;
615 	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
616 	int rem;
617 
618 	if (wr->wr.fast_reg.page_list_len >
619 	    t4_max_fr_depth(use_dsgl))
620 		return -EINVAL;
621 
622 	wqe->fr.qpbinde_to_dcacpu = 0;
623 	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
624 	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
625 	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
626 	wqe->fr.len_hi = 0;
627 	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
628 	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
629 	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
630 	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
631 					0xffffffff);
632 
633 	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
634 		struct c4iw_fr_page_list *c4pl =
635 			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
636 		struct fw_ri_dsgl *sglp;
637 
638 		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
639 			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
640 				cpu_to_be64((u64)
641 				wr->wr.fast_reg.page_list->page_list[i]);
642 		}
643 
644 		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
645 		sglp->op = FW_RI_DATA_DSGL;
646 		sglp->r1 = 0;
647 		sglp->nsge = cpu_to_be16(1);
648 		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
649 		sglp->len0 = cpu_to_be32(pbllen);
650 
651 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
652 	} else {
653 		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
654 		imdp->op = FW_RI_DATA_IMMD;
655 		imdp->r1 = 0;
656 		imdp->r2 = 0;
657 		imdp->immdlen = cpu_to_be32(pbllen);
658 		p = (__be64 *)(imdp + 1);
659 		rem = pbllen;
660 		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
661 			*p = cpu_to_be64(
662 				(u64)wr->wr.fast_reg.page_list->page_list[i]);
663 			rem -= sizeof(*p);
664 			if (++p == (__be64 *)&sq->queue[sq->size])
665 				p = (__be64 *)sq->queue;
666 		}
667 		BUG_ON(rem < 0);
668 		while (rem) {
669 			*p = 0;
670 			rem -= sizeof(*p);
671 			if (++p == (__be64 *)&sq->queue[sq->size])
672 				p = (__be64 *)sq->queue;
673 		}
674 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
675 				      + pbllen, 16);
676 	}
677 	return 0;
678 }
679 
680 static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
681 			  u8 *len16)
682 {
683 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
684 	wqe->inv.r2 = 0;
685 	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
686 	return 0;
687 }
688 
689 void c4iw_qp_add_ref(struct ib_qp *qp)
690 {
691 	PDBG("%s ib_qp %p\n", __func__, qp);
692 	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
693 }
694 
695 void c4iw_qp_rem_ref(struct ib_qp *qp)
696 {
697 	PDBG("%s ib_qp %p\n", __func__, qp);
698 	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
699 		wake_up(&(to_c4iw_qp(qp)->wait));
700 }
701 
702 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
703 {
704 	if (list_empty(entry))
705 		list_add_tail(entry, head);
706 }
707 
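/*
 * Ring the SQ doorbell for a kernel QP.  If the device is not in the
 * NORMAL doorbell state, defer the update by queueing the QP on the db
 * flow-control list and accumulating the pidx increment instead.
 * ring_kernel_rq_db() below is the RQ equivalent.
 */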
708 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
709 {
710 	unsigned long flags;
711 
712 	spin_lock_irqsave(&qhp->rhp->lock, flags);
713 	spin_lock(&qhp->lock);
714 	if (qhp->rhp->db_state == NORMAL)
715 		t4_ring_sq_db(&qhp->wq, inc,
716 			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
717 	else {
718 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
719 		qhp->wq.sq.wq_pidx_inc += inc;
720 	}
721 	spin_unlock(&qhp->lock);
722 	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
723 	return 0;
724 }
725 
726 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
727 {
728 	unsigned long flags;
729 
730 	spin_lock_irqsave(&qhp->rhp->lock, flags);
731 	spin_lock(&qhp->lock);
732 	if (qhp->rhp->db_state == NORMAL)
733 		t4_ring_rq_db(&qhp->wq, inc,
734 			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
735 	else {
736 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
737 		qhp->wq.rq.wq_pidx_inc += inc;
738 	}
739 	spin_unlock(&qhp->lock);
740 	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
741 	return 0;
742 }
743 
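/*
 * Post a chain of send work requests: translate each ib_send_wr into the
 * corresponding FW work request in the SQ, record it in the software SQ,
 * then ring the SQ doorbell (or defer it when doorbells are off).
 */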
744 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
745 		   struct ib_send_wr **bad_wr)
746 {
747 	int err = 0;
748 	u8 len16 = 0;
749 	enum fw_wr_opcodes fw_opcode = 0;
750 	enum fw_ri_wr_flags fw_flags;
751 	struct c4iw_qp *qhp;
752 	union t4_wr *wqe = NULL;
753 	u32 num_wrs;
754 	struct t4_swsqe *swsqe;
755 	unsigned long flag;
756 	u16 idx = 0;
757 
758 	qhp = to_c4iw_qp(ibqp);
759 	spin_lock_irqsave(&qhp->lock, flag);
760 	if (t4_wq_in_error(&qhp->wq)) {
761 		spin_unlock_irqrestore(&qhp->lock, flag);
762 		return -EINVAL;
763 	}
764 	num_wrs = t4_sq_avail(&qhp->wq);
765 	if (num_wrs == 0) {
766 		spin_unlock_irqrestore(&qhp->lock, flag);
767 		return -ENOMEM;
768 	}
769 	while (wr) {
770 		if (num_wrs == 0) {
771 			err = -ENOMEM;
772 			*bad_wr = wr;
773 			break;
774 		}
775 		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
776 		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
777 
778 		fw_flags = 0;
779 		if (wr->send_flags & IB_SEND_SOLICITED)
780 			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
781 		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
782 			fw_flags |= FW_RI_COMPLETION_FLAG;
783 		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
784 		switch (wr->opcode) {
785 		case IB_WR_SEND_WITH_INV:
786 		case IB_WR_SEND:
787 			if (wr->send_flags & IB_SEND_FENCE)
788 				fw_flags |= FW_RI_READ_FENCE_FLAG;
789 			fw_opcode = FW_RI_SEND_WR;
790 			if (wr->opcode == IB_WR_SEND)
791 				swsqe->opcode = FW_RI_SEND;
792 			else
793 				swsqe->opcode = FW_RI_SEND_WITH_INV;
794 			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
795 			break;
796 		case IB_WR_RDMA_WRITE:
797 			fw_opcode = FW_RI_RDMA_WRITE_WR;
798 			swsqe->opcode = FW_RI_RDMA_WRITE;
799 			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
800 			break;
801 		case IB_WR_RDMA_READ:
802 		case IB_WR_RDMA_READ_WITH_INV:
803 			fw_opcode = FW_RI_RDMA_READ_WR;
804 			swsqe->opcode = FW_RI_READ_REQ;
805 			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
806 				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
807 			else
808 				fw_flags = 0;
809 			err = build_rdma_read(wqe, wr, &len16);
810 			if (err)
811 				break;
812 			swsqe->read_len = wr->sg_list[0].length;
813 			if (!qhp->wq.sq.oldest_read)
814 				qhp->wq.sq.oldest_read = swsqe;
815 			break;
816 		case IB_WR_FAST_REG_MR:
817 			fw_opcode = FW_RI_FR_NSMR_WR;
818 			swsqe->opcode = FW_RI_FAST_REGISTER;
819 			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
820 					    is_t5(
821 					    qhp->rhp->rdev.lldi.adapter_type) ?
822 					    1 : 0);
823 			break;
824 		case IB_WR_LOCAL_INV:
825 			if (wr->send_flags & IB_SEND_FENCE)
826 				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
827 			fw_opcode = FW_RI_INV_LSTAG_WR;
828 			swsqe->opcode = FW_RI_LOCAL_INV;
829 			err = build_inv_stag(wqe, wr, &len16);
830 			break;
831 		default:
832 			PDBG("%s post of type=%d TBD!\n", __func__,
833 			     wr->opcode);
834 			err = -EINVAL;
835 		}
836 		if (err) {
837 			*bad_wr = wr;
838 			break;
839 		}
840 		swsqe->idx = qhp->wq.sq.pidx;
841 		swsqe->complete = 0;
842 		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
843 				  qhp->sq_sig_all;
844 		swsqe->flushed = 0;
845 		swsqe->wr_id = wr->wr_id;
846 		if (c4iw_wr_log) {
847 			swsqe->sge_ts = cxgb4_read_sge_timestamp(
848 					qhp->rhp->rdev.lldi.ports[0]);
849 			getnstimeofday(&swsqe->host_ts);
850 		}
851 
852 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
853 
854 		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
855 		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
856 		     swsqe->opcode, swsqe->read_len);
857 		wr = wr->next;
858 		num_wrs--;
859 		t4_sq_produce(&qhp->wq, len16);
860 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
861 	}
862 	if (!qhp->rhp->rdev.status_page->db_off) {
863 		t4_ring_sq_db(&qhp->wq, idx,
864 			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
865 		spin_unlock_irqrestore(&qhp->lock, flag);
866 	} else {
867 		spin_unlock_irqrestore(&qhp->lock, flag);
868 		ring_kernel_sq_db(qhp, idx);
869 	}
870 	return err;
871 }
872 
873 int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
874 		      struct ib_recv_wr **bad_wr)
875 {
876 	int err = 0;
877 	struct c4iw_qp *qhp;
878 	union t4_recv_wr *wqe = NULL;
879 	u32 num_wrs;
880 	u8 len16 = 0;
881 	unsigned long flag;
882 	u16 idx = 0;
883 
884 	qhp = to_c4iw_qp(ibqp);
885 	spin_lock_irqsave(&qhp->lock, flag);
886 	if (t4_wq_in_error(&qhp->wq)) {
887 		spin_unlock_irqrestore(&qhp->lock, flag);
888 		return -EINVAL;
889 	}
890 	num_wrs = t4_rq_avail(&qhp->wq);
891 	if (num_wrs == 0) {
892 		spin_unlock_irqrestore(&qhp->lock, flag);
893 		return -ENOMEM;
894 	}
895 	while (wr) {
896 		if (wr->num_sge > T4_MAX_RECV_SGE) {
897 			err = -EINVAL;
898 			*bad_wr = wr;
899 			break;
900 		}
901 		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
902 					   qhp->wq.rq.wq_pidx *
903 					   T4_EQ_ENTRY_SIZE);
904 		if (num_wrs)
905 			err = build_rdma_recv(qhp, wqe, wr, &len16);
906 		else
907 			err = -ENOMEM;
908 		if (err) {
909 			*bad_wr = wr;
910 			break;
911 		}
912 
913 		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
914 		if (c4iw_wr_log) {
915 			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
916 				cxgb4_read_sge_timestamp(
917 						qhp->rhp->rdev.lldi.ports[0]);
918 			getnstimeofday(
919 				&qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
920 		}
921 
922 		wqe->recv.opcode = FW_RI_RECV_WR;
923 		wqe->recv.r1 = 0;
924 		wqe->recv.wrid = qhp->wq.rq.pidx;
925 		wqe->recv.r2[0] = 0;
926 		wqe->recv.r2[1] = 0;
927 		wqe->recv.r2[2] = 0;
928 		wqe->recv.len16 = len16;
929 		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
930 		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
931 		t4_rq_produce(&qhp->wq, len16);
932 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
933 		wr = wr->next;
934 		num_wrs--;
935 	}
936 	if (!qhp->rhp->rdev.status_page->db_off) {
937 		t4_ring_rq_db(&qhp->wq, idx,
938 			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
939 		spin_unlock_irqrestore(&qhp->lock, flag);
940 	} else {
941 		spin_unlock_irqrestore(&qhp->lock, flag);
942 		ring_kernel_rq_db(qhp, idx);
943 	}
944 	return err;
945 }
946 
947 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
948 {
949 	return -ENOSYS;
950 }
951 
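/*
 * Map the error CQE status onto the iWARP TERMINATE layer/etype and
 * error code to be reported to the peer.
 */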
952 static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
953 				    u8 *ecode)
954 {
955 	int status;
956 	int tagged;
957 	int opcode;
958 	int rqtype;
959 	int send_inv;
960 
961 	if (!err_cqe) {
962 		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
963 		*ecode = 0;
964 		return;
965 	}
966 
967 	status = CQE_STATUS(err_cqe);
968 	opcode = CQE_OPCODE(err_cqe);
969 	rqtype = RQ_TYPE(err_cqe);
970 	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
971 		   (opcode == FW_RI_SEND_WITH_SE_INV);
972 	tagged = (opcode == FW_RI_RDMA_WRITE) ||
973 		 (rqtype && (opcode == FW_RI_READ_RESP));
974 
975 	switch (status) {
976 	case T4_ERR_STAG:
977 		if (send_inv) {
978 			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
979 			*ecode = RDMAP_CANT_INV_STAG;
980 		} else {
981 			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
982 			*ecode = RDMAP_INV_STAG;
983 		}
984 		break;
985 	case T4_ERR_PDID:
986 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
987 		if ((opcode == FW_RI_SEND_WITH_INV) ||
988 		    (opcode == FW_RI_SEND_WITH_SE_INV))
989 			*ecode = RDMAP_CANT_INV_STAG;
990 		else
991 			*ecode = RDMAP_STAG_NOT_ASSOC;
992 		break;
993 	case T4_ERR_QPID:
994 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
995 		*ecode = RDMAP_STAG_NOT_ASSOC;
996 		break;
997 	case T4_ERR_ACCESS:
998 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
999 		*ecode = RDMAP_ACC_VIOL;
1000 		break;
1001 	case T4_ERR_WRAP:
1002 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1003 		*ecode = RDMAP_TO_WRAP;
1004 		break;
1005 	case T4_ERR_BOUND:
1006 		if (tagged) {
1007 			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1008 			*ecode = DDPT_BASE_BOUNDS;
1009 		} else {
1010 			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1011 			*ecode = RDMAP_BASE_BOUNDS;
1012 		}
1013 		break;
1014 	case T4_ERR_INVALIDATE_SHARED_MR:
1015 	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1016 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1017 		*ecode = RDMAP_CANT_INV_STAG;
1018 		break;
1019 	case T4_ERR_ECC:
1020 	case T4_ERR_ECC_PSTAG:
1021 	case T4_ERR_INTERNAL_ERR:
1022 		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1023 		*ecode = 0;
1024 		break;
1025 	case T4_ERR_OUT_OF_RQE:
1026 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1027 		*ecode = DDPU_INV_MSN_NOBUF;
1028 		break;
1029 	case T4_ERR_PBL_ADDR_BOUND:
1030 		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1031 		*ecode = DDPT_BASE_BOUNDS;
1032 		break;
1033 	case T4_ERR_CRC:
1034 		*layer_type = LAYER_MPA|DDP_LLP;
1035 		*ecode = MPA_CRC_ERR;
1036 		break;
1037 	case T4_ERR_MARKER:
1038 		*layer_type = LAYER_MPA|DDP_LLP;
1039 		*ecode = MPA_MARKER_ERR;
1040 		break;
1041 	case T4_ERR_PDU_LEN_ERR:
1042 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1043 		*ecode = DDPU_MSG_TOOBIG;
1044 		break;
1045 	case T4_ERR_DDP_VERSION:
1046 		if (tagged) {
1047 			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1048 			*ecode = DDPT_INV_VERS;
1049 		} else {
1050 			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1051 			*ecode = DDPU_INV_VERS;
1052 		}
1053 		break;
1054 	case T4_ERR_RDMA_VERSION:
1055 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1056 		*ecode = RDMAP_INV_VERS;
1057 		break;
1058 	case T4_ERR_OPCODE:
1059 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1060 		*ecode = RDMAP_INV_OPCODE;
1061 		break;
1062 	case T4_ERR_DDP_QUEUE_NUM:
1063 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1064 		*ecode = DDPU_INV_QN;
1065 		break;
1066 	case T4_ERR_MSN:
1067 	case T4_ERR_MSN_GAP:
1068 	case T4_ERR_MSN_RANGE:
1069 	case T4_ERR_IRD_OVERFLOW:
1070 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1071 		*ecode = DDPU_INV_MSN_RANGE;
1072 		break;
1073 	case T4_ERR_TBIT:
1074 		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1075 		*ecode = 0;
1076 		break;
1077 	case T4_ERR_MO:
1078 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1079 		*ecode = DDPU_INV_MO;
1080 		break;
1081 	default:
1082 		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1083 		*ecode = 0;
1084 		break;
1085 	}
1086 }
1087 
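/*
 * Post an FW_RI_INIT_WR of type TERMINATE carrying the termination codes
 * derived from the error CQE (or from the QP's saved layer/ecode).
 */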
1088 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1089 			   gfp_t gfp)
1090 {
1091 	struct fw_ri_wr *wqe;
1092 	struct sk_buff *skb;
1093 	struct terminate_message *term;
1094 
1095 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1096 	     qhp->ep->hwtid);
1097 
1098 	skb = alloc_skb(sizeof *wqe, gfp);
1099 	if (!skb)
1100 		return;
1101 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1102 
1103 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1104 	memset(wqe, 0, sizeof *wqe);
1105 	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
1106 	wqe->flowid_len16 = cpu_to_be32(
1107 		FW_WR_FLOWID_V(qhp->ep->hwtid) |
1108 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1109 
1110 	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1111 	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
1112 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
1113 	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1114 		term->layer_etype = qhp->attr.layer_etype;
1115 		term->ecode = qhp->attr.ecode;
1116 	} else
1117 		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1118 	c4iw_ofld_send(&qhp->rhp->rdev, skb);
1119 }
1120 
1121 /*
1122  * Called with the qhp mutex held; takes the cq and qp spinlocks itself.
1123  */
1124 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1125 		       struct c4iw_cq *schp)
1126 {
1127 	int count;
1128 	int rq_flushed, sq_flushed;
1129 	unsigned long flag;
1130 
1131 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
1132 
1133 	/* locking hierarchy: cq lock first, then qp lock. */
1134 	spin_lock_irqsave(&rchp->lock, flag);
1135 	spin_lock(&qhp->lock);
1136 
1137 	if (qhp->wq.flushed) {
1138 		spin_unlock(&qhp->lock);
1139 		spin_unlock_irqrestore(&rchp->lock, flag);
1140 		return;
1141 	}
1142 	qhp->wq.flushed = 1;
1143 
1144 	c4iw_flush_hw_cq(rchp);
1145 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1146 	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1147 	spin_unlock(&qhp->lock);
1148 	spin_unlock_irqrestore(&rchp->lock, flag);
1149 
1150 	/* locking hierarchy: cq lock first, then qp lock. */
1151 	spin_lock_irqsave(&schp->lock, flag);
1152 	spin_lock(&qhp->lock);
1153 	if (schp != rchp)
1154 		c4iw_flush_hw_cq(schp);
1155 	sq_flushed = c4iw_flush_sq(qhp);
1156 	spin_unlock(&qhp->lock);
1157 	spin_unlock_irqrestore(&schp->lock, flag);
1158 
1159 	if (schp == rchp) {
1160 		if (t4_clear_cq_armed(&rchp->cq) &&
1161 		    (rq_flushed || sq_flushed)) {
1162 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1163 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
1164 						   rchp->ibcq.cq_context);
1165 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1166 		}
1167 	} else {
1168 		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
1169 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1170 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
1171 						   rchp->ibcq.cq_context);
1172 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1173 		}
1174 		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
1175 			spin_lock_irqsave(&schp->comp_handler_lock, flag);
1176 			(*schp->ibcq.comp_handler)(&schp->ibcq,
1177 						   schp->ibcq.cq_context);
1178 			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1179 		}
1180 	}
1181 }
1182 
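/*
 * Mark the WQ in error and flush the QP.  For user QPs only the CQs are
 * marked in error and their completion handlers invoked; kernel QPs are
 * flushed via __flush_qp().
 */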
1183 static void flush_qp(struct c4iw_qp *qhp)
1184 {
1185 	struct c4iw_cq *rchp, *schp;
1186 	unsigned long flag;
1187 
1188 	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1189 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
1190 
1191 	t4_set_wq_in_error(&qhp->wq);
1192 	if (qhp->ibqp.uobject) {
1193 		t4_set_cq_in_error(&rchp->cq);
1194 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1195 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1196 		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1197 		if (schp != rchp) {
1198 			t4_set_cq_in_error(&schp->cq);
1199 			spin_lock_irqsave(&schp->comp_handler_lock, flag);
1200 			(*schp->ibcq.comp_handler)(&schp->ibcq,
1201 					schp->ibcq.cq_context);
1202 			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1203 		}
1204 		return;
1205 	}
1206 	__flush_qp(qhp, rchp, schp);
1207 }
1208 
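/*
 * Post an FW_RI_INIT_WR of type FINI to move the QP out of RDMA mode and
 * wait for the firmware to complete it.
 */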
1209 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1210 		     struct c4iw_ep *ep)
1211 {
1212 	struct fw_ri_wr *wqe;
1213 	int ret;
1214 	struct sk_buff *skb;
1215 
1216 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1217 	     ep->hwtid);
1218 
1219 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
1220 	if (!skb)
1221 		return -ENOMEM;
1222 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1223 
1224 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1225 	memset(wqe, 0, sizeof *wqe);
1226 	wqe->op_compl = cpu_to_be32(
1227 		FW_WR_OP_V(FW_RI_INIT_WR) |
1228 		FW_WR_COMPL_F);
1229 	wqe->flowid_len16 = cpu_to_be32(
1230 		FW_WR_FLOWID_V(ep->hwtid) |
1231 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1232 	wqe->cookie = (uintptr_t)&ep->com.wr_wait;
1233 
1234 	wqe->u.fini.type = FW_RI_TYPE_FINI;
1235 	ret = c4iw_ofld_send(&rhp->rdev, skb);
1236 	if (ret)
1237 		goto out;
1238 
1239 	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
1240 			     qhp->wq.sq.qid, __func__);
1241 out:
1242 	PDBG("%s ret %d\n", __func__, ret);
1243 	return ret;
1244 }
1245 
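/*
 * Build the 0-length RDMA WRITE or READ REQ work request used as the
 * peer-to-peer RTR message at connection setup, based on the negotiated
 * p2p type.
 */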
1246 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1247 {
1248 	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
1249 	memset(&init->u, 0, sizeof init->u);
1250 	switch (p2p_type) {
1251 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1252 		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1253 		init->u.write.stag_sink = cpu_to_be32(1);
1254 		init->u.write.to_sink = cpu_to_be64(1);
1255 		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1256 		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1257 						   sizeof(struct fw_ri_immd),
1258 						   16);
1259 		break;
1260 	case FW_RI_INIT_P2PTYPE_READ_REQ:
1261 		init->u.write.opcode = FW_RI_RDMA_READ_WR;
1262 		init->u.read.stag_src = cpu_to_be32(1);
1263 		init->u.read.to_src_lo = cpu_to_be32(1);
1264 		init->u.read.stag_sink = cpu_to_be32(1);
1265 		init->u.read.to_sink_lo = cpu_to_be32(1);
1266 		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1267 		break;
1268 	}
1269 }
1270 
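/*
 * Reserve IRD resources, build and post the FW_RI_INIT_WR that
 * transitions the QP to RTS (including the RTR message when we are the
 * initiator), and wait for the firmware reply.  IRD is released on error.
 */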
1271 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1272 {
1273 	struct fw_ri_wr *wqe;
1274 	int ret;
1275 	struct sk_buff *skb;
1276 
1277 	PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
1278 	     qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1279 
1280 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
1281 	if (!skb) {
1282 		ret = -ENOMEM;
1283 		goto out;
1284 	}
1285 	ret = alloc_ird(rhp, qhp->attr.max_ird);
1286 	if (ret) {
1287 		qhp->attr.max_ird = 0;
1288 		kfree_skb(skb);
1289 		goto out;
1290 	}
1291 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1292 
1293 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1294 	memset(wqe, 0, sizeof *wqe);
1295 	wqe->op_compl = cpu_to_be32(
1296 		FW_WR_OP_V(FW_RI_INIT_WR) |
1297 		FW_WR_COMPL_F);
1298 	wqe->flowid_len16 = cpu_to_be32(
1299 		FW_WR_FLOWID_V(qhp->ep->hwtid) |
1300 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1301 
1302 	wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
1303 
1304 	wqe->u.init.type = FW_RI_TYPE_INIT;
1305 	wqe->u.init.mpareqbit_p2ptype =
1306 		FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1307 		FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1308 	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1309 	if (qhp->attr.mpa_attr.recv_marker_enabled)
1310 		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1311 	if (qhp->attr.mpa_attr.xmit_marker_enabled)
1312 		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1313 	if (qhp->attr.mpa_attr.crc_enabled)
1314 		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1315 
1316 	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1317 			    FW_RI_QP_RDMA_WRITE_ENABLE |
1318 			    FW_RI_QP_BIND_ENABLE;
1319 	if (!qhp->ibqp.uobject)
1320 		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1321 				     FW_RI_QP_STAG0_ENABLE;
1322 	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1323 	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1324 	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1325 	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1326 	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1327 	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1328 	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1329 	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1330 	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1331 	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1332 	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1333 	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1334 	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1335 					 rhp->rdev.lldi.vr->rq.start);
1336 	if (qhp->attr.mpa_attr.initiator)
1337 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1338 
1339 	ret = c4iw_ofld_send(&rhp->rdev, skb);
1340 	if (ret)
1341 		goto err1;
1342 
1343 	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
1344 				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1345 	if (!ret)
1346 		goto out;
1347 err1:
1348 	free_ird(rhp, qhp->attr.max_ird);
1349 out:
1350 	PDBG("%s ret %d\n", __func__, ret);
1351 	return ret;
1352 }
1353 
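/*
 * Drive the iWARP QP state machine: apply attribute changes while IDLE,
 * handle the IDLE/RTS/CLOSING/TERMINATE/ERROR transitions under
 * qhp->mutex, and post any resulting TERMINATE or connection disconnect
 * after the mutex is dropped.
 */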
1354 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1355 		   enum c4iw_qp_attr_mask mask,
1356 		   struct c4iw_qp_attributes *attrs,
1357 		   int internal)
1358 {
1359 	int ret = 0;
1360 	struct c4iw_qp_attributes newattr = qhp->attr;
1361 	int disconnect = 0;
1362 	int terminate = 0;
1363 	int abort = 0;
1364 	int free = 0;
1365 	struct c4iw_ep *ep = NULL;
1366 
1367 	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1368 	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1369 	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1370 
1371 	mutex_lock(&qhp->mutex);
1372 
1373 	/* Process attr changes if in IDLE */
1374 	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1375 		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1376 			ret = -EIO;
1377 			goto out;
1378 		}
1379 		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1380 			newattr.enable_rdma_read = attrs->enable_rdma_read;
1381 		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1382 			newattr.enable_rdma_write = attrs->enable_rdma_write;
1383 		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1384 			newattr.enable_bind = attrs->enable_bind;
1385 		if (mask & C4IW_QP_ATTR_MAX_ORD) {
1386 			if (attrs->max_ord > c4iw_max_read_depth) {
1387 				ret = -EINVAL;
1388 				goto out;
1389 			}
1390 			newattr.max_ord = attrs->max_ord;
1391 		}
1392 		if (mask & C4IW_QP_ATTR_MAX_IRD) {
1393 			if (attrs->max_ird > cur_max_read_depth(rhp)) {
1394 				ret = -EINVAL;
1395 				goto out;
1396 			}
1397 			newattr.max_ird = attrs->max_ird;
1398 		}
1399 		qhp->attr = newattr;
1400 	}
1401 
1402 	if (mask & C4IW_QP_ATTR_SQ_DB) {
1403 		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1404 		goto out;
1405 	}
1406 	if (mask & C4IW_QP_ATTR_RQ_DB) {
1407 		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1408 		goto out;
1409 	}
1410 
1411 	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1412 		goto out;
1413 	if (qhp->attr.state == attrs->next_state)
1414 		goto out;
1415 
1416 	switch (qhp->attr.state) {
1417 	case C4IW_QP_STATE_IDLE:
1418 		switch (attrs->next_state) {
1419 		case C4IW_QP_STATE_RTS:
1420 			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1421 				ret = -EINVAL;
1422 				goto out;
1423 			}
1424 			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1425 				ret = -EINVAL;
1426 				goto out;
1427 			}
1428 			qhp->attr.mpa_attr = attrs->mpa_attr;
1429 			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1430 			qhp->ep = qhp->attr.llp_stream_handle;
1431 			set_state(qhp, C4IW_QP_STATE_RTS);
1432 
1433 			/*
1434 			 * Ref the endpoint here and deref when we
1435 			 * disassociate the endpoint from the QP.  This
1436 			 * happens in CLOSING->IDLE transition or *->ERROR
1437 			 * transition.
1438 			 */
1439 			c4iw_get_ep(&qhp->ep->com);
1440 			ret = rdma_init(rhp, qhp);
1441 			if (ret)
1442 				goto err;
1443 			break;
1444 		case C4IW_QP_STATE_ERROR:
1445 			set_state(qhp, C4IW_QP_STATE_ERROR);
1446 			flush_qp(qhp);
1447 			break;
1448 		default:
1449 			ret = -EINVAL;
1450 			goto out;
1451 		}
1452 		break;
1453 	case C4IW_QP_STATE_RTS:
1454 		switch (attrs->next_state) {
1455 		case C4IW_QP_STATE_CLOSING:
1456 			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1457 			t4_set_wq_in_error(&qhp->wq);
1458 			set_state(qhp, C4IW_QP_STATE_CLOSING);
1459 			ep = qhp->ep;
1460 			if (!internal) {
1461 				abort = 0;
1462 				disconnect = 1;
1463 				c4iw_get_ep(&qhp->ep->com);
1464 			}
1465 			ret = rdma_fini(rhp, qhp, ep);
1466 			if (ret)
1467 				goto err;
1468 			break;
1469 		case C4IW_QP_STATE_TERMINATE:
1470 			t4_set_wq_in_error(&qhp->wq);
1471 			set_state(qhp, C4IW_QP_STATE_TERMINATE);
1472 			qhp->attr.layer_etype = attrs->layer_etype;
1473 			qhp->attr.ecode = attrs->ecode;
1474 			ep = qhp->ep;
1475 			if (!internal) {
1476 				c4iw_get_ep(&qhp->ep->com);
1477 				terminate = 1;
1478 				disconnect = 1;
1479 			} else {
1480 				terminate = qhp->attr.send_term;
1481 				ret = rdma_fini(rhp, qhp, ep);
1482 				if (ret)
1483 					goto err;
1484 			}
1485 			break;
1486 		case C4IW_QP_STATE_ERROR:
1487 			t4_set_wq_in_error(&qhp->wq);
1488 			set_state(qhp, C4IW_QP_STATE_ERROR);
1489 			if (!internal) {
1490 				abort = 1;
1491 				disconnect = 1;
1492 				ep = qhp->ep;
1493 				c4iw_get_ep(&qhp->ep->com);
1494 			}
1495 			goto err;
1496 			break;
1497 		default:
1498 			ret = -EINVAL;
1499 			goto out;
1500 		}
1501 		break;
1502 	case C4IW_QP_STATE_CLOSING:
1503 		if (!internal) {
1504 			ret = -EINVAL;
1505 			goto out;
1506 		}
1507 		switch (attrs->next_state) {
1508 		case C4IW_QP_STATE_IDLE:
1509 			flush_qp(qhp);
1510 			set_state(qhp, C4IW_QP_STATE_IDLE);
1511 			qhp->attr.llp_stream_handle = NULL;
1512 			c4iw_put_ep(&qhp->ep->com);
1513 			qhp->ep = NULL;
1514 			wake_up(&qhp->wait);
1515 			break;
1516 		case C4IW_QP_STATE_ERROR:
1517 			goto err;
1518 		default:
1519 			ret = -EINVAL;
1520 			goto err;
1521 		}
1522 		break;
1523 	case C4IW_QP_STATE_ERROR:
1524 		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1525 			ret = -EINVAL;
1526 			goto out;
1527 		}
1528 		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1529 			ret = -EINVAL;
1530 			goto out;
1531 		}
1532 		set_state(qhp, C4IW_QP_STATE_IDLE);
1533 		break;
1534 	case C4IW_QP_STATE_TERMINATE:
1535 		if (!internal) {
1536 			ret = -EINVAL;
1537 			goto out;
1538 		}
1539 		goto err;
1540 		break;
1541 	default:
1542 		pr_err("%s in a bad state %d\n",
1543 		       __func__, qhp->attr.state);
1544 		ret = -EINVAL;
1545 		goto err;
1546 		break;
1547 	}
1548 	goto out;
1549 err:
1550 	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1551 	     qhp->wq.sq.qid);
1552 
1553 	/* disassociate the LLP connection */
1554 	qhp->attr.llp_stream_handle = NULL;
1555 	if (!ep)
1556 		ep = qhp->ep;
1557 	qhp->ep = NULL;
1558 	set_state(qhp, C4IW_QP_STATE_ERROR);
1559 	free = 1;
1560 	abort = 1;
1561 	BUG_ON(!ep);
1562 	flush_qp(qhp);
1563 	wake_up(&qhp->wait);
1564 out:
1565 	mutex_unlock(&qhp->mutex);
1566 
1567 	if (terminate)
1568 		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1569 
1570 	/*
1571 	 * If disconnect is 1, then we need to initiate a disconnect
1572 	 * on the EP.  This can be a normal close (RTS->CLOSING) or
1573 	 * an abnormal close (RTS/CLOSING->ERROR).
1574 	 */
1575 	if (disconnect) {
1576 		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1577 							 GFP_KERNEL);
1578 		c4iw_put_ep(&ep->com);
1579 	}
1580 
1581 	/*
1582 	 * If free is 1, then we've disassociated the EP from the QP
1583 	 * and we need to dereference the EP.
1584 	 */
1585 	if (free)
1586 		c4iw_put_ep(&ep->com);
1587 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1588 	return ret;
1589 }
1590 
1591 int c4iw_destroy_qp(struct ib_qp *ib_qp)
1592 {
1593 	struct c4iw_dev *rhp;
1594 	struct c4iw_qp *qhp;
1595 	struct c4iw_qp_attributes attrs;
1596 	struct c4iw_ucontext *ucontext;
1597 
1598 	qhp = to_c4iw_qp(ib_qp);
1599 	rhp = qhp->rhp;
1600 
1601 	attrs.next_state = C4IW_QP_STATE_ERROR;
1602 	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1603 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1604 	else
1605 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1606 	wait_event(qhp->wait, !qhp->ep);
1607 
1608 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1609 	atomic_dec(&qhp->refcnt);
1610 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1611 
1612 	spin_lock_irq(&rhp->lock);
1613 	if (!list_empty(&qhp->db_fc_entry))
1614 		list_del_init(&qhp->db_fc_entry);
1615 	spin_unlock_irq(&rhp->lock);
1616 	free_ird(rhp, qhp->attr.max_ird);
1617 
1618 	ucontext = ib_qp->uobject ?
1619 		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1620 	destroy_qp(&rhp->rdev, &qhp->wq,
1621 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1622 
1623 	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1624 	kfree(qhp);
1625 	return 0;
1626 }
1627 
1628 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1629 			     struct ib_udata *udata)
1630 {
1631 	struct c4iw_dev *rhp;
1632 	struct c4iw_qp *qhp;
1633 	struct c4iw_pd *php;
1634 	struct c4iw_cq *schp;
1635 	struct c4iw_cq *rchp;
1636 	struct c4iw_create_qp_resp uresp;
1637 	unsigned int sqsize, rqsize;
1638 	struct c4iw_ucontext *ucontext;
1639 	int ret;
1640 	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
1641 
1642 	PDBG("%s ib_pd %p\n", __func__, pd);
1643 
1644 	if (attrs->qp_type != IB_QPT_RC)
1645 		return ERR_PTR(-EINVAL);
1646 
1647 	php = to_c4iw_pd(pd);
1648 	rhp = php->rhp;
1649 	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1650 	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1651 	if (!schp || !rchp)
1652 		return ERR_PTR(-EINVAL);
1653 
1654 	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1655 		return ERR_PTR(-EINVAL);
1656 
1657 	if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
1658 		return ERR_PTR(-E2BIG);
1659 	rqsize = attrs->cap.max_recv_wr + 1;
1660 	if (rqsize < 8)
1661 		rqsize = 8;
1662 
1663 	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
1664 		return ERR_PTR(-E2BIG);
1665 	sqsize = attrs->cap.max_send_wr + 1;
1666 	if (sqsize < 8)
1667 		sqsize = 8;
1668 
1669 	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1670 
1671 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1672 	if (!qhp)
1673 		return ERR_PTR(-ENOMEM);
1674 	qhp->wq.sq.size = sqsize;
1675 	qhp->wq.sq.memsize =
1676 		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1677 		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
1678 	qhp->wq.sq.flush_cidx = -1;
1679 	qhp->wq.rq.size = rqsize;
1680 	qhp->wq.rq.memsize =
1681 		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1682 		sizeof(*qhp->wq.rq.queue);
1683 
1684 	if (ucontext) {
1685 		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1686 		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1687 	}
1688 
1689 	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1690 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1691 	if (ret)
1692 		goto err1;
1693 
1694 	attrs->cap.max_recv_wr = rqsize - 1;
1695 	attrs->cap.max_send_wr = sqsize - 1;
1696 	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1697 
1698 	qhp->rhp = rhp;
1699 	qhp->attr.pd = php->pdid;
1700 	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1701 	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1702 	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1703 	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1704 	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1705 	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1706 	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1707 	qhp->attr.state = C4IW_QP_STATE_IDLE;
1708 	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1709 	qhp->attr.enable_rdma_read = 1;
1710 	qhp->attr.enable_rdma_write = 1;
1711 	qhp->attr.enable_bind = 1;
1712 	qhp->attr.max_ord = 0;
1713 	qhp->attr.max_ird = 0;
1714 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1715 	spin_lock_init(&qhp->lock);
1716 	mutex_init(&qhp->mutex);
1717 	init_waitqueue_head(&qhp->wait);
1718 	atomic_set(&qhp->refcnt, 1);
1719 
1720 	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1721 	if (ret)
1722 		goto err2;
1723 
1724 	if (udata) {
1725 		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
1726 		if (!mm1) {
1727 			ret = -ENOMEM;
1728 			goto err3;
1729 		}
1730 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
1731 		if (!mm2) {
1732 			ret = -ENOMEM;
1733 			goto err4;
1734 		}
1735 		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
1736 		if (!mm3) {
1737 			ret = -ENOMEM;
1738 			goto err5;
1739 		}
1740 		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
1741 		if (!mm4) {
1742 			ret = -ENOMEM;
1743 			goto err6;
1744 		}
1745 		if (t4_sq_onchip(&qhp->wq.sq)) {
1746 			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
1747 			if (!mm5) {
1748 				ret = -ENOMEM;
1749 				goto err7;
1750 			}
1751 			uresp.flags = C4IW_QPF_ONCHIP;
1752 		} else
1753 			uresp.flags = 0;
1754 		uresp.qid_mask = rhp->rdev.qpmask;
1755 		uresp.sqid = qhp->wq.sq.qid;
1756 		uresp.sq_size = qhp->wq.sq.size;
1757 		uresp.sq_memsize = qhp->wq.sq.memsize;
1758 		uresp.rqid = qhp->wq.rq.qid;
1759 		uresp.rq_size = qhp->wq.rq.size;
1760 		uresp.rq_memsize = qhp->wq.rq.memsize;
1761 		spin_lock(&ucontext->mmap_lock);
1762 		if (mm5) {
1763 			uresp.ma_sync_key = ucontext->key;
1764 			ucontext->key += PAGE_SIZE;
1765 		} else {
1766 			uresp.ma_sync_key =  0;
1767 		}
1768 		uresp.sq_key = ucontext->key;
1769 		ucontext->key += PAGE_SIZE;
1770 		uresp.rq_key = ucontext->key;
1771 		ucontext->key += PAGE_SIZE;
1772 		uresp.sq_db_gts_key = ucontext->key;
1773 		ucontext->key += PAGE_SIZE;
1774 		uresp.rq_db_gts_key = ucontext->key;
1775 		ucontext->key += PAGE_SIZE;
1776 		spin_unlock(&ucontext->mmap_lock);
1777 		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1778 		if (ret)
1779 			goto err8;
1780 		mm1->key = uresp.sq_key;
1781 		mm1->addr = qhp->wq.sq.phys_addr;
1782 		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1783 		insert_mmap(ucontext, mm1);
1784 		mm2->key = uresp.rq_key;
1785 		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1786 		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1787 		insert_mmap(ucontext, mm2);
1788 		mm3->key = uresp.sq_db_gts_key;
1789 		mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa;
1790 		mm3->len = PAGE_SIZE;
1791 		insert_mmap(ucontext, mm3);
1792 		mm4->key = uresp.rq_db_gts_key;
1793 		mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa;
1794 		mm4->len = PAGE_SIZE;
1795 		insert_mmap(ucontext, mm4);
1796 		if (mm5) {
1797 			mm5->key = uresp.ma_sync_key;
1798 			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
1799 				    + PCIE_MA_SYNC_A) & PAGE_MASK;
1800 			mm5->len = PAGE_SIZE;
1801 			insert_mmap(ucontext, mm5);
1802 		}
1803 	}
1804 	qhp->ibqp.qp_num = qhp->wq.sq.qid;
1805 	init_timer(&(qhp->timer));
1806 	INIT_LIST_HEAD(&qhp->db_fc_entry);
1807 	PDBG("%s sq id %u size %u memsize %zu num_entries %u "
1808 	     "rq id %u size %u memsize %zu num_entries %u\n", __func__,
1809 	     qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
1810 	     attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
1811 	     qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
1812 	return &qhp->ibqp;
1813 err8:
1814 	kfree(mm5);
1815 err7:
1816 	kfree(mm4);
1817 err6:
1818 	kfree(mm3);
1819 err5:
1820 	kfree(mm2);
1821 err4:
1822 	kfree(mm1);
1823 err3:
1824 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1825 err2:
1826 	destroy_qp(&rhp->rdev, &qhp->wq,
1827 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1828 err1:
1829 	kfree(qhp);
1830 	return ERR_PTR(ret);
1831 }
1832 
1833 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1834 		      int attr_mask, struct ib_udata *udata)
1835 {
1836 	struct c4iw_dev *rhp;
1837 	struct c4iw_qp *qhp;
1838 	enum c4iw_qp_attr_mask mask = 0;
1839 	struct c4iw_qp_attributes attrs;
1840 
1841 	PDBG("%s ib_qp %p\n", __func__, ibqp);
1842 
1843 	/* iwarp does not support the RTR state */
1844 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1845 		attr_mask &= ~IB_QP_STATE;
1846 
1847 	/* Make sure we still have something left to do */
1848 	if (!attr_mask)
1849 		return 0;
1850 
1851 	memset(&attrs, 0, sizeof attrs);
1852 	qhp = to_c4iw_qp(ibqp);
1853 	rhp = qhp->rhp;
1854 
1855 	attrs.next_state = c4iw_convert_state(attr->qp_state);
1856 	attrs.enable_rdma_read = (attr->qp_access_flags &
1857 			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
1858 	attrs.enable_rdma_write = (attr->qp_access_flags &
1859 				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1860 	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1861 
1862 
1863 	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1864 	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1865 			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
1866 			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1867 			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1868 
1869 	/*
1870 	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
1871 	 * ringing the queue db when we're in DB_FULL mode.
1872 	 * Only allow this on T4 devices.
1873 	 */
1874 	attrs.sq_db_inc = attr->sq_psn;
1875 	attrs.rq_db_inc = attr->rq_psn;
1876 	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
1877 	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
1878 	if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
1879 	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
1880 		return -EINVAL;
1881 
1882 	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1883 }
1884 
1885 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1886 {
1887 	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1888 	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1889 }
1890 
1891 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1892 		     int attr_mask, struct ib_qp_init_attr *init_attr)
1893 {
1894 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
1895 
1896 	memset(attr, 0, sizeof *attr);
1897 	memset(init_attr, 0, sizeof *init_attr);
1898 	attr->qp_state = to_ib_qp_state(qhp->attr.state);
1899 	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
1900 	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
1901 	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
1902 	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
1903 	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
1904 	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
1905 	return 0;
1906 }
1907