1 /*
2  * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/spinlock.h>
35 #include <rdma/ib_smi.h>
36 
37 #include "qib.h"
38 #include "qib_mad.h"
39 
40 /*
41  * Validate a RWQE and fill in the SGE state.
42  * Return 1 if OK.
43  */
44 static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
45 {
46 	int i, j, ret;
47 	struct ib_wc wc;
48 	struct rvt_lkey_table *rkt;
49 	struct rvt_pd *pd;
50 	struct rvt_sge_state *ss;
51 
52 	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
53 	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
54 	ss = &qp->r_sge;
55 	ss->sg_list = qp->r_sg_list;
56 	qp->r_len = 0;
57 	for (i = j = 0; i < wqe->num_sge; i++) {
58 		if (wqe->sg_list[i].length == 0)
59 			continue;
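		/*
		 * The first valid SGE is kept in ss->sge; later ones go
		 * into ss->sg_list[], hence the j ? ... : ... target below.
		 */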
60 		/* Check LKEY */
61 		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
62 				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
63 			goto bad_lkey;
64 		qp->r_len += wqe->sg_list[i].length;
65 		j++;
66 	}
67 	ss->num_sge = j;
68 	ss->total_len = qp->r_len;
69 	ret = 1;
70 	goto bail;
71 
72 bad_lkey:
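	/*
	 * Drop the MR references taken above, newest first; entry 0 is
	 * in ss->sge, entries 1..j-1 in ss->sg_list[].
	 */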
73 	while (j) {
74 		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
75 
76 		rvt_put_mr(sge->mr);
77 	}
78 	ss->num_sge = 0;
79 	memset(&wc, 0, sizeof(wc));
80 	wc.wr_id = wqe->wr_id;
81 	wc.status = IB_WC_LOC_PROT_ERR;
82 	wc.opcode = IB_WC_RECV;
83 	wc.qp = &qp->ibqp;
84 	/* Signal solicited completion event. */
85 	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
86 	ret = 0;
87 bail:
88 	return ret;
89 }
90 
91 /**
92  * qib_get_rwqe - copy the next RWQE into the QP's RWQE
93  * @qp: the QP
94  * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
95  *
96  * Return -1 if there is a local error, 0 if no RWQE is available,
97  * otherwise return 1.
98  *
99  * Can be called from interrupt level.
100  */
101 int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
102 {
103 	unsigned long flags;
104 	struct rvt_rq *rq;
105 	struct rvt_rwq *wq;
106 	struct rvt_srq *srq;
107 	struct rvt_rwqe *wqe;
108 	void (*handler)(struct ib_event *, void *);
109 	u32 tail;
110 	int ret;
111 
112 	if (qp->ibqp.srq) {
113 		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
114 		handler = srq->ibsrq.event_handler;
115 		rq = &srq->rq;
116 	} else {
117 		srq = NULL;
118 		handler = NULL;
119 		rq = &qp->r_rq;
120 	}
121 
122 	spin_lock_irqsave(&rq->lock, flags);
123 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
124 		ret = 0;
125 		goto unlock;
126 	}
127 
128 	wq = rq->wq;
129 	tail = wq->tail;
130 	/* Validate tail before using it since it is user writable. */
131 	if (tail >= rq->size)
132 		tail = 0;
133 	if (unlikely(tail == wq->head)) {
134 		ret = 0;
135 		goto unlock;
136 	}
137 	/* Make sure entry is read after head index is read. */
138 	smp_rmb();
139 	wqe = rvt_get_rwqe_ptr(rq, tail);
140 	/*
141 	 * Even though we update the tail index in memory, the verbs
142 	 * consumer is not supposed to post more entries until a
143 	 * completion is generated.
144 	 */
145 	if (++tail >= rq->size)
146 		tail = 0;
147 	wq->tail = tail;
148 	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
149 		ret = -1;
150 		goto unlock;
151 	}
152 	qp->r_wr_id = wqe->wr_id;
153 
154 	ret = 1;
155 	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
156 	if (handler) {
157 		u32 n;
158 
159 		/*
160 		 * Validate head pointer value and compute
161 		 * the number of remaining WQEs.
162 		 */
163 		n = wq->head;
164 		if (n >= rq->size)
165 			n = 0;
166 		if (n < tail)
167 			n += rq->size - tail;
168 		else
169 			n -= tail;
170 		if (n < srq->limit) {
171 			struct ib_event ev;
172 
173 			srq->limit = 0;
174 			spin_unlock_irqrestore(&rq->lock, flags);
175 			ev.device = qp->ibqp.device;
176 			ev.element.srq = qp->ibqp.srq;
177 			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
178 			handler(&ev, srq->ibsrq.srq_context);
179 			goto bail;
180 		}
181 	}
182 unlock:
183 	spin_unlock_irqrestore(&rq->lock, flags);
184 bail:
185 	return ret;
186 }
187 
188 /*
189  * Switch to alternate path.
 * The QP's s_lock should be held and interrupts disabled.
191  */
192 void qib_migrate_qp(struct rvt_qp *qp)
193 {
194 	struct ib_event ev;
195 
196 	qp->s_mig_state = IB_MIG_MIGRATED;
197 	qp->remote_ah_attr = qp->alt_ah_attr;
198 	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
199 	qp->s_pkey_index = qp->s_alt_pkey_index;
200 
201 	ev.device = qp->ibqp.device;
202 	ev.element.qp = &qp->ibqp;
203 	ev.event = IB_EVENT_PATH_MIG;
204 	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
205 }
206 
207 static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
208 {
209 	if (!index) {
210 		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
211 
212 		return ppd->guid;
213 	}
214 	return ibp->guids[index - 1];
215 }
216 
217 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
218 {
219 	return (gid->global.interface_id == id &&
220 		(gid->global.subnet_prefix == gid_prefix ||
221 		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
222 }
223 
224 /*
225  *
226  * This should be called with the QP r_lock held.
227  *
228  * The s_lock will be acquired around the qib_migrate_qp() call.
229  */
230 int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
231 		      int has_grh, struct rvt_qp *qp, u32 bth0)
232 {
233 	__be64 guid;
234 	unsigned long flags;
235 
236 	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
237 		if (!has_grh) {
238 			if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
239 			    IB_AH_GRH)
240 				goto err;
241 		} else {
242 			const struct ib_global_route *grh;
243 
244 			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
245 			      IB_AH_GRH))
246 				goto err;
247 			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
248 			guid = get_sguid(ibp, grh->sgid_index);
249 			if (!gid_ok(&hdr->u.l.grh.dgid,
250 				    ibp->rvp.gid_prefix, guid))
251 				goto err;
252 			if (!gid_ok(&hdr->u.l.grh.sgid,
253 			    grh->dgid.global.subnet_prefix,
254 			    grh->dgid.global.interface_id))
255 				goto err;
256 		}
257 		if (!qib_pkey_ok((u16)bth0,
258 				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
259 			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
260 				      (u16)bth0,
261 				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
262 				      0, qp->ibqp.qp_num,
263 				      hdr->lrh[3], hdr->lrh[1]);
264 			goto err;
265 		}
266 		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
267 		if ((be16_to_cpu(hdr->lrh[3]) !=
268 		     rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
269 		    ppd_from_ibp(ibp)->port !=
270 			    rdma_ah_get_port_num(&qp->alt_ah_attr))
271 			goto err;
272 		spin_lock_irqsave(&qp->s_lock, flags);
273 		qib_migrate_qp(qp);
274 		spin_unlock_irqrestore(&qp->s_lock, flags);
275 	} else {
276 		if (!has_grh) {
277 			if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
278 			    IB_AH_GRH)
279 				goto err;
280 		} else {
281 			const struct ib_global_route *grh;
282 
283 			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
284 			      IB_AH_GRH))
285 				goto err;
286 			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
287 			guid = get_sguid(ibp, grh->sgid_index);
288 			if (!gid_ok(&hdr->u.l.grh.dgid,
289 				    ibp->rvp.gid_prefix, guid))
290 				goto err;
291 			if (!gid_ok(&hdr->u.l.grh.sgid,
292 			    grh->dgid.global.subnet_prefix,
293 			    grh->dgid.global.interface_id))
294 				goto err;
295 		}
296 		if (!qib_pkey_ok((u16)bth0,
297 				 qib_get_pkey(ibp, qp->s_pkey_index))) {
298 			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
299 				      (u16)bth0,
300 				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
301 				      0, qp->ibqp.qp_num,
302 				      hdr->lrh[3], hdr->lrh[1]);
303 			goto err;
304 		}
305 		/* Validate the SLID. See Ch. 9.6.1.5 */
306 		if (be16_to_cpu(hdr->lrh[3]) !=
307 		    rdma_ah_get_dlid(&qp->remote_ah_attr) ||
308 		    ppd_from_ibp(ibp)->port != qp->port_num)
309 			goto err;
310 		if (qp->s_mig_state == IB_MIG_REARM &&
311 		    !(bth0 & IB_BTH_MIG_REQ))
312 			qp->s_mig_state = IB_MIG_ARMED;
313 	}
314 
315 	return 0;
316 
317 err:
318 	return 1;
319 }
320 
321 /**
 * qib_ruc_loopback - handle UC and RC loopback requests
323  * @sqp: the sending QP
324  *
325  * This is called from qib_do_send() to
326  * forward a WQE addressed to the same HCA.
327  * Note that although we are single threaded due to the tasklet, we still
328  * have to protect against post_send().  We don't have to worry about
329  * receive interrupts since this is a connected protocol and all packets
330  * will pass through here.
331  */
332 static void qib_ruc_loopback(struct rvt_qp *sqp)
333 {
334 	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
335 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
336 	struct qib_devdata *dd = ppd->dd;
337 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
338 	struct rvt_qp *qp;
339 	struct rvt_swqe *wqe;
340 	struct rvt_sge *sge;
341 	unsigned long flags;
342 	struct ib_wc wc;
343 	u64 sdata;
344 	atomic64_t *maddr;
345 	enum ib_wc_status send_status;
346 	int release;
347 	int ret;
348 
349 	rcu_read_lock();
350 	/*
351 	 * Note that we check the responder QP state after
352 	 * checking the requester's state.
353 	 */
354 	qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
355 	if (!qp)
356 		goto done;
357 
358 	spin_lock_irqsave(&sqp->s_lock, flags);
359 
360 	/* Return if we are already busy processing a work request. */
361 	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
362 	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
363 		goto unlock;
364 
365 	sqp->s_flags |= RVT_S_BUSY;
366 
367 again:
368 	smp_read_barrier_depends(); /* see post_one_send() */
369 	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
370 		goto clr_busy;
371 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
372 
	/* Return if it is not OK to start a new work request. */
374 	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
375 		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
376 			goto clr_busy;
377 		/* We are in the error state, flush the work request. */
378 		send_status = IB_WC_WR_FLUSH_ERR;
379 		goto flush_send;
380 	}
381 
382 	/*
383 	 * We can rely on the entry not changing without the s_lock
384 	 * being held until we update s_last.
385 	 * We increment s_cur to indicate s_last is in progress.
386 	 */
387 	if (sqp->s_last == sqp->s_cur) {
388 		if (++sqp->s_cur >= sqp->s_size)
389 			sqp->s_cur = 0;
390 	}
391 	spin_unlock_irqrestore(&sqp->s_lock, flags);
392 
393 	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
394 	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
395 		ibp->rvp.n_pkt_drops++;
396 		/*
397 		 * For RC, the requester would timeout and retry so
398 		 * shortcut the timeouts and just signal too many retries.
399 		 */
400 		if (sqp->ibqp.qp_type == IB_QPT_RC)
401 			send_status = IB_WC_RETRY_EXC_ERR;
402 		else
403 			send_status = IB_WC_SUCCESS;
404 		goto serr;
405 	}
406 
407 	memset(&wc, 0, sizeof(wc));
408 	send_status = IB_WC_SUCCESS;
409 
410 	release = 1;
411 	sqp->s_sge.sge = wqe->sg_list[0];
412 	sqp->s_sge.sg_list = wqe->sg_list + 1;
413 	sqp->s_sge.num_sge = wqe->wr.num_sge;
414 	sqp->s_len = wqe->length;
415 	switch (wqe->wr.opcode) {
416 	case IB_WR_SEND_WITH_IMM:
417 		wc.wc_flags = IB_WC_WITH_IMM;
418 		wc.ex.imm_data = wqe->wr.ex.imm_data;
419 		/* FALLTHROUGH */
420 	case IB_WR_SEND:
421 		ret = qib_get_rwqe(qp, 0);
422 		if (ret < 0)
423 			goto op_err;
424 		if (!ret)
425 			goto rnr_nak;
426 		break;
427 
428 	case IB_WR_RDMA_WRITE_WITH_IMM:
429 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
430 			goto inv_err;
431 		wc.wc_flags = IB_WC_WITH_IMM;
432 		wc.ex.imm_data = wqe->wr.ex.imm_data;
433 		ret = qib_get_rwqe(qp, 1);
434 		if (ret < 0)
435 			goto op_err;
436 		if (!ret)
437 			goto rnr_nak;
438 		/* FALLTHROUGH */
439 	case IB_WR_RDMA_WRITE:
440 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
441 			goto inv_err;
442 		if (wqe->length == 0)
443 			break;
444 		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
445 					  wqe->rdma_wr.remote_addr,
446 					  wqe->rdma_wr.rkey,
447 					  IB_ACCESS_REMOTE_WRITE)))
448 			goto acc_err;
449 		qp->r_sge.sg_list = NULL;
450 		qp->r_sge.num_sge = 1;
451 		qp->r_sge.total_len = wqe->length;
452 		break;
453 
454 	case IB_WR_RDMA_READ:
455 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
456 			goto inv_err;
457 		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
458 					  wqe->rdma_wr.remote_addr,
459 					  wqe->rdma_wr.rkey,
460 					  IB_ACCESS_REMOTE_READ)))
461 			goto acc_err;
462 		release = 0;
463 		sqp->s_sge.sg_list = NULL;
464 		sqp->s_sge.num_sge = 1;
465 		qp->r_sge.sge = wqe->sg_list[0];
466 		qp->r_sge.sg_list = wqe->sg_list + 1;
467 		qp->r_sge.num_sge = wqe->wr.num_sge;
468 		qp->r_sge.total_len = wqe->length;
469 		break;
470 
471 	case IB_WR_ATOMIC_CMP_AND_SWP:
472 	case IB_WR_ATOMIC_FETCH_AND_ADD:
473 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
474 			goto inv_err;
475 		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
476 					  wqe->atomic_wr.remote_addr,
477 					  wqe->atomic_wr.rkey,
478 					  IB_ACCESS_REMOTE_ATOMIC)))
479 			goto acc_err;
480 		/* Perform atomic OP and save result. */
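		/*
		 * atomic64_add_return() returns the new value, so subtract
		 * sdata to recover the fetched (prior) value for
		 * FETCH_AND_ADD; cmpxchg() returns the prior value directly.
		 */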
481 		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
482 		sdata = wqe->atomic_wr.compare_add;
483 		*(u64 *) sqp->s_sge.sge.vaddr =
484 			(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
485 			(u64) atomic64_add_return(sdata, maddr) - sdata :
486 			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
487 				      sdata, wqe->atomic_wr.swap);
488 		rvt_put_mr(qp->r_sge.sge.mr);
489 		qp->r_sge.num_sge = 0;
490 		goto send_comp;
491 
492 	default:
493 		send_status = IB_WC_LOC_QP_OP_ERR;
494 		goto serr;
495 	}
496 
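	/*
	 * Copy min(s_len, sge->length, sge->sge_length) bytes per pass.
	 * When an SGE is fully consumed, advance to the next one; when a
	 * segment of the MR map runs out first, step to the next segment.
	 */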
497 	sge = &sqp->s_sge.sge;
498 	while (sqp->s_len) {
499 		u32 len = sqp->s_len;
500 
501 		if (len > sge->length)
502 			len = sge->length;
503 		if (len > sge->sge_length)
504 			len = sge->sge_length;
505 		BUG_ON(len == 0);
506 		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
507 		sge->vaddr += len;
508 		sge->length -= len;
509 		sge->sge_length -= len;
510 		if (sge->sge_length == 0) {
511 			if (!release)
512 				rvt_put_mr(sge->mr);
513 			if (--sqp->s_sge.num_sge)
514 				*sge = *sqp->s_sge.sg_list++;
515 		} else if (sge->length == 0 && sge->mr->lkey) {
516 			if (++sge->n >= RVT_SEGSZ) {
517 				if (++sge->m >= sge->mr->mapsz)
518 					break;
519 				sge->n = 0;
520 			}
521 			sge->vaddr =
522 				sge->mr->map[sge->m]->segs[sge->n].vaddr;
523 			sge->length =
524 				sge->mr->map[sge->m]->segs[sge->n].length;
525 		}
526 		sqp->s_len -= len;
527 	}
528 	if (release)
529 		rvt_put_ss(&qp->r_sge);
530 
531 	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
532 		goto send_comp;
533 
534 	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
535 		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
536 	else
537 		wc.opcode = IB_WC_RECV;
538 	wc.wr_id = qp->r_wr_id;
539 	wc.status = IB_WC_SUCCESS;
540 	wc.byte_len = wqe->length;
541 	wc.qp = &qp->ibqp;
542 	wc.src_qp = qp->remote_qpn;
543 	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
544 	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
545 	wc.port_num = 1;
546 	/* Signal completion event if the solicited bit is set. */
547 	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
548 		     wqe->wr.send_flags & IB_SEND_SOLICITED);
549 
550 send_comp:
551 	spin_lock_irqsave(&sqp->s_lock, flags);
552 	ibp->rvp.n_loop_pkts++;
553 flush_send:
554 	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
555 	qib_send_complete(sqp, wqe, send_status);
556 	goto again;
557 
558 rnr_nak:
559 	/* Handle RNR NAK */
560 	if (qp->ibqp.qp_type == IB_QPT_UC)
561 		goto send_comp;
562 	ibp->rvp.n_rnr_naks++;
563 	/*
564 	 * Note: we don't need the s_lock held since the BUSY flag
565 	 * makes this single threaded.
566 	 */
567 	if (sqp->s_rnr_retry == 0) {
568 		send_status = IB_WC_RNR_RETRY_EXC_ERR;
569 		goto serr;
570 	}
571 	if (sqp->s_rnr_retry_cnt < 7)
572 		sqp->s_rnr_retry--;
573 	spin_lock_irqsave(&sqp->s_lock, flags);
574 	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
575 		goto clr_busy;
576 	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
577 				IB_AETH_CREDIT_SHIFT);
578 	goto clr_busy;
579 
580 op_err:
581 	send_status = IB_WC_REM_OP_ERR;
582 	wc.status = IB_WC_LOC_QP_OP_ERR;
583 	goto err;
584 
585 inv_err:
586 	send_status = IB_WC_REM_INV_REQ_ERR;
587 	wc.status = IB_WC_LOC_QP_OP_ERR;
588 	goto err;
589 
590 acc_err:
591 	send_status = IB_WC_REM_ACCESS_ERR;
592 	wc.status = IB_WC_LOC_PROT_ERR;
593 err:
594 	/* responder goes to error state */
595 	rvt_rc_error(qp, wc.status);
596 
597 serr:
598 	spin_lock_irqsave(&sqp->s_lock, flags);
599 	qib_send_complete(sqp, wqe, send_status);
600 	if (sqp->ibqp.qp_type == IB_QPT_RC) {
601 		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
602 
603 		sqp->s_flags &= ~RVT_S_BUSY;
604 		spin_unlock_irqrestore(&sqp->s_lock, flags);
605 		if (lastwqe) {
606 			struct ib_event ev;
607 
608 			ev.device = sqp->ibqp.device;
609 			ev.element.qp = &sqp->ibqp;
610 			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
611 			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
612 		}
613 		goto done;
614 	}
615 clr_busy:
616 	sqp->s_flags &= ~RVT_S_BUSY;
617 unlock:
618 	spin_unlock_irqrestore(&sqp->s_lock, flags);
619 done:
620 	rcu_read_unlock();
621 }
622 
623 /**
624  * qib_make_grh - construct a GRH header
625  * @ibp: a pointer to the IB port
626  * @hdr: a pointer to the GRH header being constructed
627  * @grh: the global route address to send to
628  * @hwords: the number of 32 bit words of header being sent
629  * @nwords: the number of 32 bit words of data being sent
630  *
631  * Return the size of the header in 32 bit words.
632  */
633 u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
634 		 const struct ib_global_route *grh, u32 hwords, u32 nwords)
635 {
636 	hdr->version_tclass_flow =
637 		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
638 			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
639 			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
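	/*
	 * paylen is the byte count following the GRH: hwords minus the
	 * 2-word LRH (the GRH itself is not yet counted in hwords), plus
	 * the payload and ICRC, converted from 32-bit words to bytes.
	 */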
640 	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
641 	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
642 	hdr->next_hdr = IB_GRH_NEXT_HDR;
643 	hdr->hop_limit = grh->hop_limit;
644 	/* The SGID is 32-bit aligned. */
645 	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
646 	hdr->sgid.global.interface_id = grh->sgid_index ?
647 		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
648 	hdr->dgid = grh->dgid;
649 
650 	/* GRH header size in 32-bit words. */
651 	return sizeof(struct ib_grh) / sizeof(u32);
652 }
653 
654 void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
655 			 u32 bth0, u32 bth2)
656 {
657 	struct qib_qp_priv *priv = qp->priv;
658 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
659 	u16 lrh0;
660 	u32 nwords;
661 	u32 extra_bytes;
662 
663 	/* Construct the header. */
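	/* -size & 3 == (4 - size % 4) % 4: pad to a 4-byte boundary */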
664 	extra_bytes = -qp->s_cur_size & 3;
665 	nwords = (qp->s_cur_size + extra_bytes) >> 2;
666 	lrh0 = QIB_LRH_BTH;
667 	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
668 		qp->s_hdrwords +=
669 			qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
670 				     rdma_ah_read_grh(&qp->remote_ah_attr),
671 				     qp->s_hdrwords, nwords);
672 		lrh0 = QIB_LRH_GRH;
673 	}
674 	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
675 		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
676 	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
677 	priv->s_hdr->lrh[1] =
678 			cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
679 	priv->s_hdr->lrh[2] =
680 			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
681 	priv->s_hdr->lrh[3] =
682 		cpu_to_be16(ppd_from_ibp(ibp)->lid |
683 			    rdma_ah_get_path_bits(&qp->remote_ah_attr));
684 	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
685 	bth0 |= extra_bytes << 20;
686 	if (qp->s_mig_state == IB_MIG_MIGRATED)
687 		bth0 |= IB_BTH_MIG_REQ;
688 	ohdr->bth[0] = cpu_to_be32(bth0);
689 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
690 	ohdr->bth[2] = cpu_to_be32(bth2);
691 	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
692 }
693 
694 void _qib_do_send(struct work_struct *work)
695 {
696 	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
697 						s_work);
698 	struct rvt_qp *qp = priv->owner;
699 
700 	qib_do_send(qp);
701 }
702 
703 /**
704  * qib_do_send - perform a send on a QP
705  * @qp: pointer to the QP
706  *
707  * Process entries in the send work queue until credit or queue is
708  * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
709  * Otherwise, two threads could send packets out of order.
710  */
711 void qib_do_send(struct rvt_qp *qp)
712 {
713 	struct qib_qp_priv *priv = qp->priv;
714 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
715 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
716 	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
717 	unsigned long flags;
718 
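	/*
	 * Loop back in software if the DLID, with the low LMC bits masked
	 * off, is this port's own LID: the destination is the local HCA.
	 */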
719 	if ((qp->ibqp.qp_type == IB_QPT_RC ||
720 	     qp->ibqp.qp_type == IB_QPT_UC) &&
721 	    (rdma_ah_get_dlid(&qp->remote_ah_attr) &
722 	     ~((1 << ppd->lmc) - 1)) == ppd->lid) {
723 		qib_ruc_loopback(qp);
724 		return;
725 	}
726 
727 	if (qp->ibqp.qp_type == IB_QPT_RC)
728 		make_req = qib_make_rc_req;
729 	else if (qp->ibqp.qp_type == IB_QPT_UC)
730 		make_req = qib_make_uc_req;
731 	else
732 		make_req = qib_make_ud_req;
733 
734 	spin_lock_irqsave(&qp->s_lock, flags);
735 
736 	/* Return if we are already busy processing a work request. */
737 	if (!qib_send_ok(qp)) {
738 		spin_unlock_irqrestore(&qp->s_lock, flags);
739 		return;
740 	}
741 
742 	qp->s_flags |= RVT_S_BUSY;
743 
744 	do {
745 		/* Check for a constructed packet to be sent. */
746 		if (qp->s_hdrwords != 0) {
747 			spin_unlock_irqrestore(&qp->s_lock, flags);
748 			/*
749 			 * If the packet cannot be sent now, return and
750 			 * the send tasklet will be woken up later.
751 			 */
752 			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
753 					   qp->s_cur_sge, qp->s_cur_size))
754 				return;
755 			/* Record that s_hdr is empty. */
756 			qp->s_hdrwords = 0;
757 			spin_lock_irqsave(&qp->s_lock, flags);
758 		}
759 	} while (make_req(qp, &flags));
760 
761 	spin_unlock_irqrestore(&qp->s_lock, flags);
762 }
763 
764 /*
765  * This should be called with s_lock held.
766  */
767 void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
768 		       enum ib_wc_status status)
769 {
770 	u32 old_last, last;
771 
772 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
773 		return;
774 
775 	last = qp->s_last;
776 	old_last = last;
777 	if (++last >= qp->s_size)
778 		last = 0;
779 	qp->s_last = last;
780 	/* See post_send() */
781 	barrier();
782 	rvt_put_swqe(wqe);
783 	if (qp->ibqp.qp_type == IB_QPT_UD ||
784 	    qp->ibqp.qp_type == IB_QPT_SMI ||
785 	    qp->ibqp.qp_type == IB_QPT_GSI)
786 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
787 
788 	rvt_qp_swqe_complete(qp,
789 			     wqe,
790 			     ib_qib_wc_opcode[wqe->wr.opcode],
791 			     status);
792 
793 	if (qp->s_acked == old_last)
794 		qp->s_acked = last;
795 	if (qp->s_cur == old_last)
796 		qp->s_cur = last;
797 	if (qp->s_tail == old_last)
798 		qp->s_tail = last;
799 	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
800 		qp->s_draining = 0;
801 }
802