xref: /openbmc/linux/drivers/infiniband/hw/qib/qib_ud.c (revision 020c5260)
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
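 * Called with sqp->s_lock released (qib_make_ud_req() drops it before
 * calling in); the destination QP's r_lock is acquired here instead.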
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
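	/*
	 * E.g. a WR qkey of 0x80000000 has the high bit set, so sqp->qkey
	 * is used; 0x00001234 is used as given.  The signed cast below is
	 * how the high-bit test is implemented.
	 */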
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
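	/*
	 * RVT_R_REUSE_SGE means the previous packet was dropped after its
	 * RWQE was consumed, so reuse that RWQE rather than taking a new one.
	 */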
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

		qib_make_grh(ibp, &grh, grd, 0, 0);
		qib_copy_sge(&qp->r_sge, &grh,
			     sizeof(grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
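			/*
			 * Not the reserved DMA lkey: advance through the
			 * MR's segment map to the next vaddr/length pair.
			 */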
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->ud_wr.pkey_index : 0;
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1));
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: caller's saved IRQ flags for s_lock, updated if the lock is
 *	dropped and re-acquired for a loopback send
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	/* see post_one_send() */
	smp_read_barrier_depends();
	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		if (rdma_ah_get_dlid(ah_attr) !=
				be16_to_cpu(IB_LID_PERMISSIVE))
			this_cpu_inc(ibp->pmastats->n_multicast_xmit);
		else
			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	} else {
		this_cpu_inc(ibp->pmastats->n_unicast_xmit);
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			unsigned long tflags = *flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&priv->s_dma_busy)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			*flags = tflags;
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
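	/*
	 * Pad the payload out to a 4-byte boundary: -length & 3 is the
	 * number of pad bytes needed (e.g. length 5 -> 3, length 8 -> 0),
	 * and nwords is the padded payload size in 32-bit words.
	 */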
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       rdma_ah_read_grh(ah_attr),
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &priv->s_hdr->u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs; the IBTA spec leaves that behavior unspecified.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &priv->s_hdr->u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
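	/* LRH dword 0: VL in bits 15:12, SL in bits 7:4, LNH in bits 1:0. */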
	lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
			cpu_to_be16(rdma_ah_get_dlid(ah_attr));  /* DEST LID */
	priv->s_hdr->lrh[2] =
			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= rdma_ah_get_path_bits(ah_attr) &
			((1 << ppd->lmc) - 1);
		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
	} else
		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
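	/* The 2-bit PadCnt lives in bits 21:20 of BTH dword 0. */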
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->ud_wr.pkey_index : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
			be16_to_cpu(IB_MULTICAST_LID_BASE) &&
		rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * We should not get here; it means the hardware failed to
	 * validate the pkeys.  Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

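	/* Strip the headers, the pad bytes, and the 4-byte trailing ICRC. */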
	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

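			/*
			 * The P_Key sits in the low 16 bits of BTH dword 0;
			 * the u16 assignment discards the opcode/flag bits.
			 */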
			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					      pkey1,
					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					      src_qp, qp->ibqp.qp_num,
					      hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey)) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      src_qp, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			return;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
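		/* The 4-byte immediate was counted as payload; remove it. */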
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}
606