/*
 * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

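	/*
	 * The destination QP is looked up and used entirely under RCU,
	 * which keeps it from being freed while the payload is copied
	 * into its receive queue.
	 */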
	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = rvt_get_swqe_ah_attr(swqe);
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pkey(ibp, pkey1,
				     rdma_ah_get_sl(ah_attr),
				     sqp->ibqp.qp_num, qp->ibqp.qp_num,
				     cpu_to_be16(lid),
				     cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
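	/*
	 * The (int) cast below turns the "high order bit set" rule into
	 * a simple sign test: e.g. a WR qkey of 0x80000000 compares
	 * below zero, so sqp->qkey is used in its place.
	 */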
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
			sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
		if (unlikely(qkey != qp->qkey))
			goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
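	/*
	 * struct ib_grh mirrors the 40-byte wire GRH, so byte_len below
	 * reports the length a consumer would see with a real GRH.
	 */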
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}
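
	/*
	 * qp->r_lock serializes this loopback delivery against
	 * qib_ud_rcv() filling the same receive queue from interrupt
	 * context (see the note in the function header above).
	 */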
	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

		qib_make_grh(ibp, &grh, grd, 0, 0);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
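	/*
	 * Walk the sender's SG list, copying into the receiver's SGE
	 * state.  For lkey-backed SGEs the vaddr/length pairs come from
	 * the MR's segment arrays, RVT_SEGSZ entries per map.
	 */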
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		rvt_get_swqe_pkey_index(swqe) : 0;
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1));
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: saved s_lock IRQ flags, updated if the lock is dropped and
 *	re-acquired for a loopback send
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

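	/* s_cur indexes a circular send queue; wrap the next slot at s_size. */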
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
	if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		if (rdma_ah_get_dlid(ah_attr) !=
				be16_to_cpu(IB_LID_PERMISSIVE))
			this_cpu_inc(ibp->pmastats->n_multicast_xmit);
		else
			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	} else {
		this_cpu_inc(ibp->pmastats->n_unicast_xmit);
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			unsigned long tflags = *flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&priv->s_dma_busy)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			*flags = tflags;
			rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
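	/*
	 * Pad the payload to a 32-bit boundary: e.g. a 13-byte WQE
	 * gives extra_bytes = (-13 & 3) = 3 and nwords = 4.
	 */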
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       rdma_ah_read_grh(ah_attr),
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &priv->s_hdr->u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs; the IBA leaves that behavior unspecified.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &priv->s_hdr->u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
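	/*
	 * Per the IBA LRH layout: word 0 holds VL/LVer/SL/LNH, word 1
	 * the DLID, word 2 the packet length in 32-bit words (including
	 * a word for the ICRC), and word 3 the SLID.
	 */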
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
			cpu_to_be16(rdma_ah_get_dlid(ah_attr));  /* DEST LID */
	priv->s_hdr->lrh[2] =
			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= rdma_ah_get_path_bits(ah_attr) &
			((1 << ppd->lmc) - 1);
		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
	} else
		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
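	/*
	 * The pad count lands in BTH bits 21:20; qib_ud_rcv() pulls it
	 * back out with (bth[0] >> 20) & 3.
	 */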
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     rvt_get_swqe_pkey_index(wqe) : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
			be16_to_cpu(IB_MULTICAST_LID_BASE) &&
		rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
	ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] =
		cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
			    rvt_get_swqe_remote_qkey(wqe));
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * We should not get here; it means the hardware failed to
	 * validate the pkeys.  Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
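	/*
	 * DETH word 0 carries the qkey; word 1 holds the 24-bit source
	 * QPN, extracted with RVT_QPN_MASK.
	 */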
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by and drop
	 * incomplete packets; the trailing 4 bytes are the ICRC.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pkey(ibp,
					     pkey1,
					     (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					     src_qp, qp->ibqp.qp_num,
					     hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey))
			return;

		/*
		 * Drop invalid MAD packets (see 13.5.3.1): a MAD
		 * datagram payload is exactly 256 bytes, and VL15 (the
		 * top four bits of lrh[0]) carries only SMPs bound for
		 * QP0.
		 */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
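	/*
	 * UD messages always fit in a single packet, so SEND_ONLY and
	 * SEND_ONLY_WITH_IMMEDIATE are the only opcodes accepted here.
	 */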
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		     true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}