xref: /openbmc/linux/drivers/infiniband/hw/hfi1/uc.c (revision 90a53e44)
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */

#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"

/* cut down ridiculously long IB macro names */
#define OP(x) UC_OP(x)
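/*
 * For example, OP(SEND_FIRST) expands (via UC_OP()) to
 * IB_OPCODE_UC_SEND_FIRST, the on-the-wire BTH opcode.
 */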

/**
 * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 bth0 = 0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int middle = 0;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head)) /* see post_one_send() */
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}
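	/*
	 * Note: hwords counts only LRH + BTH here; when a GRH is present,
	 * its 40 bytes (10 words) are accounted for later, when the full
	 * header is assembled in hfi1_make_ruc_header().
	 */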

	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] &
		    RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == READ_ONCE(qp->s_head)) { /* see post_one_send() */
			clear_ahg(qp);
			goto bail;
		}
		/*
		 * Local operations are processed immediately
		 * after all prior requests have completed.
		 */
		if (wqe->wr.opcode == IB_WR_REG_MR ||
		    wqe->wr.opcode == IB_WR_LOCAL_INV) {
			int local_ops = 0;
			int err = 0;

			if (qp->s_last != qp->s_cur)
				goto bail;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
				err = rvt_invalidate_rkey(
					qp, wqe->wr.ex.invalidate_rkey);
				local_ops = 1;
			}
			hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
							: IB_WC_SUCCESS);
			if (local_ops)
				atomic_dec(&qp->local_ops_pending);
			qp->s_hdrwords = 0;
			goto done_free_tx;
		}
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
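			/*
			 * UC supports only SEND and RDMA WRITE work
			 * requests; anything else (e.g. RDMA READ or
			 * atomics) cannot be sent on this QP type.
			 */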
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
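			/*
			 * Middle packets may be able to use AHG, where
			 * the SDMA engine reuses most of the previous
			 * header and only the changing fields (such as
			 * the PSN) are updated.
			 */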
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
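	/*
	 * Common exit for a successfully built packet: charge this
	 * packet's payload against the request and finish the header.
	 * The opcode lives in the most significant byte of the first
	 * BTH dword, hence qp->s_state << 24 below.
	 */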
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = &qp->s_sge;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			     mask_psn(qp->s_psn++), middle, ps);
	/* pbc: 2 dwords for the 8-byte PBC that precedes the header */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
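
/*
 * The send engine (hfi1_do_send()) calls hfi1_make_uc_req() in a loop:
 * a return of 1 means a packet was built and should be queued, 0 means
 * there is nothing (more) to send; RVT_S_BUSY is cleared on the 0 path
 * so the engine stops iterating.
 */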

/**
 * hfi1_uc_rcv - handle an incoming UC packet
 * @packet: the packet structure
 *
 * This is called from qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 opcode = packet->opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad = packet->pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
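	/*
	 * extra_bytes covers everything that trails the payload: the BTH
	 * pad bytes, the 16B LT byte (packet->extra_byte, zero for 9B
	 * packets), and the 4-byte CRC (SIZE_OF_CRC is in 32-bit words,
	 * hence the << 2).
	 */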

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

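	/* React to any FECN/BECN congestion marks carried in the header. */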
	process_ecn(qp, packet, true);

	psn = ib_bth_get_psn(ohdr);
	/* Compare the PSN against the expected PSN. */
	if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else {
			rvt_put_ss(&qp->r_sge);
		}
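		/*
		 * Reset the receive state machine: SEND_LAST is the
		 * "between messages" state, so the switch below can
		 * restart cleanly on a new FIRST/ONLY packet.
		 */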
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

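	/*
	 * A valid packet arriving while the QP is still in RTR means the
	 * remote side is up; raise an IB_EVENT_COMM_EST event once.
	 */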
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
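		/*
		 * If the previous message was rewound (dropped mid
		 * stream), reuse the receive WQE state saved in
		 * s_rdma_read_sge (which UC never needs for RDMA reads)
		 * instead of consuming a fresh RWQE.
		 */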
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			qp->r_sge = qp->s_rdma_read_sge;
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/*
		 * Check for an invalid length: a MIDDLE payload must be
		 * exactly one PMTU and must not overrun the posted rwqe.
		 */
		/*
		 * There is no padding for 9B packets, but 16B packets
		 * come in with some padding since we always add the
		 * CRC and LT bytes, which need to be flit aligned.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, false, false);
		break;


	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + extra_bytes);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		hfi1_copy_sge(&qp->r_sge, data, tlen, false, false);
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
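		/*
		 * The RETH immediately follows the BTH for RDMA WRITE
		 * packets; it carries the remote vaddr, rkey, and total
		 * DMA length for the message.
		 */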
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY)) {
			goto rdma_last;
		} else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/*
		 * Check for an invalid length: a MIDDLE payload must be
		 * exactly one PMTU and must not overrun the posted rwqe.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + extra_bytes);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			rvt_put_ss(&qp->s_rdma_read_sge);
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + extra_bytes);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
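	/*
	 * Accept the packet: advance the expected PSN and remember the
	 * opcode so the next arrival can be sequence-checked against it.
	 */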
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}
597