xref: /openbmc/linux/drivers/infiniband/hw/hfi1/uc.c (revision 93032e31)
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"

/* cut down ridiculously long IB macro names */
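/* e.g. OP(SEND_FIRST) names the IB_OPCODE_UC_SEND_FIRST opcode */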
#define OP(x) UC_OP(x)

/**
 * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords = 5; /* LRH (2 dwords) + BTH (3 dwords) */
	u32 bth0 = 0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int middle = 0;

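	/*
	 * Allocate the txreq first; every exit path below either fills
	 * it in or releases it.
	 */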
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

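	/*
	 * The opcode-specific headers follow LRH+GRH when a GRH is
	 * present, the LRH alone otherwise; pick the matching union
	 * member.
	 */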
	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] &
		    RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
			clear_ahg(qp);
			goto bail;
		}
		/*
		 * Local operations are processed immediately
		 * after all prior requests have completed.
		 */
		if (wqe->wr.opcode == IB_WR_REG_MR ||
		    wqe->wr.opcode == IB_WR_LOCAL_INV) {
			int local_ops = 0;
			int err = 0;

			if (qp->s_last != qp->s_cur)
				goto bail;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
				err = rvt_invalidate_rkey(
					qp, wqe->wr.ex.invalidate_rkey);
				local_ops = 1;
			}
			hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
							: IB_WC_SUCCESS);
			if (local_ops)
				atomic_dec(&qp->local_ops_pending);
			qp->s_hdrwords = 0;
			goto done_free_tx;
		}
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
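		/*
		 * Anything larger than one PMTU is segmented into
		 * FIRST/MIDDLE.../LAST packets; a message that fits in
		 * a single packet uses an ONLY opcode.
		 */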
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
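			/*
			 * Middle packets may be built with AHG, so only
			 * the changing header fields (e.g. the PSN) are
			 * regenerated for each packet.
			 */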
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
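	/*
	 * Common epilogue: record how much payload goes in this packet
	 * and build the LRH/BTH (and GRH if needed) around it.
	 */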
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			     mask_psn(qp->s_psn++), middle, ps);
	/* Account for the 2-dword PBC that precedes the header. */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}

/**
 * hfi1_uc_rcv - handle an incoming UC packet
 * @packet: the packet structure, which carries the header, payload,
 *	    receive flags, and destination QP
 *
 * This is called from qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	int ret;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

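	/* Process any FECN/BECN congestion marking carried in the packet. */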
	process_ecn(qp, packet, true);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else {
			rvt_put_ss(&qp->r_sge);
		}
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

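	/* A first packet while the QP is in RTR establishes communication. */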
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			qp->r_sge = qp->s_rdma_read_sge;
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/*
		 * Check for an invalid length: a MIDDLE packet's payload
		 * must be exactly one PMTU and must not overrun the
		 * posted rwqe.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 0, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 0, 0);
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
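		/*
		 * The RETH in a first/only packet supplies the remote
		 * buffer: vaddr, rkey, and total length.
		 */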
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY)) {
			goto rdma_last;
		} else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/*
		 * Check for an invalid length: a MIDDLE packet's payload
		 * must be exactly one PMTU and must not overrun the
		 * length given in the RETH.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			rvt_put_ss(&qp->s_rdma_read_sge);
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
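	/* Advance the expected PSN and remember the opcode for sequencing. */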
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}
589