/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

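/*
 * Compute how far into a WQE a resend should restart: the PSN delta
 * from the WQE's first PSN, masked to 24 bits, is the number of
 * packets already sent, so multiplying by the path MTU yields the
 * byte offset (e.g. a delta of 3 at a 4096-byte MTU resumes 12288
 * bytes in).
 */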
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	return rvt_restart_sge(ss, wqe, len);
}

/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct ib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

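	/*
	 * s_ack_state drives the response-side state machine: it records
	 * the opcode of the last response packet constructed, so each pass
	 * either continues a multi-packet RDMA read response or moves on to
	 * the next entry in s_ack_queue.
	 */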
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		fallthrough;
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		fallthrough;
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			if (qp->s_rdma_mr)
				rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		fallthrough;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		if (qp->s_rdma_mr)
			rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
	return 0;
}

/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @flags: unused
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int ret = 0;
	int delta;

	ohdr = &priv->s_hdr->u.oth;
	if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				  IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

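	/*
	 * s_sending_psn..s_sending_hpsn brackets the PSNs still being
	 * handed to the hardware; don't construct packets for a restarted
	 * range until that in-flight window has drained.
	 */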
	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

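	/*
	 * From here on, qp->s_state holds the opcode of the last request
	 * packet constructed: the default case below starts a new WQE (or
	 * a resend), while the remaining cases continue a partially sent
	 * request.
	 */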
	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head))
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = qp->s_psn & QIB_PSN_MASK;
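		/*
		 * PSNs are 24 bits wide in the BTH, hence the masking with
		 * QIB_PSN_MASK whenever a PSN is placed in bth2.
		 */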
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
no_flow_control:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data =
					wqe->rdma_wr.wr.ex.imm_data;
				hwords += 1;
				if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}

			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
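		/*
		 * All request types share the SGE setup below; resends
		 * instead rebuild the SGE state via restart_sge() in the
		 * RDMA_READ_RESPONSE_* cases.
		 */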
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		fallthrough;
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		fallthrough;
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		fallthrough;
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		fallthrough;
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
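	/*
	 * Ask the peer to ACK periodically: request one whenever a whole
	 * QIB_PSN_CREDIT's worth of PSNs has elapsed since the start of
	 * this WQE, and on every packet when sending one packet at a time
	 * (RVT_S_SEND_ONE) while waiting for ACKs.
	 */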
	qp->s_sending_hpsn = bth2;
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 pbufn;
	u32 __iomem *piobuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	u32 control;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header with s_lock held so APM doesn't change it. */
	ohdr = &hdr.u.oth;
	lrh0 = QIB_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
		     IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       rdma_ah_read_grh(&qp->remote_ah_attr),
				       hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = QIB_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					   (qp->r_nak_state <<
					    IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid |
				 rdma_ah_get_path_bits(&qp->remote_ah_attr));
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))
		goto done;

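	/*
	 * Build the PBC (per-buffer control) qword: the chip send controls
	 * go in the upper 32 bits and the packet length in dwords (header
	 * plus one for the control dword itself) in the lower 32.
	 */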
	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);
	/* length is + 1 for the control dword */
	pbc = ((u64) control << 32) | (hwords + 1);

	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness
	 * on some cpus or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);

	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		qib_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	goto done;

queue_ack:
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		this_cpu_inc(*ibp->rvp.rc_qacks);
		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one PSN per request.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qib_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* XXX need to handle delayed completion */
			return;
	} else
		qp->s_retry--;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to rvt_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	else
		this_cpu_inc(*ibp->rvp.rc_delayed_comp);

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/*
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include the prior ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
1077f931551bSRalph Campbell if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1078f931551bSRalph Campbell wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1079f931551bSRalph Campbell u64 *vaddr = wqe->sg_list[0].vaddr;
1080f931551bSRalph Campbell *vaddr = val;
1081f931551bSRalph Campbell }
1082f931551bSRalph Campbell if (qp->s_num_rd_atomic &&
1083f931551bSRalph Campbell (wqe->wr.opcode == IB_WR_RDMA_READ ||
1084f931551bSRalph Campbell wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1085f931551bSRalph Campbell wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1086f931551bSRalph Campbell qp->s_num_rd_atomic--;
1087f931551bSRalph Campbell /* Restart sending task if fence is complete */
108801ba79d4SHarish Chegondi if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
1089f931551bSRalph Campbell !qp->s_num_rd_atomic) {
109001ba79d4SHarish Chegondi qp->s_flags &= ~(RVT_S_WAIT_FENCE |
109101ba79d4SHarish Chegondi RVT_S_WAIT_ACK);
1092f931551bSRalph Campbell qib_schedule_send(qp);
109301ba79d4SHarish Chegondi } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
109401ba79d4SHarish Chegondi qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
109501ba79d4SHarish Chegondi RVT_S_WAIT_ACK);
1096f931551bSRalph Campbell qib_schedule_send(qp);
1097f931551bSRalph Campbell }
1098f931551bSRalph Campbell }
1099f931551bSRalph Campbell wqe = do_rc_completion(qp, wqe, ibp);
1100f931551bSRalph Campbell if (qp->s_acked == qp->s_tail)
1101f931551bSRalph Campbell break;
1102f931551bSRalph Campbell }
1103f931551bSRalph Campbell
1104832666c1SDon Hiatt switch (aeth >> IB_AETH_NAK_SHIFT) {
1105f931551bSRalph Campbell case 0: /* ACK */
1106f24a6d48SHarish Chegondi this_cpu_inc(*ibp->rvp.rc_acks);
1107f931551bSRalph Campbell if (qp->s_acked != qp->s_tail) {
1108f931551bSRalph Campbell /*
1109f931551bSRalph Campbell * We are expecting more ACKs so
1110f931551bSRalph Campbell * reset the retransmit timer.
1111f931551bSRalph Campbell */
1112b4238e70SVenkata Sandeep Dhanalakota rvt_mod_retry_timer(qp);
1113f931551bSRalph Campbell /*
1114f931551bSRalph Campbell * We can stop resending the earlier packets and
1115f931551bSRalph Campbell * continue with the next packet the receiver wants.
1116f931551bSRalph Campbell */
1117f931551bSRalph Campbell if (qib_cmp24(qp->s_psn, psn) <= 0)
1118f931551bSRalph Campbell reset_psn(qp, psn + 1);
1119b4238e70SVenkata Sandeep Dhanalakota } else {
1120b4238e70SVenkata Sandeep Dhanalakota /* No more acks - kill all timers */
1121b4238e70SVenkata Sandeep Dhanalakota rvt_stop_rc_timers(qp);
1122b4238e70SVenkata Sandeep Dhanalakota if (qib_cmp24(qp->s_psn, psn) <= 0) {
1123f931551bSRalph Campbell qp->s_state = OP(SEND_LAST);
1124f931551bSRalph Campbell qp->s_psn = psn + 1;
1125f931551bSRalph Campbell }
1126b4238e70SVenkata Sandeep Dhanalakota }
112701ba79d4SHarish Chegondi if (qp->s_flags & RVT_S_WAIT_ACK) {
112801ba79d4SHarish Chegondi qp->s_flags &= ~RVT_S_WAIT_ACK;
1129f931551bSRalph Campbell qib_schedule_send(qp);
1130f931551bSRalph Campbell }
1131696513e8SBrian Welty rvt_get_credit(qp, aeth);
1132f931551bSRalph Campbell qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1133f931551bSRalph Campbell qp->s_retry = qp->s_retry_cnt;
1134f931551bSRalph Campbell update_last_psn(qp, psn);
1135b4238e70SVenkata Sandeep Dhanalakota return 1;
1136f931551bSRalph Campbell
1137f931551bSRalph Campbell case 1: /* RNR NAK */
1138f24a6d48SHarish Chegondi ibp->rvp.n_rnr_naks++;
1139f931551bSRalph Campbell if (qp->s_acked == qp->s_tail)
1140f931551bSRalph Campbell goto bail;
114101ba79d4SHarish Chegondi if (qp->s_flags & RVT_S_WAIT_RNR)
1142f931551bSRalph Campbell goto bail;
1143f931551bSRalph Campbell if (qp->s_rnr_retry == 0) {
1144f931551bSRalph Campbell status = IB_WC_RNR_RETRY_EXC_ERR;
1145f931551bSRalph Campbell goto class_b;
1146f931551bSRalph Campbell }
1147f931551bSRalph Campbell if (qp->s_rnr_retry_cnt < 7)
1148f931551bSRalph Campbell qp->s_rnr_retry--;
1149f931551bSRalph Campbell
1150f931551bSRalph Campbell /* The last valid PSN is the previous PSN. */
1151f931551bSRalph Campbell update_last_psn(qp, psn - 1);
1152f931551bSRalph Campbell
1153f24a6d48SHarish Chegondi ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
1154f931551bSRalph Campbell
1155f931551bSRalph Campbell reset_psn(qp, psn);
1156f931551bSRalph Campbell
115701ba79d4SHarish Chegondi qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
1158b4238e70SVenkata Sandeep Dhanalakota rvt_stop_rc_timers(qp);
1159b4238e70SVenkata Sandeep Dhanalakota rvt_add_rnr_timer(qp, aeth);
1160b4238e70SVenkata Sandeep Dhanalakota return 0;
1161f931551bSRalph Campbell
1162f931551bSRalph Campbell case 3: /* NAK */
1163f931551bSRalph Campbell if (qp->s_acked == qp->s_tail)
1164f931551bSRalph Campbell goto bail;
1165f931551bSRalph Campbell /* The last valid PSN is the previous PSN. */
1166f931551bSRalph Campbell update_last_psn(qp, psn - 1);
1167832666c1SDon Hiatt switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
1168832666c1SDon Hiatt IB_AETH_CREDIT_MASK) {
1169f931551bSRalph Campbell case 0: /* PSN sequence error */
1170f24a6d48SHarish Chegondi ibp->rvp.n_seq_naks++;
1171f931551bSRalph Campbell /*
1172f931551bSRalph Campbell * Back up to the responder's expected PSN.
1173f931551bSRalph Campbell * Note that we might get a NAK in the middle of an
1174f931551bSRalph Campbell * RDMA READ response which terminates the RDMA
1175f931551bSRalph Campbell * READ.
1176f931551bSRalph Campbell */
1177f931551bSRalph Campbell qib_restart_rc(qp, psn, 0);
1178f931551bSRalph Campbell qib_schedule_send(qp);
1179f931551bSRalph Campbell break;
1180f931551bSRalph Campbell
1181f931551bSRalph Campbell case 1: /* Invalid Request */
1182f931551bSRalph Campbell status = IB_WC_REM_INV_REQ_ERR;
1183f24a6d48SHarish Chegondi ibp->rvp.n_other_naks++;
1184f931551bSRalph Campbell goto class_b;
1185f931551bSRalph Campbell
1186f931551bSRalph Campbell case 2: /* Remote Access Error */
1187f931551bSRalph Campbell status = IB_WC_REM_ACCESS_ERR;
1188f24a6d48SHarish Chegondi ibp->rvp.n_other_naks++;
1189f931551bSRalph Campbell goto class_b;
1190f931551bSRalph Campbell
1191f931551bSRalph Campbell case 3: /* Remote Operation Error */
1192f931551bSRalph Campbell status = IB_WC_REM_OP_ERR;
1193f24a6d48SHarish Chegondi ibp->rvp.n_other_naks++;
1194f931551bSRalph Campbell class_b:
1195f931551bSRalph Campbell if (qp->s_last == qp->s_acked) {
1196116aa033SVenkata Sandeep Dhanalakota rvt_send_complete(qp, wqe, status);
119770696ea7SHarish Chegondi rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1198f931551bSRalph Campbell }
1199f931551bSRalph Campbell break;
1200f931551bSRalph Campbell
1201f931551bSRalph Campbell default:
1202f931551bSRalph Campbell /* Ignore other reserved NAK error codes */
1203f931551bSRalph Campbell goto reserved;
1204f931551bSRalph Campbell }
1205f931551bSRalph Campbell qp->s_retry = qp->s_retry_cnt;
1206f931551bSRalph Campbell qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1207f931551bSRalph Campbell goto bail;
1208f931551bSRalph Campbell
1209f931551bSRalph Campbell default: /* 2: reserved */
1210f931551bSRalph Campbell reserved:
1211f931551bSRalph Campbell /* Ignore reserved NAK codes. */
1212f931551bSRalph Campbell goto bail;
1213f931551bSRalph Campbell }
1214f931551bSRalph Campbell
1215f931551bSRalph Campbell bail:
1216b4238e70SVenkata Sandeep Dhanalakota rvt_stop_rc_timers(qp);
1217f931551bSRalph Campbell return ret;
1218f931551bSRalph Campbell }
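
/*
 * Illustrative sketch, not part of the original driver: how the AETH
 * word is decoded by do_rc_ack() above. The top three bits select
 * ACK (0), RNR NAK (1) or NAK (3); 2 is reserved. For a NAK, the
 * 5-bit credit field carries the NAK code (0 = PSN sequence error,
 * 1 = invalid request, 2 = remote access error, 3 = remote operation
 * error). Assumes only the IB_AETH_* constants already used in this
 * file.
 */
static inline u32 example_aeth_type(u32 aeth)
{
	return aeth >> IB_AETH_NAK_SHIFT;	/* 0 ACK, 1 RNR, 3 NAK */
}

static inline u32 example_aeth_nak_code(u32 aeth)
{
	return (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK;
}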
1219f931551bSRalph Campbell
1220f931551bSRalph Campbell /*
1221f931551bSRalph Campbell * We have seen an out of sequence RDMA read middle or last packet.
1222f931551bSRalph Campbell * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1223f931551bSRalph Campbell */
12247c2e11feSDennis Dalessandro static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
1225f931551bSRalph Campbell struct qib_ctxtdata *rcd)
1226f931551bSRalph Campbell {
12277c2e11feSDennis Dalessandro struct rvt_swqe *wqe;
1228f931551bSRalph Campbell
1229f931551bSRalph Campbell /* Remove QP from retry timer */
1230b4238e70SVenkata Sandeep Dhanalakota rvt_stop_rc_timers(qp);
1231f931551bSRalph Campbell
1232db3ef0ebSHarish Chegondi wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1233f931551bSRalph Campbell
1234f931551bSRalph Campbell while (qib_cmp24(psn, wqe->lpsn) > 0) {
1235f931551bSRalph Campbell if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1236f931551bSRalph Campbell wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1237f931551bSRalph Campbell wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1238f931551bSRalph Campbell break;
1239f931551bSRalph Campbell wqe = do_rc_completion(qp, wqe, ibp);
1240f931551bSRalph Campbell }
1241f931551bSRalph Campbell
1242f24a6d48SHarish Chegondi ibp->rvp.n_rdma_seq++;
124301ba79d4SHarish Chegondi qp->r_flags |= RVT_R_RDMAR_SEQ;
1244f931551bSRalph Campbell qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1245f931551bSRalph Campbell if (list_empty(&qp->rspwait)) {
124601ba79d4SHarish Chegondi qp->r_flags |= RVT_R_RSP_SEND;
12474d6f85c3SMike Marciniszyn rvt_get_qp(qp);
1248f931551bSRalph Campbell list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1249f931551bSRalph Campbell }
1250f931551bSRalph Campbell }
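
/*
 * Illustrative sketch, not part of the original driver: PSNs are
 * 24-bit values compared modulo 2^24, which is why rdma_seq_err()
 * above uses qib_cmp24() rather than a plain subtraction. One
 * plausible implementation sign-extends the low 24 bits of the
 * difference, so example_cmp24(0x000001, 0xffffff) is positive even
 * though 0x000001 < 0xffffff as plain integers.
 */
static inline int example_cmp24(u32 a, u32 b)
{
	/* keep only the low 24 bits of the difference, then sign-extend */
	return (int)((a - b) << 8) >> 8;
}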
1251f931551bSRalph Campbell
1252f931551bSRalph Campbell /**
1253f931551bSRalph Campbell * qib_rc_rcv_resp - process an incoming RC response packet
1254f931551bSRalph Campbell * @ibp: the port this packet came in on
1255f931551bSRalph Campbell * @ohdr: the other headers for this packet
1256f931551bSRalph Campbell * @data: the packet data
1257f931551bSRalph Campbell * @tlen: the packet length
1258f931551bSRalph Campbell * @qp: the QP for this packet
1259f931551bSRalph Campbell * @opcode: the opcode for this packet
1260f931551bSRalph Campbell * @psn: the packet sequence number for this packet
1261f931551bSRalph Campbell * @hdrsize: the header length
1262f931551bSRalph Campbell * @pmtu: the path MTU
1263*24d02e04SLee Jones * @rcd: the context pointer
1264f931551bSRalph Campbell *
1265f931551bSRalph Campbell * This is called from qib_rc_rcv() to process an incoming RC response
1266f931551bSRalph Campbell * packet for the given QP.
1267f931551bSRalph Campbell * Called at interrupt level.
1268f931551bSRalph Campbell */
1269f931551bSRalph Campbell static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1270261a4351SMike Marciniszyn struct ib_other_headers *ohdr,
1271f931551bSRalph Campbell void *data, u32 tlen,
12727c2e11feSDennis Dalessandro struct rvt_qp *qp,
1273f931551bSRalph Campbell u32 opcode,
1274f931551bSRalph Campbell u32 psn, u32 hdrsize, u32 pmtu,
1275f931551bSRalph Campbell struct qib_ctxtdata *rcd)
1276f931551bSRalph Campbell {
12777c2e11feSDennis Dalessandro struct rvt_swqe *wqe;
1278dd04e43dSMike Marciniszyn struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1279f931551bSRalph Campbell enum ib_wc_status status;
1280f931551bSRalph Campbell unsigned long flags;
1281f931551bSRalph Campbell int diff;
1282f931551bSRalph Campbell u32 pad;
1283f931551bSRalph Campbell u32 aeth;
1284f931551bSRalph Campbell u64 val;
1285f931551bSRalph Campbell
1286dd04e43dSMike Marciniszyn if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1287dd04e43dSMike Marciniszyn /*
1288dd04e43dSMike Marciniszyn * If the ACK'd PSN is on the SDMA busy list, try to make
1289dd04e43dSMike Marciniszyn * progress to reclaim SDMA credits.
1290dd04e43dSMike Marciniszyn */
1291dd04e43dSMike Marciniszyn if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1292dd04e43dSMike Marciniszyn (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1293dd04e43dSMike Marciniszyn
1294dd04e43dSMike Marciniszyn /*
1295dd04e43dSMike Marciniszyn * If the send tasklet is not running, attempt to progress
1296dd04e43dSMike Marciniszyn * the SDMA queue.
1297dd04e43dSMike Marciniszyn */
129801ba79d4SHarish Chegondi if (!(qp->s_flags & RVT_S_BUSY)) {
1299dd04e43dSMike Marciniszyn /* Acquire SDMA Lock */
1300dd04e43dSMike Marciniszyn spin_lock_irqsave(&ppd->sdma_lock, flags);
1301dd04e43dSMike Marciniszyn /* Invoke sdma make progress */
1302dd04e43dSMike Marciniszyn qib_sdma_make_progress(ppd);
1303dd04e43dSMike Marciniszyn /* Release SDMA Lock */
1304dd04e43dSMike Marciniszyn spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1305dd04e43dSMike Marciniszyn }
1306dd04e43dSMike Marciniszyn }
1307dd04e43dSMike Marciniszyn }
1308dd04e43dSMike Marciniszyn
1309f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags);
1310db3ef0ebSHarish Chegondi if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1311414ed90cSMike Marciniszyn goto ack_done;
1312f931551bSRalph Campbell
1313f931551bSRalph Campbell /* Ignore invalid responses. */
1314eb04ff09SMike Marciniszyn if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
1315f931551bSRalph Campbell goto ack_done;
1316f931551bSRalph Campbell
1317f931551bSRalph Campbell /* Ignore duplicate responses. */
1318f931551bSRalph Campbell diff = qib_cmp24(psn, qp->s_last_psn);
1319f931551bSRalph Campbell if (unlikely(diff <= 0)) {
1320f931551bSRalph Campbell /* Update credits for "ghost" ACKs */
1321f931551bSRalph Campbell if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1322f931551bSRalph Campbell aeth = be32_to_cpu(ohdr->u.aeth);
1323832666c1SDon Hiatt if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
1324696513e8SBrian Welty rvt_get_credit(qp, aeth);
1325f931551bSRalph Campbell }
1326f931551bSRalph Campbell goto ack_done;
1327f931551bSRalph Campbell }
1328f931551bSRalph Campbell
1329f931551bSRalph Campbell /*
1330f931551bSRalph Campbell * Skip everything other than the PSN we expect, if we are waiting
1331f931551bSRalph Campbell * for a reply to a restarted RDMA read or atomic op.
1332f931551bSRalph Campbell */
133301ba79d4SHarish Chegondi if (qp->r_flags & RVT_R_RDMAR_SEQ) {
1334f931551bSRalph Campbell if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1335f931551bSRalph Campbell goto ack_done;
133601ba79d4SHarish Chegondi qp->r_flags &= ~RVT_R_RDMAR_SEQ;
1337f931551bSRalph Campbell }
1338f931551bSRalph Campbell
1339f931551bSRalph Campbell if (unlikely(qp->s_acked == qp->s_tail))
1340f931551bSRalph Campbell goto ack_done;
1341db3ef0ebSHarish Chegondi wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1342f931551bSRalph Campbell status = IB_WC_SUCCESS;
1343f931551bSRalph Campbell
1344f931551bSRalph Campbell switch (opcode) {
1345f931551bSRalph Campbell case OP(ACKNOWLEDGE):
1346f931551bSRalph Campbell case OP(ATOMIC_ACKNOWLEDGE):
1347f931551bSRalph Campbell case OP(RDMA_READ_RESPONSE_FIRST):
1348f931551bSRalph Campbell aeth = be32_to_cpu(ohdr->u.aeth);
1349261a4351SMike Marciniszyn if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1350261a4351SMike Marciniszyn val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
1351261a4351SMike Marciniszyn else
1352f931551bSRalph Campbell val = 0;
1353f931551bSRalph Campbell if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1354f931551bSRalph Campbell opcode != OP(RDMA_READ_RESPONSE_FIRST))
1355f931551bSRalph Campbell goto ack_done;
1356f931551bSRalph Campbell hdrsize += 4;
1357db3ef0ebSHarish Chegondi wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1358f931551bSRalph Campbell if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1359f931551bSRalph Campbell goto ack_op_err;
1360f931551bSRalph Campbell /*
1361f931551bSRalph Campbell * If this is a response to a resent RDMA read, we
1362f931551bSRalph Campbell * have to be careful to copy the data to the right
1363f931551bSRalph Campbell * location.
1364f931551bSRalph Campbell */
1365f931551bSRalph Campbell qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1366f931551bSRalph Campbell wqe, psn, pmtu);
1367f931551bSRalph Campbell goto read_middle;
1368f931551bSRalph Campbell
1369f931551bSRalph Campbell case OP(RDMA_READ_RESPONSE_MIDDLE):
1370f931551bSRalph Campbell /* no AETH, no ACK */
1371f931551bSRalph Campbell if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1372f931551bSRalph Campbell goto ack_seq_err;
1373f931551bSRalph Campbell if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1374f931551bSRalph Campbell goto ack_op_err;
1375f931551bSRalph Campbell read_middle:
1376f931551bSRalph Campbell if (unlikely(tlen != (hdrsize + pmtu + 4)))
1377f931551bSRalph Campbell goto ack_len_err;
1378f931551bSRalph Campbell if (unlikely(pmtu >= qp->s_rdma_read_len))
1379f931551bSRalph Campbell goto ack_len_err;
1380f931551bSRalph Campbell
1381f931551bSRalph Campbell /*
1382f931551bSRalph Campbell * We got a response so update the timeout.
1383f931551bSRalph Campbell * 4.096 usec. * (1 << qp->timeout)
1384f931551bSRalph Campbell */
1385b4238e70SVenkata Sandeep Dhanalakota rvt_mod_retry_timer(qp);
138601ba79d4SHarish Chegondi if (qp->s_flags & RVT_S_WAIT_ACK) {
138701ba79d4SHarish Chegondi qp->s_flags &= ~RVT_S_WAIT_ACK;
1388f931551bSRalph Campbell qib_schedule_send(qp);
1389f931551bSRalph Campbell }
1390f931551bSRalph Campbell
1391f931551bSRalph Campbell if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1392f931551bSRalph Campbell qp->s_retry = qp->s_retry_cnt;
1393f931551bSRalph Campbell
1394f931551bSRalph Campbell /*
1395f931551bSRalph Campbell * Update the RDMA receive state but do the copy w/o
1396f931551bSRalph Campbell * holding the locks and blocking interrupts.
1397f931551bSRalph Campbell */
1398f931551bSRalph Campbell qp->s_rdma_read_len -= pmtu;
1399f931551bSRalph Campbell update_last_psn(qp, psn);
1400f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
1401019f118bSBrian Welty rvt_copy_sge(qp, &qp->s_rdma_read_sge,
1402019f118bSBrian Welty data, pmtu, false, false);
1403f931551bSRalph Campbell goto bail;
1404f931551bSRalph Campbell
1405f931551bSRalph Campbell case OP(RDMA_READ_RESPONSE_ONLY):
1406f931551bSRalph Campbell aeth = be32_to_cpu(ohdr->u.aeth);
1407f931551bSRalph Campbell if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1408f931551bSRalph Campbell goto ack_done;
1409f931551bSRalph Campbell /* Get the number of bytes the message was padded by. */
1410f931551bSRalph Campbell pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1411f931551bSRalph Campbell /*
1412f931551bSRalph Campbell * Check that the data size is >= 0 && <= pmtu.
1413f931551bSRalph Campbell * Remember to account for the AETH header (4) and
1414f931551bSRalph Campbell * ICRC (4).
1415f931551bSRalph Campbell */
1416f931551bSRalph Campbell if (unlikely(tlen < (hdrsize + pad + 8)))
1417f931551bSRalph Campbell goto ack_len_err;
1418f931551bSRalph Campbell /*
1419f931551bSRalph Campbell * If this is a response to a resent RDMA read, we
1420f931551bSRalph Campbell * have to be careful to copy the data to the right
1421f931551bSRalph Campbell * location.
1422f931551bSRalph Campbell */
1423db3ef0ebSHarish Chegondi wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1424f931551bSRalph Campbell qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1425f931551bSRalph Campbell wqe, psn, pmtu);
1426f931551bSRalph Campbell goto read_last;
1427f931551bSRalph Campbell
1428f931551bSRalph Campbell case OP(RDMA_READ_RESPONSE_LAST):
1429f931551bSRalph Campbell /* ACKs READ req. */
1430f931551bSRalph Campbell if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1431f931551bSRalph Campbell goto ack_seq_err;
1432f931551bSRalph Campbell if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1433f931551bSRalph Campbell goto ack_op_err;
1434f931551bSRalph Campbell /* Get the number of bytes the message was padded by. */
1435f931551bSRalph Campbell pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1436f931551bSRalph Campbell /*
1437f931551bSRalph Campbell * Check that the data size is >= 1 && <= pmtu.
1438f931551bSRalph Campbell * Remember to account for the AETH header (4) and
1439f931551bSRalph Campbell * ICRC (4).
1440f931551bSRalph Campbell */
1441f931551bSRalph Campbell if (unlikely(tlen <= (hdrsize + pad + 8)))
1442f931551bSRalph Campbell goto ack_len_err;
1443f931551bSRalph Campbell read_last:
1444f931551bSRalph Campbell tlen -= hdrsize + pad + 8;
1445f931551bSRalph Campbell if (unlikely(tlen != qp->s_rdma_read_len))
1446f931551bSRalph Campbell goto ack_len_err;
1447f931551bSRalph Campbell aeth = be32_to_cpu(ohdr->u.aeth);
1448019f118bSBrian Welty rvt_copy_sge(qp, &qp->s_rdma_read_sge,
1449019f118bSBrian Welty data, tlen, false, false);
1450f931551bSRalph Campbell WARN_ON(qp->s_rdma_read_sge.num_sge);
1451f931551bSRalph Campbell (void) do_rc_ack(qp, aeth, psn,
1452f931551bSRalph Campbell OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1453f931551bSRalph Campbell goto ack_done;
1454f931551bSRalph Campbell }
1455f931551bSRalph Campbell
1456f931551bSRalph Campbell ack_op_err:
1457f931551bSRalph Campbell status = IB_WC_LOC_QP_OP_ERR;
1458f931551bSRalph Campbell goto ack_err;
1459f931551bSRalph Campbell
1460f931551bSRalph Campbell ack_seq_err:
1461f931551bSRalph Campbell rdma_seq_err(qp, ibp, psn, rcd);
1462f931551bSRalph Campbell goto ack_done;
1463f931551bSRalph Campbell
1464f931551bSRalph Campbell ack_len_err:
1465f931551bSRalph Campbell status = IB_WC_LOC_LEN_ERR;
1466f931551bSRalph Campbell ack_err:
1467f931551bSRalph Campbell if (qp->s_last == qp->s_acked) {
1468116aa033SVenkata Sandeep Dhanalakota rvt_send_complete(qp, wqe, status);
146970696ea7SHarish Chegondi rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1470f931551bSRalph Campbell }
1471f931551bSRalph Campbell ack_done:
1472f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
1473f931551bSRalph Campbell bail:
1474f931551bSRalph Campbell return;
1475f931551bSRalph Campbell }
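
/*
 * Illustrative sketch, not part of the original driver: the length
 * checks in qib_rc_rcv_resp() above account for the 4-byte ICRC on
 * every packet and the 4-byte AETH on ONLY/LAST read responses
 * (hence the "+ 8" there). A middle read response carries no AETH
 * and exactly one PMTU of payload.
 */
static inline u32 example_read_middle_tlen(u32 hdrsize, u32 pmtu)
{
	return hdrsize + pmtu + 4;		/* payload + ICRC */
}

static inline u32 example_read_last_payload(u32 tlen, u32 hdrsize, u32 pad)
{
	return tlen - (hdrsize + pad + 8);	/* strip pad, AETH and ICRC */
}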
1476f931551bSRalph Campbell
1477f931551bSRalph Campbell /**
1478f931551bSRalph Campbell * qib_rc_rcv_error - process an incoming duplicate or error RC packet
1479f931551bSRalph Campbell * @ohdr: the other headers for this packet
1480f931551bSRalph Campbell * @data: the packet data
1481f931551bSRalph Campbell * @qp: the QP for this packet
1482f931551bSRalph Campbell * @opcode: the opcode for this packet
1483f931551bSRalph Campbell * @psn: the packet sequence number for this packet
1484f931551bSRalph Campbell * @diff: the difference between the PSN and the expected PSN
1485*24d02e04SLee Jones * @rcd: the context pointer
1486f931551bSRalph Campbell *
1487f931551bSRalph Campbell * This is called from qib_rc_rcv() to process an unexpected
1488f931551bSRalph Campbell * incoming RC packet for the given QP.
1489f931551bSRalph Campbell * Called at interrupt level.
1490f931551bSRalph Campbell * Return 1 if no more processing is needed; otherwise return 0 to
1491f931551bSRalph Campbell * schedule a response to be sent.
1492f931551bSRalph Campbell */
1493261a4351SMike Marciniszyn static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
1494f931551bSRalph Campbell void *data,
14957c2e11feSDennis Dalessandro struct rvt_qp *qp,
1496f931551bSRalph Campbell u32 opcode,
1497f931551bSRalph Campbell u32 psn,
1498f931551bSRalph Campbell int diff,
1499f931551bSRalph Campbell struct qib_ctxtdata *rcd)
1500f931551bSRalph Campbell {
1501f931551bSRalph Campbell struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
15027c2e11feSDennis Dalessandro struct rvt_ack_entry *e;
1503f931551bSRalph Campbell unsigned long flags;
1504f931551bSRalph Campbell u8 i, prev;
1505f931551bSRalph Campbell int old_req;
1506f931551bSRalph Campbell
1507f931551bSRalph Campbell if (diff > 0) {
1508f931551bSRalph Campbell /*
1509f931551bSRalph Campbell * Packet sequence error.
1510f931551bSRalph Campbell * A NAK will ACK earlier sends and RDMA writes.
1511f931551bSRalph Campbell * Don't queue the NAK if we already sent one.
1512f931551bSRalph Campbell */
1513f931551bSRalph Campbell if (!qp->r_nak_state) {
1514f24a6d48SHarish Chegondi ibp->rvp.n_rc_seqnak++;
1515f931551bSRalph Campbell qp->r_nak_state = IB_NAK_PSN_ERROR;
1516f931551bSRalph Campbell /* Use the expected PSN. */
1517f931551bSRalph Campbell qp->r_ack_psn = qp->r_psn;
1518f931551bSRalph Campbell /*
1519f931551bSRalph Campbell * Wait to send the sequence NAK until all packets
1520f931551bSRalph Campbell * in the receive queue have been processed.
1521f931551bSRalph Campbell * Otherwise, we end up propagating congestion.
1522f931551bSRalph Campbell */
1523f931551bSRalph Campbell if (list_empty(&qp->rspwait)) {
152401ba79d4SHarish Chegondi qp->r_flags |= RVT_R_RSP_NAK;
15254d6f85c3SMike Marciniszyn rvt_get_qp(qp);
1526f931551bSRalph Campbell list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1527f931551bSRalph Campbell }
1528f931551bSRalph Campbell }
1529f931551bSRalph Campbell goto done;
1530f931551bSRalph Campbell }
1531f931551bSRalph Campbell
1532f931551bSRalph Campbell /*
1533f931551bSRalph Campbell * Handle a duplicate request. Don't re-execute SEND, RDMA
1534f931551bSRalph Campbell * write or atomic op. Don't NAK errors, just silently drop
1535f931551bSRalph Campbell * the duplicate request. Note that r_sge, r_len, and
1536f931551bSRalph Campbell * r_rcv_len may be in use so don't modify them.
1537f931551bSRalph Campbell *
1538f931551bSRalph Campbell * We are supposed to ACK the earliest duplicate PSN but we
1539f931551bSRalph Campbell * can coalesce an outstanding duplicate ACK. We have to
1540f931551bSRalph Campbell * send the earliest so that RDMA reads can be restarted at
1541f931551bSRalph Campbell * the requester's expected PSN.
1542f931551bSRalph Campbell *
1543f931551bSRalph Campbell * First, find where this duplicate PSN falls within the
1544f931551bSRalph Campbell * ACKs previously sent.
1545f931551bSRalph Campbell * old_req is true if there is an older response that is scheduled
1546f931551bSRalph Campbell * to be sent before sending this one.
1547f931551bSRalph Campbell */
1548f931551bSRalph Campbell e = NULL;
1549f931551bSRalph Campbell old_req = 1;
1550f24a6d48SHarish Chegondi ibp->rvp.n_rc_dupreq++;
1551f931551bSRalph Campbell
1552f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags);
1553f931551bSRalph Campbell
1554f931551bSRalph Campbell for (i = qp->r_head_ack_queue; ; i = prev) {
1555f931551bSRalph Campbell if (i == qp->s_tail_ack_queue)
1556f931551bSRalph Campbell old_req = 0;
1557f931551bSRalph Campbell if (i)
1558f931551bSRalph Campbell prev = i - 1;
1559f931551bSRalph Campbell else
1560f931551bSRalph Campbell prev = QIB_MAX_RDMA_ATOMIC;
1561f931551bSRalph Campbell if (prev == qp->r_head_ack_queue) {
1562f931551bSRalph Campbell e = NULL;
1563f931551bSRalph Campbell break;
1564f931551bSRalph Campbell }
1565f931551bSRalph Campbell e = &qp->s_ack_queue[prev];
1566f931551bSRalph Campbell if (!e->opcode) {
1567f931551bSRalph Campbell e = NULL;
1568f931551bSRalph Campbell break;
1569f931551bSRalph Campbell }
1570f931551bSRalph Campbell if (qib_cmp24(psn, e->psn) >= 0) {
1571f931551bSRalph Campbell if (prev == qp->s_tail_ack_queue &&
1572f931551bSRalph Campbell qib_cmp24(psn, e->lpsn) <= 0)
1573f931551bSRalph Campbell old_req = 0;
1574f931551bSRalph Campbell break;
1575f931551bSRalph Campbell }
1576f931551bSRalph Campbell }
1577f931551bSRalph Campbell switch (opcode) {
1578f931551bSRalph Campbell case OP(RDMA_READ_REQUEST): {
1579f931551bSRalph Campbell struct ib_reth *reth;
1580f931551bSRalph Campbell u32 offset;
1581f931551bSRalph Campbell u32 len;
1582f931551bSRalph Campbell
1583f931551bSRalph Campbell /*
1584f931551bSRalph Campbell * If we didn't find the RDMA read request in the ack queue,
1585f931551bSRalph Campbell * we can ignore this request.
1586f931551bSRalph Campbell */
1587f931551bSRalph Campbell if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1588f931551bSRalph Campbell goto unlock_done;
1589f931551bSRalph Campbell /* RETH comes after BTH */
1590f931551bSRalph Campbell reth = &ohdr->u.rc.reth;
1591f931551bSRalph Campbell /*
1592f931551bSRalph Campbell * Address range must be a subset of the original
1593f931551bSRalph Campbell * request and start on pmtu boundaries.
1594f931551bSRalph Campbell * We reuse the old ack_queue slot since the requester
1595f931551bSRalph Campbell * should not back up and request an earlier PSN for the
1596f931551bSRalph Campbell * same request.
1597f931551bSRalph Campbell */
1598f931551bSRalph Campbell offset = ((psn - e->psn) & QIB_PSN_MASK) *
1599cc6ea138SMike Marciniszyn qp->pmtu;
1600f931551bSRalph Campbell len = be32_to_cpu(reth->length);
1601f931551bSRalph Campbell if (unlikely(offset + len != e->rdma_sge.sge_length))
1602f931551bSRalph Campbell goto unlock_done;
1603f931551bSRalph Campbell if (e->rdma_sge.mr) {
16047c2e11feSDennis Dalessandro rvt_put_mr(e->rdma_sge.mr);
1605f931551bSRalph Campbell e->rdma_sge.mr = NULL;
1606f931551bSRalph Campbell }
1607f931551bSRalph Campbell if (len != 0) {
1608f931551bSRalph Campbell u32 rkey = be32_to_cpu(reth->rkey);
1609f931551bSRalph Campbell u64 vaddr = be64_to_cpu(reth->vaddr);
1610f931551bSRalph Campbell int ok;
1611f931551bSRalph Campbell
16127c2e11feSDennis Dalessandro ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1613f931551bSRalph Campbell IB_ACCESS_REMOTE_READ);
1614f931551bSRalph Campbell if (unlikely(!ok))
1615f931551bSRalph Campbell goto unlock_done;
1616f931551bSRalph Campbell } else {
1617f931551bSRalph Campbell e->rdma_sge.vaddr = NULL;
1618f931551bSRalph Campbell e->rdma_sge.length = 0;
1619f931551bSRalph Campbell e->rdma_sge.sge_length = 0;
1620f931551bSRalph Campbell }
1621f931551bSRalph Campbell e->psn = psn;
1622f931551bSRalph Campbell if (old_req)
1623f931551bSRalph Campbell goto unlock_done;
1624f931551bSRalph Campbell qp->s_tail_ack_queue = prev;
1625f931551bSRalph Campbell break;
1626f931551bSRalph Campbell }
1627f931551bSRalph Campbell
1628f931551bSRalph Campbell case OP(COMPARE_SWAP):
1629f931551bSRalph Campbell case OP(FETCH_ADD): {
1630f931551bSRalph Campbell /*
1631f931551bSRalph Campbell * If we didn't find the atomic request in the ack queue
1632f931551bSRalph Campbell * or the send tasklet is already backed up to send an
1633f931551bSRalph Campbell * earlier entry, we can ignore this request.
1634f931551bSRalph Campbell */
1635f931551bSRalph Campbell if (!e || e->opcode != (u8) opcode || old_req)
1636f931551bSRalph Campbell goto unlock_done;
1637f931551bSRalph Campbell qp->s_tail_ack_queue = prev;
1638f931551bSRalph Campbell break;
1639f931551bSRalph Campbell }
1640f931551bSRalph Campbell
1641f931551bSRalph Campbell default:
1642f931551bSRalph Campbell /*
1643f931551bSRalph Campbell * Ignore this operation if it doesn't request an ACK
1644f931551bSRalph Campbell * or an earlier RDMA read or atomic is going to be resent.
1645f931551bSRalph Campbell */
1646f931551bSRalph Campbell if (!(psn & IB_BTH_REQ_ACK) || old_req)
1647f931551bSRalph Campbell goto unlock_done;
1648f931551bSRalph Campbell /*
1649f931551bSRalph Campbell * Resend the most recent ACK if this request is
1650f931551bSRalph Campbell * after all the previous RDMA reads and atomics.
1651f931551bSRalph Campbell */
1652f931551bSRalph Campbell if (i == qp->r_head_ack_queue) {
1653f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
1654f931551bSRalph Campbell qp->r_nak_state = 0;
1655f931551bSRalph Campbell qp->r_ack_psn = qp->r_psn - 1;
1656f931551bSRalph Campbell goto send_ack;
1657f931551bSRalph Campbell }
1658f931551bSRalph Campbell /*
1659f931551bSRalph Campbell * Try to send a simple ACK to work around a Mellanox bug
1660f931551bSRalph Campbell * which doesn't accept an RDMA read response or atomic
1661f931551bSRalph Campbell * response as an ACK for earlier SENDs or RDMA writes.
1662f931551bSRalph Campbell */
166301ba79d4SHarish Chegondi if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
1664f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
1665f931551bSRalph Campbell qp->r_nak_state = 0;
1666f931551bSRalph Campbell qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1667f931551bSRalph Campbell goto send_ack;
1668f931551bSRalph Campbell }
1669f931551bSRalph Campbell /*
1670f931551bSRalph Campbell * Resend the RDMA read or atomic op which
1671f931551bSRalph Campbell * ACKs this duplicate request.
1672f931551bSRalph Campbell */
1673f931551bSRalph Campbell qp->s_tail_ack_queue = i;
1674f931551bSRalph Campbell break;
1675f931551bSRalph Campbell }
1676f931551bSRalph Campbell qp->s_ack_state = OP(ACKNOWLEDGE);
167701ba79d4SHarish Chegondi qp->s_flags |= RVT_S_RESP_PENDING;
1678f931551bSRalph Campbell qp->r_nak_state = 0;
1679f931551bSRalph Campbell qib_schedule_send(qp);
1680f931551bSRalph Campbell
1681f931551bSRalph Campbell unlock_done:
1682f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
1683f931551bSRalph Campbell done:
1684f931551bSRalph Campbell return 1;
1685f931551bSRalph Campbell
1686f931551bSRalph Campbell send_ack:
1687f931551bSRalph Campbell return 0;
1688f931551bSRalph Campbell }
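
/*
 * Illustrative sketch, not part of the original driver: the
 * duplicate-request scan in qib_rc_rcv_error() above walks the
 * s_ack_queue ring backwards from r_head_ack_queue. The ring has
 * QIB_MAX_RDMA_ATOMIC + 1 slots, so stepping back from slot 0 wraps
 * to slot QIB_MAX_RDMA_ATOMIC.
 */
static inline unsigned int example_ack_queue_prev(unsigned int i)
{
	return i ? i - 1 : QIB_MAX_RDMA_ATOMIC;
}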
1689f931551bSRalph Campbell
16907c2e11feSDennis Dalessandro static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
1691f931551bSRalph Campbell {
1692f931551bSRalph Campbell unsigned next;
1693f931551bSRalph Campbell
1694f931551bSRalph Campbell next = n + 1;
1695f931551bSRalph Campbell if (next > QIB_MAX_RDMA_ATOMIC)
1696f931551bSRalph Campbell next = 0;
1697f931551bSRalph Campbell qp->s_tail_ack_queue = next;
1698f931551bSRalph Campbell qp->s_ack_state = OP(ACKNOWLEDGE);
1699f931551bSRalph Campbell }
1700f931551bSRalph Campbell
1701f931551bSRalph Campbell /**
1702f931551bSRalph Campbell * qib_rc_rcv - process an incoming RC packet
1703f931551bSRalph Campbell * @rcd: the context pointer
1704f931551bSRalph Campbell * @hdr: the header of this packet
1705f931551bSRalph Campbell * @has_grh: true if the header has a GRH
1706f931551bSRalph Campbell * @data: the packet data
1707f931551bSRalph Campbell * @tlen: the packet length
1708f931551bSRalph Campbell * @qp: the QP for this packet
1709f931551bSRalph Campbell *
1710f931551bSRalph Campbell * This is called from qib_qp_rcv() to process an incoming RC packet
1711f931551bSRalph Campbell * for the given QP.
1712f931551bSRalph Campbell * Called at interrupt level.
1713f931551bSRalph Campbell */
1714261a4351SMike Marciniszyn void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
17157c2e11feSDennis Dalessandro int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
1716f931551bSRalph Campbell {
1717f931551bSRalph Campbell struct qib_ibport *ibp = &rcd->ppd->ibport_data;
1718261a4351SMike Marciniszyn struct ib_other_headers *ohdr;
1719f931551bSRalph Campbell u32 opcode;
1720f931551bSRalph Campbell u32 hdrsize;
1721f931551bSRalph Campbell u32 psn;
1722f931551bSRalph Campbell u32 pad;
1723f931551bSRalph Campbell struct ib_wc wc;
1724cc6ea138SMike Marciniszyn u32 pmtu = qp->pmtu;
1725f931551bSRalph Campbell int diff;
1726f931551bSRalph Campbell struct ib_reth *reth;
1727f931551bSRalph Campbell unsigned long flags;
1728f931551bSRalph Campbell int ret;
1729f931551bSRalph Campbell
1730f931551bSRalph Campbell /* Check for GRH */
1731f931551bSRalph Campbell if (!has_grh) {
1732f931551bSRalph Campbell ohdr = &hdr->u.oth;
1733f931551bSRalph Campbell hdrsize = 8 + 12; /* LRH + BTH */
1734f931551bSRalph Campbell } else {
1735f931551bSRalph Campbell ohdr = &hdr->u.l.oth;
1736f931551bSRalph Campbell hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1737f931551bSRalph Campbell }
1738f931551bSRalph Campbell
1739f931551bSRalph Campbell opcode = be32_to_cpu(ohdr->bth[0]);
1740f931551bSRalph Campbell if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
17419fd5473dSMike Marciniszyn return;
1742f931551bSRalph Campbell
1743f931551bSRalph Campbell psn = be32_to_cpu(ohdr->bth[2]);
1744f931551bSRalph Campbell opcode >>= 24;
1745f931551bSRalph Campbell
1746f931551bSRalph Campbell /*
1747f931551bSRalph Campbell * Process responses (ACKs) before anything else. Note that the
1748f931551bSRalph Campbell * packet sequence number will be for something in the send work
1749f931551bSRalph Campbell * queue rather than the expected receive packet sequence number.
1750f931551bSRalph Campbell * In other words, this QP is the requester.
1751f931551bSRalph Campbell */
1752f931551bSRalph Campbell if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1753f931551bSRalph Campbell opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1754f931551bSRalph Campbell qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1755f931551bSRalph Campbell hdrsize, pmtu, rcd);
1756a5210c12SRalph Campbell return;
1757f931551bSRalph Campbell }
1758f931551bSRalph Campbell
1759f931551bSRalph Campbell /* Compute 24 bits worth of difference. */
1760f931551bSRalph Campbell diff = qib_cmp24(psn, qp->r_psn);
1761f931551bSRalph Campbell if (unlikely(diff)) {
1762f931551bSRalph Campbell if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1763a5210c12SRalph Campbell return;
1764f931551bSRalph Campbell goto send_ack;
1765f931551bSRalph Campbell }
1766f931551bSRalph Campbell
1767f931551bSRalph Campbell /* Check for opcode sequence errors. */
1768f931551bSRalph Campbell switch (qp->r_state) {
1769f931551bSRalph Campbell case OP(SEND_FIRST):
1770f931551bSRalph Campbell case OP(SEND_MIDDLE):
1771f931551bSRalph Campbell if (opcode == OP(SEND_MIDDLE) ||
1772f931551bSRalph Campbell opcode == OP(SEND_LAST) ||
1773f931551bSRalph Campbell opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1774f931551bSRalph Campbell break;
1775f931551bSRalph Campbell goto nack_inv;
1776f931551bSRalph Campbell
1777f931551bSRalph Campbell case OP(RDMA_WRITE_FIRST):
1778f931551bSRalph Campbell case OP(RDMA_WRITE_MIDDLE):
1779f931551bSRalph Campbell if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1780f931551bSRalph Campbell opcode == OP(RDMA_WRITE_LAST) ||
1781f931551bSRalph Campbell opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1782f931551bSRalph Campbell break;
1783f931551bSRalph Campbell goto nack_inv;
1784f931551bSRalph Campbell
1785f931551bSRalph Campbell default:
1786f931551bSRalph Campbell if (opcode == OP(SEND_MIDDLE) ||
1787f931551bSRalph Campbell opcode == OP(SEND_LAST) ||
1788f931551bSRalph Campbell opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1789f931551bSRalph Campbell opcode == OP(RDMA_WRITE_MIDDLE) ||
1790f931551bSRalph Campbell opcode == OP(RDMA_WRITE_LAST) ||
1791f931551bSRalph Campbell opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1792f931551bSRalph Campbell goto nack_inv;
1793f931551bSRalph Campbell /*
1794f931551bSRalph Campbell * Note that it is up to the requester to not send a new
1795f931551bSRalph Campbell * RDMA read or atomic operation before receiving an ACK
1796f931551bSRalph Campbell * for the previous operation.
1797f931551bSRalph Campbell */
1798f931551bSRalph Campbell break;
1799f931551bSRalph Campbell }
1800f931551bSRalph Campbell
1801beb5a042SBrian Welty if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
1802beb5a042SBrian Welty rvt_comm_est(qp);
1803f931551bSRalph Campbell
1804f931551bSRalph Campbell /* OK, process the packet. */
1805f931551bSRalph Campbell switch (opcode) {
1806f931551bSRalph Campbell case OP(SEND_FIRST):
1807832369faSBrian Welty ret = rvt_get_rwqe(qp, false);
1808f931551bSRalph Campbell if (ret < 0)
1809f931551bSRalph Campbell goto nack_op_err;
1810f931551bSRalph Campbell if (!ret)
1811f931551bSRalph Campbell goto rnr_nak;
1812f931551bSRalph Campbell qp->r_rcv_len = 0;
1813df561f66SGustavo A. R. Silva fallthrough;
1814f931551bSRalph Campbell case OP(SEND_MIDDLE):
1815f931551bSRalph Campbell case OP(RDMA_WRITE_MIDDLE):
1816f931551bSRalph Campbell send_middle:
1817f931551bSRalph Campbell /* Check for invalid length PMTU or posted rwqe len. */
1818f931551bSRalph Campbell if (unlikely(tlen != (hdrsize + pmtu + 4)))
1819f931551bSRalph Campbell goto nack_inv;
1820f931551bSRalph Campbell qp->r_rcv_len += pmtu;
1821f931551bSRalph Campbell if (unlikely(qp->r_rcv_len > qp->r_len))
1822f931551bSRalph Campbell goto nack_inv;
1823019f118bSBrian Welty rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
1824f931551bSRalph Campbell break;
1825f931551bSRalph Campbell
1826f931551bSRalph Campbell case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1827f931551bSRalph Campbell /* consume RWQE */
1828832369faSBrian Welty ret = rvt_get_rwqe(qp, true);
1829f931551bSRalph Campbell if (ret < 0)
1830f931551bSRalph Campbell goto nack_op_err;
1831f931551bSRalph Campbell if (!ret)
1832f931551bSRalph Campbell goto rnr_nak;
1833f931551bSRalph Campbell goto send_last_imm;
1834f931551bSRalph Campbell
1835f931551bSRalph Campbell case OP(SEND_ONLY):
1836f931551bSRalph Campbell case OP(SEND_ONLY_WITH_IMMEDIATE):
1837832369faSBrian Welty ret = rvt_get_rwqe(qp, false);
1838f931551bSRalph Campbell if (ret < 0)
1839f931551bSRalph Campbell goto nack_op_err;
1840f931551bSRalph Campbell if (!ret)
1841f931551bSRalph Campbell goto rnr_nak;
1842f931551bSRalph Campbell qp->r_rcv_len = 0;
1843f931551bSRalph Campbell if (opcode == OP(SEND_ONLY))
18442fc109c8SMike Marciniszyn goto no_immediate_data;
1845df561f66SGustavo A. R. Silva fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
1846f931551bSRalph Campbell case OP(SEND_LAST_WITH_IMMEDIATE):
1847f931551bSRalph Campbell send_last_imm:
1848f931551bSRalph Campbell wc.ex.imm_data = ohdr->u.imm_data;
1849f931551bSRalph Campbell hdrsize += 4;
1850f931551bSRalph Campbell wc.wc_flags = IB_WC_WITH_IMM;
18512fc109c8SMike Marciniszyn goto send_last;
1852f931551bSRalph Campbell case OP(SEND_LAST):
1853f931551bSRalph Campbell case OP(RDMA_WRITE_LAST):
18542fc109c8SMike Marciniszyn no_immediate_data:
18552fc109c8SMike Marciniszyn wc.wc_flags = 0;
18562fc109c8SMike Marciniszyn wc.ex.imm_data = 0;
1857f931551bSRalph Campbell send_last:
1858f931551bSRalph Campbell /* Get the number of bytes the message was padded by. */
1859f931551bSRalph Campbell pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1860f931551bSRalph Campbell /* Check for invalid length. */
1861f931551bSRalph Campbell /* XXX LAST len should be >= 1 */
1862f931551bSRalph Campbell if (unlikely(tlen < (hdrsize + pad + 4)))
1863f931551bSRalph Campbell goto nack_inv;
1864f931551bSRalph Campbell /* Don't count the CRC. */
1865f931551bSRalph Campbell tlen -= (hdrsize + pad + 4);
1866f931551bSRalph Campbell wc.byte_len = tlen + qp->r_rcv_len;
1867f931551bSRalph Campbell if (unlikely(wc.byte_len > qp->r_len))
1868f931551bSRalph Campbell goto nack_inv;
1869019f118bSBrian Welty rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
187070696ea7SHarish Chegondi rvt_put_ss(&qp->r_sge);
1871f931551bSRalph Campbell qp->r_msn++;
187201ba79d4SHarish Chegondi if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
1873f931551bSRalph Campbell break;
1874f931551bSRalph Campbell wc.wr_id = qp->r_wr_id;
1875f931551bSRalph Campbell wc.status = IB_WC_SUCCESS;
1876f931551bSRalph Campbell if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1877f931551bSRalph Campbell opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1878f931551bSRalph Campbell wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1879f931551bSRalph Campbell else
1880f931551bSRalph Campbell wc.opcode = IB_WC_RECV;
1881f931551bSRalph Campbell wc.qp = &qp->ibqp;
1882f931551bSRalph Campbell wc.src_qp = qp->remote_qpn;
1883d8966fcdSDasaratharaman Chandramouli wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
1884d8966fcdSDasaratharaman Chandramouli wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
18852fc109c8SMike Marciniszyn /* zero fields that are N/A */
18862fc109c8SMike Marciniszyn wc.vendor_err = 0;
18872fc109c8SMike Marciniszyn wc.pkey_index = 0;
18882fc109c8SMike Marciniszyn wc.dlid_path_bits = 0;
18892fc109c8SMike Marciniszyn wc.port_num = 0;
1890f931551bSRalph Campbell /* Signal completion event if the solicited bit is set. */
18915136bfeaSKamenee Arumugam rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
1892f931551bSRalph Campbell break;
1893f931551bSRalph Campbell
1894f931551bSRalph Campbell case OP(RDMA_WRITE_FIRST):
1895f931551bSRalph Campbell case OP(RDMA_WRITE_ONLY):
1896f931551bSRalph Campbell case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1897f931551bSRalph Campbell if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
1898f931551bSRalph Campbell goto nack_inv;
1899f931551bSRalph Campbell /* consume RWQE */
1900f931551bSRalph Campbell reth = &ohdr->u.rc.reth;
1901f931551bSRalph Campbell hdrsize += sizeof(*reth);
1902f931551bSRalph Campbell qp->r_len = be32_to_cpu(reth->length);
1903f931551bSRalph Campbell qp->r_rcv_len = 0;
1904f931551bSRalph Campbell qp->r_sge.sg_list = NULL;
1905f931551bSRalph Campbell if (qp->r_len != 0) {
1906f931551bSRalph Campbell u32 rkey = be32_to_cpu(reth->rkey);
1907f931551bSRalph Campbell u64 vaddr = be64_to_cpu(reth->vaddr);
1908f931551bSRalph Campbell int ok;
1909f931551bSRalph Campbell
1910f931551bSRalph Campbell /* Check rkey & NAK */
19117c2e11feSDennis Dalessandro ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
1912f931551bSRalph Campbell rkey, IB_ACCESS_REMOTE_WRITE);
1913f931551bSRalph Campbell if (unlikely(!ok))
1914f931551bSRalph Campbell goto nack_acc;
1915f931551bSRalph Campbell qp->r_sge.num_sge = 1;
1916f931551bSRalph Campbell } else {
1917f931551bSRalph Campbell qp->r_sge.num_sge = 0;
1918f931551bSRalph Campbell qp->r_sge.sge.mr = NULL;
1919f931551bSRalph Campbell qp->r_sge.sge.vaddr = NULL;
1920f931551bSRalph Campbell qp->r_sge.sge.length = 0;
1921f931551bSRalph Campbell qp->r_sge.sge.sge_length = 0;
1922f931551bSRalph Campbell }
1923f931551bSRalph Campbell if (opcode == OP(RDMA_WRITE_FIRST))
1924f931551bSRalph Campbell goto send_middle;
1925f931551bSRalph Campbell else if (opcode == OP(RDMA_WRITE_ONLY))
19262fc109c8SMike Marciniszyn goto no_immediate_data;
1927832369faSBrian Welty ret = rvt_get_rwqe(qp, true);
1928f931551bSRalph Campbell if (ret < 0)
1929f931551bSRalph Campbell goto nack_op_err;
19301feb4006SMike Marciniszyn if (!ret) {
19311feb4006SMike Marciniszyn rvt_put_ss(&qp->r_sge);
1932f931551bSRalph Campbell goto rnr_nak;
19331feb4006SMike Marciniszyn }
19345715f5d4SJason Gunthorpe wc.ex.imm_data = ohdr->u.rc.imm_data;
19355715f5d4SJason Gunthorpe hdrsize += 4;
19365715f5d4SJason Gunthorpe wc.wc_flags = IB_WC_WITH_IMM;
19375715f5d4SJason Gunthorpe goto send_last;
1938f931551bSRalph Campbell
1939f931551bSRalph Campbell case OP(RDMA_READ_REQUEST): {
19407c2e11feSDennis Dalessandro struct rvt_ack_entry *e;
1941f931551bSRalph Campbell u32 len;
1942f931551bSRalph Campbell u8 next;
1943f931551bSRalph Campbell
1944f931551bSRalph Campbell if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
1945f931551bSRalph Campbell goto nack_inv;
1946f931551bSRalph Campbell next = qp->r_head_ack_queue + 1;
1947f931551bSRalph Campbell /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
1948f931551bSRalph Campbell if (next > QIB_MAX_RDMA_ATOMIC)
1949f931551bSRalph Campbell next = 0;
1950f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags);
1951f931551bSRalph Campbell if (unlikely(next == qp->s_tail_ack_queue)) {
1952f931551bSRalph Campbell if (!qp->s_ack_queue[next].sent)
1953f931551bSRalph Campbell goto nack_inv_unlck;
1954f931551bSRalph Campbell qib_update_ack_queue(qp, next);
1955f931551bSRalph Campbell }
1956f931551bSRalph Campbell e = &qp->s_ack_queue[qp->r_head_ack_queue];
1957f931551bSRalph Campbell if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
19587c2e11feSDennis Dalessandro rvt_put_mr(e->rdma_sge.mr);
1959f931551bSRalph Campbell e->rdma_sge.mr = NULL;
1960f931551bSRalph Campbell }
1961f931551bSRalph Campbell reth = &ohdr->u.rc.reth;
1962f931551bSRalph Campbell len = be32_to_cpu(reth->length);
1963f931551bSRalph Campbell if (len) {
1964f931551bSRalph Campbell u32 rkey = be32_to_cpu(reth->rkey);
1965f931551bSRalph Campbell u64 vaddr = be64_to_cpu(reth->vaddr);
1966f931551bSRalph Campbell int ok;
1967f931551bSRalph Campbell
1968f931551bSRalph Campbell /* Check rkey & NAK */
19697c2e11feSDennis Dalessandro ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
1970f931551bSRalph Campbell rkey, IB_ACCESS_REMOTE_READ);
1971f931551bSRalph Campbell if (unlikely(!ok))
1972f931551bSRalph Campbell goto nack_acc_unlck;
1973f931551bSRalph Campbell /*
1974f931551bSRalph Campbell * Update the next expected PSN. We add 1 later
1975f931551bSRalph Campbell * below, so only add the remainder here.
1976f931551bSRalph Campbell */
19775dc80605SMike Marciniszyn qp->r_psn += rvt_div_mtu(qp, len - 1);
1978f931551bSRalph Campbell } else {
1979f931551bSRalph Campbell e->rdma_sge.mr = NULL;
1980f931551bSRalph Campbell e->rdma_sge.vaddr = NULL;
1981f931551bSRalph Campbell e->rdma_sge.length = 0;
1982f931551bSRalph Campbell e->rdma_sge.sge_length = 0;
1983f931551bSRalph Campbell }
1984f931551bSRalph Campbell e->opcode = opcode;
1985f931551bSRalph Campbell e->sent = 0;
1986f931551bSRalph Campbell e->psn = psn;
1987f931551bSRalph Campbell e->lpsn = qp->r_psn;
1988f931551bSRalph Campbell /*
1989f931551bSRalph Campbell * We need to increment the MSN here instead of when we
1990f931551bSRalph Campbell * finish sending the result since a duplicate request would
1991f931551bSRalph Campbell * increment it more than once.
1992f931551bSRalph Campbell */
1993f931551bSRalph Campbell qp->r_msn++;
1994f931551bSRalph Campbell qp->r_psn++;
1995f931551bSRalph Campbell qp->r_state = opcode;
1996f931551bSRalph Campbell qp->r_nak_state = 0;
1997f931551bSRalph Campbell qp->r_head_ack_queue = next;
1998f931551bSRalph Campbell
1999f931551bSRalph Campbell /* Schedule the send tasklet. */
200001ba79d4SHarish Chegondi qp->s_flags |= RVT_S_RESP_PENDING;
2001f931551bSRalph Campbell qib_schedule_send(qp);
2002f931551bSRalph Campbell
2003a5210c12SRalph Campbell goto sunlock;
2004f931551bSRalph Campbell }
2005f931551bSRalph Campbell
2006f931551bSRalph Campbell case OP(COMPARE_SWAP):
2007f931551bSRalph Campbell case OP(FETCH_ADD): {
2008f931551bSRalph Campbell struct ib_atomic_eth *ateth;
20097c2e11feSDennis Dalessandro struct rvt_ack_entry *e;
2010f931551bSRalph Campbell u64 vaddr;
2011f931551bSRalph Campbell atomic64_t *maddr;
2012f931551bSRalph Campbell u64 sdata;
2013f931551bSRalph Campbell u32 rkey;
2014f931551bSRalph Campbell u8 next;
2015f931551bSRalph Campbell
2016f931551bSRalph Campbell if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2017f931551bSRalph Campbell goto nack_inv;
2018f931551bSRalph Campbell next = qp->r_head_ack_queue + 1;
2019f931551bSRalph Campbell if (next > QIB_MAX_RDMA_ATOMIC)
2020f931551bSRalph Campbell next = 0;
2021f931551bSRalph Campbell spin_lock_irqsave(&qp->s_lock, flags);
2022f931551bSRalph Campbell if (unlikely(next == qp->s_tail_ack_queue)) {
2023f931551bSRalph Campbell if (!qp->s_ack_queue[next].sent)
2024f931551bSRalph Campbell goto nack_inv_unlck;
2025f931551bSRalph Campbell qib_update_ack_queue(qp, next);
2026f931551bSRalph Campbell }
2027f931551bSRalph Campbell e = &qp->s_ack_queue[qp->r_head_ack_queue];
2028f931551bSRalph Campbell if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
20297c2e11feSDennis Dalessandro rvt_put_mr(e->rdma_sge.mr);
2030f931551bSRalph Campbell e->rdma_sge.mr = NULL;
2031f931551bSRalph Campbell }
2032f931551bSRalph Campbell ateth = &ohdr->u.atomic_eth;
2033261a4351SMike Marciniszyn vaddr = get_ib_ateth_vaddr(ateth);
2034f931551bSRalph Campbell if (unlikely(vaddr & (sizeof(u64) - 1)))
2035f931551bSRalph Campbell goto nack_inv_unlck;
2036f931551bSRalph Campbell rkey = be32_to_cpu(ateth->rkey);
2037f931551bSRalph Campbell /* Check rkey & NAK */
20387c2e11feSDennis Dalessandro if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2039f931551bSRalph Campbell vaddr, rkey,
2040f931551bSRalph Campbell IB_ACCESS_REMOTE_ATOMIC)))
2041f931551bSRalph Campbell goto nack_acc_unlck;
2042f931551bSRalph Campbell /* Perform atomic OP and save result. */
2043f931551bSRalph Campbell maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2044261a4351SMike Marciniszyn sdata = get_ib_ateth_swap(ateth);
2045f931551bSRalph Campbell e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2046f931551bSRalph Campbell (u64) atomic64_add_return(sdata, maddr) - sdata :
2047f931551bSRalph Campbell (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2048261a4351SMike Marciniszyn get_ib_ateth_compare(ateth),
2049f931551bSRalph Campbell sdata);
20507c2e11feSDennis Dalessandro rvt_put_mr(qp->r_sge.sge.mr);
2051f931551bSRalph Campbell qp->r_sge.num_sge = 0;
2052f931551bSRalph Campbell e->opcode = opcode;
2053f931551bSRalph Campbell e->sent = 0;
2054f931551bSRalph Campbell e->psn = psn;
2055f931551bSRalph Campbell e->lpsn = psn;
2056f931551bSRalph Campbell qp->r_msn++;
2057f931551bSRalph Campbell qp->r_psn++;
2058f931551bSRalph Campbell qp->r_state = opcode;
2059f931551bSRalph Campbell qp->r_nak_state = 0;
2060f931551bSRalph Campbell qp->r_head_ack_queue = next;
2061f931551bSRalph Campbell
2062f931551bSRalph Campbell /* Schedule the send tasklet. */
206301ba79d4SHarish Chegondi qp->s_flags |= RVT_S_RESP_PENDING;
2064f931551bSRalph Campbell qib_schedule_send(qp);
2065f931551bSRalph Campbell
2066a5210c12SRalph Campbell goto sunlock;
2067f931551bSRalph Campbell }
2068f931551bSRalph Campbell
2069f931551bSRalph Campbell default:
2070f931551bSRalph Campbell /* NAK unknown opcodes. */
2071f931551bSRalph Campbell goto nack_inv;
2072f931551bSRalph Campbell }
2073f931551bSRalph Campbell qp->r_psn++;
2074f931551bSRalph Campbell qp->r_state = opcode;
2075f931551bSRalph Campbell qp->r_ack_psn = psn;
2076f931551bSRalph Campbell qp->r_nak_state = 0;
2077f931551bSRalph Campbell /* Send an ACK if requested or required. */
2078f931551bSRalph Campbell if (psn & (1 << 31))
2079f931551bSRalph Campbell goto send_ack;
2080a5210c12SRalph Campbell return;
2081f931551bSRalph Campbell
2082f931551bSRalph Campbell rnr_nak:
2083f931551bSRalph Campbell qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2084f931551bSRalph Campbell qp->r_ack_psn = qp->r_psn;
2085f931551bSRalph Campbell /* Queue RNR NAK for later */
2086f931551bSRalph Campbell if (list_empty(&qp->rspwait)) {
208701ba79d4SHarish Chegondi qp->r_flags |= RVT_R_RSP_NAK;
20884d6f85c3SMike Marciniszyn rvt_get_qp(qp);
2089f931551bSRalph Campbell list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2090f931551bSRalph Campbell }
2091a5210c12SRalph Campbell return;
2092f931551bSRalph Campbell
2093f931551bSRalph Campbell nack_op_err:
2094beb5a042SBrian Welty rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2095f931551bSRalph Campbell qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2096f931551bSRalph Campbell qp->r_ack_psn = qp->r_psn;
2097f931551bSRalph Campbell /* Queue NAK for later */
2098f931551bSRalph Campbell if (list_empty(&qp->rspwait)) {
209901ba79d4SHarish Chegondi qp->r_flags |= RVT_R_RSP_NAK;
21004d6f85c3SMike Marciniszyn rvt_get_qp(qp);
2101f931551bSRalph Campbell list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2102f931551bSRalph Campbell }
2103a5210c12SRalph Campbell return;
2104f931551bSRalph Campbell
2105f931551bSRalph Campbell nack_inv_unlck:
2106f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
2107f931551bSRalph Campbell nack_inv:
2108beb5a042SBrian Welty rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2109f931551bSRalph Campbell qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2110f931551bSRalph Campbell qp->r_ack_psn = qp->r_psn;
2111f931551bSRalph Campbell /* Queue NAK for later */
2112f931551bSRalph Campbell if (list_empty(&qp->rspwait)) {
211301ba79d4SHarish Chegondi qp->r_flags |= RVT_R_RSP_NAK;
21144d6f85c3SMike Marciniszyn rvt_get_qp(qp);
2115f931551bSRalph Campbell list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2116f931551bSRalph Campbell }
2117a5210c12SRalph Campbell return;
2118f931551bSRalph Campbell
2119f931551bSRalph Campbell nack_acc_unlck:
2120f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
2121f931551bSRalph Campbell nack_acc:
2122beb5a042SBrian Welty rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2123f931551bSRalph Campbell qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2124f931551bSRalph Campbell qp->r_ack_psn = qp->r_psn;
2125f931551bSRalph Campbell send_ack:
2126f931551bSRalph Campbell qib_send_rc_ack(qp);
2127f931551bSRalph Campbell return;
2128f931551bSRalph Campbell
2129f931551bSRalph Campbell sunlock:
2130f931551bSRalph Campbell spin_unlock_irqrestore(&qp->s_lock, flags);
2131f931551bSRalph Campbell }
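
/*
 * Illustrative sketch, not part of the original driver: several paths
 * above convert between PSNs and byte offsets at one PMTU per packet.
 * restart_sge() and the duplicate RDMA read handling turn a PSN delta
 * into a byte offset, and qib_rc_rcv() advances r_psn by one PSN per
 * PMTU of a requested RDMA read (rvt_div_mtu() is assumed here to do
 * that divide using the QP's cached log2 of the PMTU).
 */
static inline u32 example_psn_to_offset(u32 psn, u32 first_psn, u32 pmtu)
{
	/* PSN arithmetic is modulo 2^24 */
	return ((psn - first_psn) & QIB_PSN_MASK) * pmtu;
}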