1 /* 2 * Copyright(c) 2015 - 2018 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * 46 */ 47 48 #include <linux/io.h> 49 #include <rdma/rdma_vt.h> 50 #include <rdma/rdmavt_qp.h> 51 52 #include "hfi.h" 53 #include "qp.h" 54 #include "rc.h" 55 #include "verbs_txreq.h" 56 #include "trace.h" 57 58 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev, 59 u8 *prev_ack, bool *scheduled) 60 __must_hold(&qp->s_lock) 61 { 62 struct rvt_ack_entry *e = NULL; 63 u8 i, p; 64 bool s = true; 65 66 for (i = qp->r_head_ack_queue; ; i = p) { 67 if (i == qp->s_tail_ack_queue) 68 s = false; 69 if (i) 70 p = i - 1; 71 else 72 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); 73 if (p == qp->r_head_ack_queue) { 74 e = NULL; 75 break; 76 } 77 e = &qp->s_ack_queue[p]; 78 if (!e->opcode) { 79 e = NULL; 80 break; 81 } 82 if (cmp_psn(psn, e->psn) >= 0) { 83 if (p == qp->s_tail_ack_queue && 84 cmp_psn(psn, e->lpsn) <= 0) 85 s = false; 86 break; 87 } 88 } 89 if (prev) 90 *prev = p; 91 if (prev_ack) 92 *prev_ack = i; 93 if (scheduled) 94 *scheduled = s; 95 return e; 96 } 97 98 /** 99 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read) 100 * @dev: the device for this QP 101 * @qp: a pointer to the QP 102 * @ohdr: a pointer to the IB header being constructed 103 * @ps: the xmit packet state 104 * 105 * Return 1 if constructed; otherwise, return 0. 
106 * Note that we are in the responder's side of the QP context. 107 * Note the QP s_lock must be held. 108 */ 109 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, 110 struct ib_other_headers *ohdr, 111 struct hfi1_pkt_state *ps) 112 { 113 struct rvt_ack_entry *e; 114 u32 hwords, hdrlen; 115 u32 len = 0; 116 u32 bth0 = 0, bth2 = 0; 117 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT); 118 int middle = 0; 119 u32 pmtu = qp->pmtu; 120 struct hfi1_qp_priv *qpriv = qp->priv; 121 bool last_pkt; 122 u32 delta; 123 u8 next = qp->s_tail_ack_queue; 124 struct tid_rdma_request *req; 125 126 trace_hfi1_rsp_make_rc_ack(qp, 0); 127 lockdep_assert_held(&qp->s_lock); 128 /* Don't send an ACK if we aren't supposed to. */ 129 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) 130 goto bail; 131 132 if (qpriv->hdr_type == HFI1_PKT_TYPE_9B) 133 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 134 hwords = 5; 135 else 136 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */ 137 hwords = 7; 138 139 switch (qp->s_ack_state) { 140 case OP(RDMA_READ_RESPONSE_LAST): 141 case OP(RDMA_READ_RESPONSE_ONLY): 142 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 143 release_rdma_sge_mr(e); 144 /* FALLTHROUGH */ 145 case OP(ATOMIC_ACKNOWLEDGE): 146 /* 147 * We can increment the tail pointer now that the last 148 * response has been sent instead of only being 149 * constructed. 150 */ 151 if (++next > rvt_size_atomic(&dev->rdi)) 152 next = 0; 153 /* 154 * Only advance the s_acked_ack_queue pointer if there 155 * have been no TID RDMA requests. 156 */ 157 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 158 if (e->opcode != TID_OP(WRITE_REQ) && 159 qp->s_acked_ack_queue == qp->s_tail_ack_queue) 160 qp->s_acked_ack_queue = next; 161 qp->s_tail_ack_queue = next; 162 trace_hfi1_rsp_make_rc_ack(qp, e->psn); 163 /* FALLTHROUGH */ 164 case OP(SEND_ONLY): 165 case OP(ACKNOWLEDGE): 166 /* Check for no next entry in the queue. */ 167 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { 168 if (qp->s_flags & RVT_S_ACK_PENDING) 169 goto normal; 170 goto bail; 171 } 172 173 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 174 /* Check for tid write fence */ 175 if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) || 176 hfi1_tid_rdma_ack_interlock(qp, e)) { 177 iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB); 178 goto bail; 179 } 180 if (e->opcode == OP(RDMA_READ_REQUEST)) { 181 /* 182 * If a RDMA read response is being resent and 183 * we haven't seen the duplicate request yet, 184 * then stop sending the remaining responses the 185 * responder has seen until the requester re-sends it. 
186 */ 187 len = e->rdma_sge.sge_length; 188 if (len && !e->rdma_sge.mr) { 189 if (qp->s_acked_ack_queue == 190 qp->s_tail_ack_queue) 191 qp->s_acked_ack_queue = 192 qp->r_head_ack_queue; 193 qp->s_tail_ack_queue = qp->r_head_ack_queue; 194 goto bail; 195 } 196 /* Copy SGE state in case we need to resend */ 197 ps->s_txreq->mr = e->rdma_sge.mr; 198 if (ps->s_txreq->mr) 199 rvt_get_mr(ps->s_txreq->mr); 200 qp->s_ack_rdma_sge.sge = e->rdma_sge; 201 qp->s_ack_rdma_sge.num_sge = 1; 202 ps->s_txreq->ss = &qp->s_ack_rdma_sge; 203 if (len > pmtu) { 204 len = pmtu; 205 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); 206 } else { 207 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); 208 e->sent = 1; 209 } 210 ohdr->u.aeth = rvt_compute_aeth(qp); 211 hwords++; 212 qp->s_ack_rdma_psn = e->psn; 213 bth2 = mask_psn(qp->s_ack_rdma_psn++); 214 } else if (e->opcode == TID_OP(WRITE_REQ)) { 215 /* 216 * If a TID RDMA WRITE RESP is being resent, we have to 217 * wait for the actual request. All requests that are to 218 * be resent will have their state set to 219 * TID_REQUEST_RESEND. When the new request arrives, the 220 * state will be changed to TID_REQUEST_RESEND_ACTIVE. 221 */ 222 req = ack_to_tid_req(e); 223 if (req->state == TID_REQUEST_RESEND || 224 req->state == TID_REQUEST_INIT_RESEND) 225 goto bail; 226 qp->s_ack_state = TID_OP(WRITE_RESP); 227 qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg); 228 goto write_resp; 229 } else if (e->opcode == TID_OP(READ_REQ)) { 230 /* 231 * If a TID RDMA read response is being resent and 232 * we haven't seen the duplicate request yet, 233 * then stop sending the remaining responses the 234 * responder has seen until the requester re-sends it. 235 */ 236 len = e->rdma_sge.sge_length; 237 if (len && !e->rdma_sge.mr) { 238 if (qp->s_acked_ack_queue == 239 qp->s_tail_ack_queue) 240 qp->s_acked_ack_queue = 241 qp->r_head_ack_queue; 242 qp->s_tail_ack_queue = qp->r_head_ack_queue; 243 goto bail; 244 } 245 /* Copy SGE state in case we need to resend */ 246 ps->s_txreq->mr = e->rdma_sge.mr; 247 if (ps->s_txreq->mr) 248 rvt_get_mr(ps->s_txreq->mr); 249 qp->s_ack_rdma_sge.sge = e->rdma_sge; 250 qp->s_ack_rdma_sge.num_sge = 1; 251 qp->s_ack_state = TID_OP(READ_RESP); 252 goto read_resp; 253 } else { 254 /* COMPARE_SWAP or FETCH_ADD */ 255 ps->s_txreq->ss = NULL; 256 len = 0; 257 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); 258 ohdr->u.at.aeth = rvt_compute_aeth(qp); 259 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth); 260 hwords += sizeof(ohdr->u.at) / sizeof(u32); 261 bth2 = mask_psn(e->psn); 262 e->sent = 1; 263 } 264 trace_hfi1_tid_write_rsp_make_rc_ack(qp); 265 bth0 = qp->s_ack_state << 24; 266 break; 267 268 case OP(RDMA_READ_RESPONSE_FIRST): 269 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); 270 /* FALLTHROUGH */ 271 case OP(RDMA_READ_RESPONSE_MIDDLE): 272 ps->s_txreq->ss = &qp->s_ack_rdma_sge; 273 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr; 274 if (ps->s_txreq->mr) 275 rvt_get_mr(ps->s_txreq->mr); 276 len = qp->s_ack_rdma_sge.sge.sge_length; 277 if (len > pmtu) { 278 len = pmtu; 279 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 280 } else { 281 ohdr->u.aeth = rvt_compute_aeth(qp); 282 hwords++; 283 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); 284 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 285 e->sent = 1; 286 } 287 bth0 = qp->s_ack_state << 24; 288 bth2 = mask_psn(qp->s_ack_rdma_psn++); 289 break; 290 291 case TID_OP(WRITE_RESP): 292 write_resp: 293 /* 294 * 1. Check if RVT_S_ACK_PENDING is set. If yes, 295 * goto normal. 296 * 2. Attempt to allocate TID resources. 
		 * 3. Remove RVT_S_RESP_PENDING flags from s_flags
		 * 4. If resources not available:
		 *    4.1 Set RVT_S_WAIT_TID_SPACE
		 *    4.2 Queue QP on RCD TID queue
		 *    4.3 Put QP on iowait list.
		 *    4.4 Build IB RNR NAK with appropriate timeout value
		 *    4.5 Return an indication that progress was made.
		 * 5. If resources are available:
		 *    5.1 Program HW flow CSRs
		 *    5.2 Build TID RDMA WRITE RESP packet
		 *    5.3 If more resources needed, do 2.1 - 2.3.
		 *    5.4 Wake up next QP on RCD TID queue.
		 *    5.5 Return an indication that progress was made.
		 */

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		req = ack_to_tid_req(e);

		/*
		 * Send scheduled RNR NAKs. RNR NAKs need to be sent at
		 * segment boundaries, not at request boundaries. Don't change
		 * s_ack_state because we are still in the middle of a request.
		 */
		if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
		    qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
		    req->cur_seg == req->alloc_seg) {
			qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
			goto normal_no_state;
		}

		bth2 = mask_psn(qp->s_ack_rdma_psn);
		hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
							bth2, &len,
							&ps->s_txreq->ss);
		if (!hdrlen)
			return 0;

		hwords += hdrlen;
		bth0 = qp->s_ack_state << 24;
		qp->s_ack_rdma_psn++;
		trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
						     e->lpsn, req);
		if (req->cur_seg != req->total_segs)
			break;

		e->sent = 1;
		/* Do not free e->rdma_sge until all data are received */
		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
		break;

	case TID_OP(READ_RESP):
read_resp:
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
						      &bth1, &bth2, &len,
						      &last_pkt);
		if (delta == 0)
			goto error_qp;
		hwords += delta;
		if (last_pkt) {
			e->sent = 1;
			/*
			 * Increment qp->s_tail_ack_queue through s_ack_state
			 * transition.
			 */
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		break;
	case TID_OP(READ_REQ):
		goto bail;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
376 */ 377 qp->s_ack_state = OP(SEND_ONLY); 378 normal_no_state: 379 if (qp->s_nak_state) 380 ohdr->u.aeth = 381 cpu_to_be32((qp->r_msn & IB_MSN_MASK) | 382 (qp->s_nak_state << 383 IB_AETH_CREDIT_SHIFT)); 384 else 385 ohdr->u.aeth = rvt_compute_aeth(qp); 386 hwords++; 387 len = 0; 388 bth0 = OP(ACKNOWLEDGE) << 24; 389 bth2 = mask_psn(qp->s_ack_psn); 390 qp->s_flags &= ~RVT_S_ACK_PENDING; 391 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP; 392 ps->s_txreq->ss = NULL; 393 } 394 qp->s_rdma_ack_cnt++; 395 ps->s_txreq->sde = qpriv->s_sde; 396 ps->s_txreq->s_cur_size = len; 397 ps->s_txreq->hdr_dwords = hwords; 398 hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps); 399 return 1; 400 error_qp: 401 spin_unlock_irqrestore(&qp->s_lock, ps->flags); 402 spin_lock_irqsave(&qp->r_lock, ps->flags); 403 spin_lock(&qp->s_lock); 404 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 405 spin_unlock(&qp->s_lock); 406 spin_unlock_irqrestore(&qp->r_lock, ps->flags); 407 spin_lock_irqsave(&qp->s_lock, ps->flags); 408 bail: 409 qp->s_ack_state = OP(ACKNOWLEDGE); 410 /* 411 * Ensure s_rdma_ack_cnt changes are committed prior to resetting 412 * RVT_S_RESP_PENDING 413 */ 414 smp_wmb(); 415 qp->s_flags &= ~(RVT_S_RESP_PENDING 416 | RVT_S_ACK_PENDING 417 | HFI1_S_AHG_VALID); 418 return 0; 419 } 420 421 /** 422 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) 423 * @qp: a pointer to the QP 424 * 425 * Assumes s_lock is held. 426 * 427 * Return 1 if constructed; otherwise, return 0. 428 */ 429 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) 430 { 431 struct hfi1_qp_priv *priv = qp->priv; 432 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); 433 struct ib_other_headers *ohdr; 434 struct rvt_sge_state *ss = NULL; 435 struct rvt_swqe *wqe; 436 struct hfi1_swqe_priv *wpriv; 437 struct tid_rdma_request *req = NULL; 438 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 439 u32 hwords = 5; 440 u32 len = 0; 441 u32 bth0 = 0, bth2 = 0; 442 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT); 443 u32 pmtu = qp->pmtu; 444 char newreq; 445 int middle = 0; 446 int delta; 447 struct tid_rdma_flow *flow = NULL; 448 struct tid_rdma_params *remote; 449 450 trace_hfi1_sender_make_rc_req(qp); 451 lockdep_assert_held(&qp->s_lock); 452 ps->s_txreq = get_txreq(ps->dev, qp); 453 if (!ps->s_txreq) 454 goto bail_no_tx; 455 456 if (priv->hdr_type == HFI1_PKT_TYPE_9B) { 457 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 458 hwords = 5; 459 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) 460 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; 461 else 462 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth; 463 } else { 464 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */ 465 hwords = 7; 466 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && 467 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr)))) 468 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; 469 else 470 ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth; 471 } 472 473 /* Sending responses has higher priority over sending requests. */ 474 if ((qp->s_flags & RVT_S_RESP_PENDING) && 475 make_rc_ack(dev, qp, ohdr, ps)) 476 return 1; 477 478 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { 479 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) 480 goto bail; 481 /* We are in the error state, flush the work request. */ 482 if (qp->s_last == READ_ONCE(qp->s_head)) 483 goto bail; 484 /* If DMAs are in progress, we can't flush immediately. 
*/ 485 if (iowait_sdma_pending(&priv->s_iowait)) { 486 qp->s_flags |= RVT_S_WAIT_DMA; 487 goto bail; 488 } 489 clear_ahg(qp); 490 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 491 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? 492 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); 493 /* will get called again */ 494 goto done_free_tx; 495 } 496 497 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT)) 498 goto bail; 499 500 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) { 501 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { 502 qp->s_flags |= RVT_S_WAIT_PSN; 503 goto bail; 504 } 505 qp->s_sending_psn = qp->s_psn; 506 qp->s_sending_hpsn = qp->s_psn - 1; 507 } 508 509 /* Send a request. */ 510 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); 511 check_s_state: 512 switch (qp->s_state) { 513 default: 514 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) 515 goto bail; 516 /* 517 * Resend an old request or start a new one. 518 * 519 * We keep track of the current SWQE so that 520 * we don't reset the "furthest progress" state 521 * if we need to back up. 522 */ 523 newreq = 0; 524 if (qp->s_cur == qp->s_tail) { 525 /* Check if send work queue is empty. */ 526 if (qp->s_tail == READ_ONCE(qp->s_head)) { 527 clear_ahg(qp); 528 goto bail; 529 } 530 /* 531 * If a fence is requested, wait for previous 532 * RDMA read and atomic operations to finish. 533 * However, there is no need to guard against 534 * TID RDMA READ after TID RDMA READ. 535 */ 536 if ((wqe->wr.send_flags & IB_SEND_FENCE) && 537 qp->s_num_rd_atomic && 538 (wqe->wr.opcode != IB_WR_TID_RDMA_READ || 539 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) { 540 qp->s_flags |= RVT_S_WAIT_FENCE; 541 goto bail; 542 } 543 /* 544 * Local operations are processed immediately 545 * after all prior requests have completed 546 */ 547 if (wqe->wr.opcode == IB_WR_REG_MR || 548 wqe->wr.opcode == IB_WR_LOCAL_INV) { 549 int local_ops = 0; 550 int err = 0; 551 552 if (qp->s_last != qp->s_cur) 553 goto bail; 554 if (++qp->s_cur == qp->s_size) 555 qp->s_cur = 0; 556 if (++qp->s_tail == qp->s_size) 557 qp->s_tail = 0; 558 if (!(wqe->wr.send_flags & 559 RVT_SEND_COMPLETION_ONLY)) { 560 err = rvt_invalidate_rkey( 561 qp, 562 wqe->wr.ex.invalidate_rkey); 563 local_ops = 1; 564 } 565 rvt_send_complete(qp, wqe, 566 err ? IB_WC_LOC_PROT_ERR 567 : IB_WC_SUCCESS); 568 if (local_ops) 569 atomic_dec(&qp->local_ops_pending); 570 goto done_free_tx; 571 } 572 573 newreq = 1; 574 qp->s_psn = wqe->psn; 575 } 576 /* 577 * Note that we have to be careful not to modify the 578 * original work request since we may need to resend 579 * it. 580 */ 581 len = wqe->length; 582 ss = &qp->s_sge; 583 bth2 = mask_psn(qp->s_psn); 584 585 /* 586 * Interlock between various IB requests and TID RDMA 587 * if necessary. 588 */ 589 if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) || 590 hfi1_tid_rdma_wqe_interlock(qp, wqe)) 591 goto bail; 592 593 switch (wqe->wr.opcode) { 594 case IB_WR_SEND: 595 case IB_WR_SEND_WITH_IMM: 596 case IB_WR_SEND_WITH_INV: 597 /* If no credit, return. 
*/ 598 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && 599 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { 600 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; 601 goto bail; 602 } 603 if (len > pmtu) { 604 qp->s_state = OP(SEND_FIRST); 605 len = pmtu; 606 break; 607 } 608 if (wqe->wr.opcode == IB_WR_SEND) { 609 qp->s_state = OP(SEND_ONLY); 610 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 611 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); 612 /* Immediate data comes after the BTH */ 613 ohdr->u.imm_data = wqe->wr.ex.imm_data; 614 hwords += 1; 615 } else { 616 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE); 617 /* Invalidate rkey comes after the BTH */ 618 ohdr->u.ieth = cpu_to_be32( 619 wqe->wr.ex.invalidate_rkey); 620 hwords += 1; 621 } 622 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 623 bth0 |= IB_BTH_SOLICITED; 624 bth2 |= IB_BTH_REQ_ACK; 625 if (++qp->s_cur == qp->s_size) 626 qp->s_cur = 0; 627 break; 628 629 case IB_WR_RDMA_WRITE: 630 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 631 qp->s_lsn++; 632 goto no_flow_control; 633 case IB_WR_RDMA_WRITE_WITH_IMM: 634 /* If no credit, return. */ 635 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && 636 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { 637 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; 638 goto bail; 639 } 640 no_flow_control: 641 put_ib_reth_vaddr( 642 wqe->rdma_wr.remote_addr, 643 &ohdr->u.rc.reth); 644 ohdr->u.rc.reth.rkey = 645 cpu_to_be32(wqe->rdma_wr.rkey); 646 ohdr->u.rc.reth.length = cpu_to_be32(len); 647 hwords += sizeof(struct ib_reth) / sizeof(u32); 648 if (len > pmtu) { 649 qp->s_state = OP(RDMA_WRITE_FIRST); 650 len = pmtu; 651 break; 652 } 653 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { 654 qp->s_state = OP(RDMA_WRITE_ONLY); 655 } else { 656 qp->s_state = 657 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 658 /* Immediate data comes after RETH */ 659 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; 660 hwords += 1; 661 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 662 bth0 |= IB_BTH_SOLICITED; 663 } 664 bth2 |= IB_BTH_REQ_ACK; 665 if (++qp->s_cur == qp->s_size) 666 qp->s_cur = 0; 667 break; 668 669 case IB_WR_TID_RDMA_WRITE: 670 if (newreq) { 671 /* 672 * Limit the number of TID RDMA WRITE requests. 673 */ 674 if (atomic_read(&priv->n_tid_requests) >= 675 HFI1_TID_RDMA_WRITE_CNT) 676 goto bail; 677 678 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 679 qp->s_lsn++; 680 } 681 682 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, 683 &bth1, &bth2, 684 &len); 685 ss = NULL; 686 if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) { 687 priv->s_tid_cur = qp->s_cur; 688 if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) { 689 priv->s_tid_tail = qp->s_cur; 690 priv->s_state = TID_OP(WRITE_RESP); 691 } 692 } else if (priv->s_tid_cur == priv->s_tid_head) { 693 struct rvt_swqe *__w; 694 struct tid_rdma_request *__r; 695 696 __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur); 697 __r = wqe_to_tid_req(__w); 698 699 /* 700 * The s_tid_cur pointer is advanced to s_cur if 701 * any of the following conditions about the WQE 702 * to which s_ti_cur currently points to are 703 * satisfied: 704 * 1. The request is not a TID RDMA WRITE 705 * request, 706 * 2. The request is in the INACTIVE or 707 * COMPLETE states (TID RDMA READ requests 708 * stay at INACTIVE and TID RDMA WRITE 709 * transition to COMPLETE when done), 710 * 3. The request is in the ACTIVE or SYNC 711 * state and the number of completed 712 * segments is equal to the total segment 713 * count. 714 * (If ACTIVE, the request is waiting for 715 * ACKs. 
If SYNC, the request has not 716 * received any responses because it's 717 * waiting on a sync point.) 718 */ 719 if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE || 720 __r->state == TID_REQUEST_INACTIVE || 721 __r->state == TID_REQUEST_COMPLETE || 722 ((__r->state == TID_REQUEST_ACTIVE || 723 __r->state == TID_REQUEST_SYNC) && 724 __r->comp_seg == __r->total_segs)) { 725 if (priv->s_tid_tail == 726 priv->s_tid_cur && 727 priv->s_state == 728 TID_OP(WRITE_DATA_LAST)) { 729 priv->s_tid_tail = qp->s_cur; 730 priv->s_state = 731 TID_OP(WRITE_RESP); 732 } 733 priv->s_tid_cur = qp->s_cur; 734 } 735 /* 736 * A corner case: when the last TID RDMA WRITE 737 * request was completed, s_tid_head, 738 * s_tid_cur, and s_tid_tail all point to the 739 * same location. Other requests are posted and 740 * s_cur wraps around to the same location, 741 * where a new TID RDMA WRITE is posted. In 742 * this case, none of the indices need to be 743 * updated. However, the priv->s_state should. 744 */ 745 if (priv->s_tid_tail == qp->s_cur && 746 priv->s_state == TID_OP(WRITE_DATA_LAST)) 747 priv->s_state = TID_OP(WRITE_RESP); 748 } 749 req = wqe_to_tid_req(wqe); 750 if (newreq) { 751 priv->s_tid_head = qp->s_cur; 752 priv->pending_tid_w_resp += req->total_segs; 753 atomic_inc(&priv->n_tid_requests); 754 atomic_dec(&priv->n_requests); 755 } else { 756 req->state = TID_REQUEST_RESEND; 757 req->comp_seg = delta_psn(bth2, wqe->psn); 758 /* 759 * Pull back any segments since we are going 760 * to re-receive them. 761 */ 762 req->setup_head = req->clear_tail; 763 priv->pending_tid_w_resp += 764 delta_psn(wqe->lpsn, bth2) + 1; 765 } 766 767 trace_hfi1_tid_write_sender_make_req(qp, newreq); 768 trace_hfi1_tid_req_make_req_write(qp, newreq, 769 wqe->wr.opcode, 770 wqe->psn, wqe->lpsn, 771 req); 772 if (++qp->s_cur == qp->s_size) 773 qp->s_cur = 0; 774 break; 775 776 case IB_WR_RDMA_READ: 777 /* 778 * Don't allow more operations to be started 779 * than the QP limits allow. 780 */ 781 if (qp->s_num_rd_atomic >= 782 qp->s_max_rd_atomic) { 783 qp->s_flags |= RVT_S_WAIT_RDMAR; 784 goto bail; 785 } 786 qp->s_num_rd_atomic++; 787 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 788 qp->s_lsn++; 789 put_ib_reth_vaddr( 790 wqe->rdma_wr.remote_addr, 791 &ohdr->u.rc.reth); 792 ohdr->u.rc.reth.rkey = 793 cpu_to_be32(wqe->rdma_wr.rkey); 794 ohdr->u.rc.reth.length = cpu_to_be32(len); 795 qp->s_state = OP(RDMA_READ_REQUEST); 796 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 797 ss = NULL; 798 len = 0; 799 bth2 |= IB_BTH_REQ_ACK; 800 if (++qp->s_cur == qp->s_size) 801 qp->s_cur = 0; 802 break; 803 804 case IB_WR_TID_RDMA_READ: 805 trace_hfi1_tid_read_sender_make_req(qp, newreq); 806 wpriv = wqe->priv; 807 req = wqe_to_tid_req(wqe); 808 trace_hfi1_tid_req_make_req_read(qp, newreq, 809 wqe->wr.opcode, 810 wqe->psn, wqe->lpsn, 811 req); 812 delta = cmp_psn(qp->s_psn, wqe->psn); 813 814 /* 815 * Don't allow more operations to be started 816 * than the QP limits allow. We could get here under 817 * three conditions; (1) It's a new request; (2) We are 818 * sending the second or later segment of a request, 819 * but the qp->s_state is set to OP(RDMA_READ_REQUEST) 820 * when the last segment of a previous request is 821 * received just before this; (3) We are re-sending a 822 * request. 
823 */ 824 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { 825 qp->s_flags |= RVT_S_WAIT_RDMAR; 826 goto bail; 827 } 828 if (newreq) { 829 struct tid_rdma_flow *flow = 830 &req->flows[req->setup_head]; 831 832 /* 833 * Set up s_sge as it is needed for TID 834 * allocation. However, if the pages have been 835 * walked and mapped, skip it. An earlier try 836 * has failed to allocate the TID entries. 837 */ 838 if (!flow->npagesets) { 839 qp->s_sge.sge = wqe->sg_list[0]; 840 qp->s_sge.sg_list = wqe->sg_list + 1; 841 qp->s_sge.num_sge = wqe->wr.num_sge; 842 qp->s_sge.total_len = wqe->length; 843 qp->s_len = wqe->length; 844 req->isge = 0; 845 req->clear_tail = req->setup_head; 846 req->flow_idx = req->setup_head; 847 req->state = TID_REQUEST_ACTIVE; 848 } 849 } else if (delta == 0) { 850 /* Re-send a request */ 851 req->cur_seg = 0; 852 req->comp_seg = 0; 853 req->ack_pending = 0; 854 req->flow_idx = req->clear_tail; 855 req->state = TID_REQUEST_RESEND; 856 } 857 req->s_next_psn = qp->s_psn; 858 /* Read one segment at a time */ 859 len = min_t(u32, req->seg_len, 860 wqe->length - req->seg_len * req->cur_seg); 861 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, 862 &bth1, &bth2, 863 &len); 864 if (delta <= 0) { 865 /* Wait for TID space */ 866 goto bail; 867 } 868 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 869 qp->s_lsn++; 870 hwords += delta; 871 ss = &wpriv->ss; 872 /* Check if this is the last segment */ 873 if (req->cur_seg >= req->total_segs && 874 ++qp->s_cur == qp->s_size) 875 qp->s_cur = 0; 876 break; 877 878 case IB_WR_ATOMIC_CMP_AND_SWP: 879 case IB_WR_ATOMIC_FETCH_AND_ADD: 880 /* 881 * Don't allow more operations to be started 882 * than the QP limits allow. 883 */ 884 if (qp->s_num_rd_atomic >= 885 qp->s_max_rd_atomic) { 886 qp->s_flags |= RVT_S_WAIT_RDMAR; 887 goto bail; 888 } 889 qp->s_num_rd_atomic++; 890 891 /* FALLTHROUGH */ 892 case IB_WR_OPFN: 893 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 894 qp->s_lsn++; 895 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 896 wqe->wr.opcode == IB_WR_OPFN) { 897 qp->s_state = OP(COMPARE_SWAP); 898 put_ib_ateth_swap(wqe->atomic_wr.swap, 899 &ohdr->u.atomic_eth); 900 put_ib_ateth_compare(wqe->atomic_wr.compare_add, 901 &ohdr->u.atomic_eth); 902 } else { 903 qp->s_state = OP(FETCH_ADD); 904 put_ib_ateth_swap(wqe->atomic_wr.compare_add, 905 &ohdr->u.atomic_eth); 906 put_ib_ateth_compare(0, &ohdr->u.atomic_eth); 907 } 908 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr, 909 &ohdr->u.atomic_eth); 910 ohdr->u.atomic_eth.rkey = cpu_to_be32( 911 wqe->atomic_wr.rkey); 912 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); 913 ss = NULL; 914 len = 0; 915 bth2 |= IB_BTH_REQ_ACK; 916 if (++qp->s_cur == qp->s_size) 917 qp->s_cur = 0; 918 break; 919 920 default: 921 goto bail; 922 } 923 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) { 924 qp->s_sge.sge = wqe->sg_list[0]; 925 qp->s_sge.sg_list = wqe->sg_list + 1; 926 qp->s_sge.num_sge = wqe->wr.num_sge; 927 qp->s_sge.total_len = wqe->length; 928 qp->s_len = wqe->length; 929 } 930 if (newreq) { 931 qp->s_tail++; 932 if (qp->s_tail >= qp->s_size) 933 qp->s_tail = 0; 934 } 935 if (wqe->wr.opcode == IB_WR_RDMA_READ || 936 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) 937 qp->s_psn = wqe->lpsn + 1; 938 else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) 939 qp->s_psn = req->s_next_psn; 940 else 941 qp->s_psn++; 942 break; 943 944 case OP(RDMA_READ_RESPONSE_FIRST): 945 /* 946 * qp->s_state is normally set to the opcode of the 947 * last packet constructed for new requests and therefore 948 * is 
never set to RDMA read response. 949 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing 950 * thread to indicate a SEND needs to be restarted from an 951 * earlier PSN without interfering with the sending thread. 952 * See restart_rc(). 953 */ 954 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); 955 /* FALLTHROUGH */ 956 case OP(SEND_FIRST): 957 qp->s_state = OP(SEND_MIDDLE); 958 /* FALLTHROUGH */ 959 case OP(SEND_MIDDLE): 960 bth2 = mask_psn(qp->s_psn++); 961 ss = &qp->s_sge; 962 len = qp->s_len; 963 if (len > pmtu) { 964 len = pmtu; 965 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 966 break; 967 } 968 if (wqe->wr.opcode == IB_WR_SEND) { 969 qp->s_state = OP(SEND_LAST); 970 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 971 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 972 /* Immediate data comes after the BTH */ 973 ohdr->u.imm_data = wqe->wr.ex.imm_data; 974 hwords += 1; 975 } else { 976 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE); 977 /* invalidate data comes after the BTH */ 978 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey); 979 hwords += 1; 980 } 981 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 982 bth0 |= IB_BTH_SOLICITED; 983 bth2 |= IB_BTH_REQ_ACK; 984 qp->s_cur++; 985 if (qp->s_cur >= qp->s_size) 986 qp->s_cur = 0; 987 break; 988 989 case OP(RDMA_READ_RESPONSE_LAST): 990 /* 991 * qp->s_state is normally set to the opcode of the 992 * last packet constructed for new requests and therefore 993 * is never set to RDMA read response. 994 * RDMA_READ_RESPONSE_LAST is used by the ACK processing 995 * thread to indicate a RDMA write needs to be restarted from 996 * an earlier PSN without interfering with the sending thread. 997 * See restart_rc(). 998 */ 999 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); 1000 /* FALLTHROUGH */ 1001 case OP(RDMA_WRITE_FIRST): 1002 qp->s_state = OP(RDMA_WRITE_MIDDLE); 1003 /* FALLTHROUGH */ 1004 case OP(RDMA_WRITE_MIDDLE): 1005 bth2 = mask_psn(qp->s_psn++); 1006 ss = &qp->s_sge; 1007 len = qp->s_len; 1008 if (len > pmtu) { 1009 len = pmtu; 1010 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 1011 break; 1012 } 1013 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { 1014 qp->s_state = OP(RDMA_WRITE_LAST); 1015 } else { 1016 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 1017 /* Immediate data comes after the BTH */ 1018 ohdr->u.imm_data = wqe->wr.ex.imm_data; 1019 hwords += 1; 1020 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 1021 bth0 |= IB_BTH_SOLICITED; 1022 } 1023 bth2 |= IB_BTH_REQ_ACK; 1024 qp->s_cur++; 1025 if (qp->s_cur >= qp->s_size) 1026 qp->s_cur = 0; 1027 break; 1028 1029 case OP(RDMA_READ_RESPONSE_MIDDLE): 1030 /* 1031 * qp->s_state is normally set to the opcode of the 1032 * last packet constructed for new requests and therefore 1033 * is never set to RDMA read response. 1034 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing 1035 * thread to indicate a RDMA read needs to be restarted from 1036 * an earlier PSN without interfering with the sending thread. 1037 * See restart_rc(). 
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;

	case TID_OP(WRITE_RESP):
		/*
		 * This value for s_state is used for restarting a TID RDMA
		 * WRITE request. See comment in OP(RDMA_READ_RESPONSE_MIDDLE)
		 * for more.
		 */
		req = wqe_to_tid_req(wqe);
		req->state = TID_REQUEST_RESEND;
		rcu_read_lock();
		remote = rcu_dereference(priv->tid_rdma.remote);
		req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
		len = wqe->length - (req->comp_seg * remote->max_len);
		rcu_read_unlock();

		bth2 = mask_psn(qp->s_psn);
		hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
							&bth2, &len);
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		qp->s_state = TID_OP(WRITE_REQ);
		priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
		priv->s_tid_cur = qp->s_cur;
		if (++qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
						  wqe->psn, wqe->lpsn, req);
		break;

	case TID_OP(READ_RESP):
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
			goto bail;
		/* This is used to restart a TID read request */
		req = wqe_to_tid_req(wqe);
		wpriv = wqe->priv;
		/*
		 * Back down. The field qp->s_psn has been set to the psn with
		 * which the request should be restarted. It's OK to use
		 * division as this is on the retry path.
		 */
		req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;

		/*
		 * The following function needs to be redefined to return the
		 * status to make sure that we find the flow. At the same
		 * time, we can use the req->state change to check if the
		 * call succeeds or not.
		 */
		req->state = TID_REQUEST_RESEND;
		hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
		if (req->state != TID_REQUEST_ACTIVE) {
			/*
			 * Failed to find the flow. Release all allocated tid
			 * resources.
1110 */ 1111 hfi1_kern_exp_rcv_clear_all(req); 1112 hfi1_kern_clear_hw_flow(priv->rcd, qp); 1113 1114 hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR); 1115 goto bail; 1116 } 1117 req->state = TID_REQUEST_RESEND; 1118 len = min_t(u32, req->seg_len, 1119 wqe->length - req->seg_len * req->cur_seg); 1120 flow = &req->flows[req->flow_idx]; 1121 len -= flow->sent; 1122 req->s_next_psn = flow->flow_state.ib_lpsn + 1; 1123 delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1, 1124 &bth2, &len); 1125 if (delta <= 0) { 1126 /* Wait for TID space */ 1127 goto bail; 1128 } 1129 hwords += delta; 1130 ss = &wpriv->ss; 1131 /* Check if this is the last segment */ 1132 if (req->cur_seg >= req->total_segs && 1133 ++qp->s_cur == qp->s_size) 1134 qp->s_cur = 0; 1135 qp->s_psn = req->s_next_psn; 1136 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode, 1137 wqe->psn, wqe->lpsn, req); 1138 break; 1139 case TID_OP(READ_REQ): 1140 req = wqe_to_tid_req(wqe); 1141 delta = cmp_psn(qp->s_psn, wqe->psn); 1142 /* 1143 * If the current WR is not TID RDMA READ, or this is the start 1144 * of a new request, we need to change the qp->s_state so that 1145 * the request can be set up properly. 1146 */ 1147 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 || 1148 qp->s_cur == qp->s_tail) { 1149 qp->s_state = OP(RDMA_READ_REQUEST); 1150 if (delta == 0 || qp->s_cur == qp->s_tail) 1151 goto check_s_state; 1152 else 1153 goto bail; 1154 } 1155 1156 /* Rate limiting */ 1157 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { 1158 qp->s_flags |= RVT_S_WAIT_RDMAR; 1159 goto bail; 1160 } 1161 1162 wpriv = wqe->priv; 1163 /* Read one segment at a time */ 1164 len = min_t(u32, req->seg_len, 1165 wqe->length - req->seg_len * req->cur_seg); 1166 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1, 1167 &bth2, &len); 1168 if (delta <= 0) { 1169 /* Wait for TID space */ 1170 goto bail; 1171 } 1172 hwords += delta; 1173 ss = &wpriv->ss; 1174 /* Check if this is the last segment */ 1175 if (req->cur_seg >= req->total_segs && 1176 ++qp->s_cur == qp->s_size) 1177 qp->s_cur = 0; 1178 qp->s_psn = req->s_next_psn; 1179 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode, 1180 wqe->psn, wqe->lpsn, req); 1181 break; 1182 } 1183 qp->s_sending_hpsn = bth2; 1184 delta = delta_psn(bth2, wqe->psn); 1185 if (delta && delta % HFI1_PSN_CREDIT == 0 && 1186 wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 1187 bth2 |= IB_BTH_REQ_ACK; 1188 if (qp->s_flags & RVT_S_SEND_ONE) { 1189 qp->s_flags &= ~RVT_S_SEND_ONE; 1190 qp->s_flags |= RVT_S_WAIT_ACK; 1191 bth2 |= IB_BTH_REQ_ACK; 1192 } 1193 qp->s_len -= len; 1194 ps->s_txreq->hdr_dwords = hwords; 1195 ps->s_txreq->sde = priv->s_sde; 1196 ps->s_txreq->ss = ss; 1197 ps->s_txreq->s_cur_size = len; 1198 hfi1_make_ruc_header( 1199 qp, 1200 ohdr, 1201 bth0 | (qp->s_state << 24), 1202 bth1, 1203 bth2, 1204 middle, 1205 ps); 1206 return 1; 1207 1208 done_free_tx: 1209 hfi1_put_txreq(ps->s_txreq); 1210 ps->s_txreq = NULL; 1211 return 1; 1212 1213 bail: 1214 hfi1_put_txreq(ps->s_txreq); 1215 1216 bail_no_tx: 1217 ps->s_txreq = NULL; 1218 qp->s_flags &= ~RVT_S_BUSY; 1219 /* 1220 * If we didn't get a txreq, the QP will be woken up later to try 1221 * again. Set the flags to indicate which work item to wake 1222 * up. 
1223 */ 1224 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB); 1225 return 0; 1226 } 1227 1228 static inline void hfi1_make_bth_aeth(struct rvt_qp *qp, 1229 struct ib_other_headers *ohdr, 1230 u32 bth0, u32 bth1) 1231 { 1232 if (qp->r_nak_state) 1233 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) | 1234 (qp->r_nak_state << 1235 IB_AETH_CREDIT_SHIFT)); 1236 else 1237 ohdr->u.aeth = rvt_compute_aeth(qp); 1238 1239 ohdr->bth[0] = cpu_to_be32(bth0); 1240 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn); 1241 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn)); 1242 } 1243 1244 static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn) 1245 { 1246 struct rvt_qp *qp = packet->qp; 1247 struct hfi1_ibport *ibp; 1248 unsigned long flags; 1249 1250 spin_lock_irqsave(&qp->s_lock, flags); 1251 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) 1252 goto unlock; 1253 ibp = rcd_to_iport(packet->rcd); 1254 this_cpu_inc(*ibp->rvp.rc_qacks); 1255 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; 1256 qp->s_nak_state = qp->r_nak_state; 1257 qp->s_ack_psn = qp->r_ack_psn; 1258 if (is_fecn) 1259 qp->s_flags |= RVT_S_ECN; 1260 1261 /* Schedule the send tasklet. */ 1262 hfi1_schedule_send(qp); 1263 unlock: 1264 spin_unlock_irqrestore(&qp->s_lock, flags); 1265 } 1266 1267 static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet, 1268 struct hfi1_opa_header *opa_hdr, 1269 u8 sc5, bool is_fecn, 1270 u64 *pbc_flags, u32 *hwords, 1271 u32 *nwords) 1272 { 1273 struct rvt_qp *qp = packet->qp; 1274 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 1275 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1276 struct ib_header *hdr = &opa_hdr->ibh; 1277 struct ib_other_headers *ohdr; 1278 u16 lrh0 = HFI1_LRH_BTH; 1279 u16 pkey; 1280 u32 bth0, bth1; 1281 1282 opa_hdr->hdr_type = HFI1_PKT_TYPE_9B; 1283 ohdr = &hdr->u.oth; 1284 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */ 1285 *hwords = 6; 1286 1287 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) { 1288 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, 1289 rdma_ah_read_grh(&qp->remote_ah_attr), 1290 *hwords - 2, SIZE_OF_CRC); 1291 ohdr = &hdr->u.l.oth; 1292 lrh0 = HFI1_LRH_GRH; 1293 } 1294 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ 1295 *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT); 1296 1297 /* read pkey_index w/o lock (its atomic) */ 1298 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); 1299 1300 lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT | 1301 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) << 1302 IB_SL_SHIFT; 1303 1304 hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC, 1305 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B), 1306 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr)); 1307 1308 bth0 = pkey | (OP(ACKNOWLEDGE) << 24); 1309 if (qp->s_mig_state == IB_MIG_MIGRATED) 1310 bth0 |= IB_BTH_MIG_REQ; 1311 bth1 = (!!is_fecn) << IB_BECN_SHIFT; 1312 /* 1313 * Inline ACKs go out without the use of the Verbs send engine, so 1314 * we need to set the STL Verbs Extended bit here 1315 */ 1316 bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT; 1317 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); 1318 } 1319 1320 static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet, 1321 struct hfi1_opa_header *opa_hdr, 1322 u8 sc5, bool is_fecn, 1323 u64 *pbc_flags, u32 *hwords, 1324 u32 *nwords) 1325 { 1326 struct rvt_qp *qp = packet->qp; 1327 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 1328 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1329 struct hfi1_16b_header *hdr = 
&opa_hdr->opah; 1330 struct ib_other_headers *ohdr; 1331 u32 bth0, bth1 = 0; 1332 u16 len, pkey; 1333 bool becn = is_fecn; 1334 u8 l4 = OPA_16B_L4_IB_LOCAL; 1335 u8 extra_bytes; 1336 1337 opa_hdr->hdr_type = HFI1_PKT_TYPE_16B; 1338 ohdr = &hdr->u.oth; 1339 /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */ 1340 *hwords = 8; 1341 extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0); 1342 *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2); 1343 1344 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && 1345 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) { 1346 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, 1347 rdma_ah_read_grh(&qp->remote_ah_attr), 1348 *hwords - 4, *nwords); 1349 ohdr = &hdr->u.l.oth; 1350 l4 = OPA_16B_L4_IB_GLOBAL; 1351 } 1352 *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; 1353 1354 /* read pkey_index w/o lock (its atomic) */ 1355 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); 1356 1357 /* Convert dwords to flits */ 1358 len = (*hwords + *nwords) >> 1; 1359 1360 hfi1_make_16b_hdr(hdr, ppd->lid | 1361 (rdma_ah_get_path_bits(&qp->remote_ah_attr) & 1362 ((1 << ppd->lmc) - 1)), 1363 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 1364 16B), len, pkey, becn, 0, l4, sc5); 1365 1366 bth0 = pkey | (OP(ACKNOWLEDGE) << 24); 1367 bth0 |= extra_bytes << 20; 1368 if (qp->s_mig_state == IB_MIG_MIGRATED) 1369 bth1 = OPA_BTH_MIG_REQ; 1370 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); 1371 } 1372 1373 typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet, 1374 struct hfi1_opa_header *opa_hdr, 1375 u8 sc5, bool is_fecn, 1376 u64 *pbc_flags, u32 *hwords, 1377 u32 *nwords); 1378 1379 /* We support only two types - 9B and 16B for now */ 1380 static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = { 1381 [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B, 1382 [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B 1383 }; 1384 1385 /** 1386 * hfi1_send_rc_ack - Construct an ACK packet and send it 1387 * @qp: a pointer to the QP 1388 * 1389 * This is called from hfi1_rc_rcv() and handle_receive_interrupt(). 1390 * Note that RDMA reads and atomics are handled in the 1391 * send side QP state and send engine. 1392 */ 1393 void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn) 1394 { 1395 struct hfi1_ctxtdata *rcd = packet->rcd; 1396 struct rvt_qp *qp = packet->qp; 1397 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 1398 struct hfi1_qp_priv *priv = qp->priv; 1399 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1400 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; 1401 u64 pbc, pbc_flags = 0; 1402 u32 hwords = 0; 1403 u32 nwords = 0; 1404 u32 plen; 1405 struct pio_buf *pbuf; 1406 struct hfi1_opa_header opa_hdr; 1407 1408 /* clear the defer count */ 1409 qp->r_adefered = 0; 1410 1411 /* Don't send ACK or NAK if a RDMA read or atomic is pending. 
*/ 1412 if (qp->s_flags & RVT_S_RESP_PENDING) { 1413 hfi1_queue_rc_ack(packet, is_fecn); 1414 return; 1415 } 1416 1417 /* Ensure s_rdma_ack_cnt changes are committed */ 1418 if (qp->s_rdma_ack_cnt) { 1419 hfi1_queue_rc_ack(packet, is_fecn); 1420 return; 1421 } 1422 1423 /* Don't try to send ACKs if the link isn't ACTIVE */ 1424 if (driver_lstate(ppd) != IB_PORT_ACTIVE) 1425 return; 1426 1427 /* Make the appropriate header */ 1428 hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn, 1429 &pbc_flags, &hwords, &nwords); 1430 1431 plen = 2 /* PBC */ + hwords + nwords; 1432 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, 1433 sc_to_vlt(ppd->dd, sc5), plen); 1434 pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL); 1435 if (IS_ERR_OR_NULL(pbuf)) { 1436 /* 1437 * We have no room to send at the moment. Pass 1438 * responsibility for sending the ACK to the send engine 1439 * so that when enough buffer space becomes available, 1440 * the ACK is sent ahead of other outgoing packets. 1441 */ 1442 hfi1_queue_rc_ack(packet, is_fecn); 1443 return; 1444 } 1445 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), 1446 &opa_hdr, ib_is_sc5(sc5)); 1447 1448 /* write the pbc and data */ 1449 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, 1450 (priv->hdr_type == HFI1_PKT_TYPE_9B ? 1451 (void *)&opa_hdr.ibh : 1452 (void *)&opa_hdr.opah), hwords); 1453 return; 1454 } 1455 1456 /** 1457 * update_num_rd_atomic - update the qp->s_num_rd_atomic 1458 * @qp: the QP 1459 * @psn: the packet sequence number to restart at 1460 * @wqe: the wqe 1461 * 1462 * This is called from reset_psn() to update qp->s_num_rd_atomic 1463 * for the current wqe. 1464 * Called at interrupt level with the QP s_lock held. 1465 */ 1466 static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn, 1467 struct rvt_swqe *wqe) 1468 { 1469 u32 opcode = wqe->wr.opcode; 1470 1471 if (opcode == IB_WR_RDMA_READ || 1472 opcode == IB_WR_ATOMIC_CMP_AND_SWP || 1473 opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 1474 qp->s_num_rd_atomic++; 1475 } else if (opcode == IB_WR_TID_RDMA_READ) { 1476 struct tid_rdma_request *req = wqe_to_tid_req(wqe); 1477 struct hfi1_qp_priv *priv = qp->priv; 1478 1479 if (cmp_psn(psn, wqe->lpsn) <= 0) { 1480 u32 cur_seg; 1481 1482 cur_seg = (psn - wqe->psn) / priv->pkts_ps; 1483 req->ack_pending = cur_seg - req->comp_seg; 1484 priv->pending_tid_r_segs += req->ack_pending; 1485 qp->s_num_rd_atomic += req->ack_pending; 1486 } else { 1487 priv->pending_tid_r_segs += req->total_segs; 1488 qp->s_num_rd_atomic += req->total_segs; 1489 } 1490 } 1491 } 1492 1493 /** 1494 * reset_psn - reset the QP state to send starting from PSN 1495 * @qp: the QP 1496 * @psn: the packet sequence number to restart at 1497 * 1498 * This is called from hfi1_rc_rcv() to process an incoming RC ACK 1499 * for the given QP. 1500 * Called at interrupt level with the QP s_lock held. 1501 */ 1502 static void reset_psn(struct rvt_qp *qp, u32 psn) 1503 { 1504 u32 n = qp->s_acked; 1505 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); 1506 u32 opcode; 1507 struct hfi1_qp_priv *priv = qp->priv; 1508 1509 lockdep_assert_held(&qp->s_lock); 1510 qp->s_cur = n; 1511 priv->pending_tid_r_segs = 0; 1512 priv->pending_tid_w_resp = 0; 1513 qp->s_num_rd_atomic = 0; 1514 1515 /* 1516 * If we are starting the request from the beginning, 1517 * let the normal send code handle initialization. 
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}
	update_num_rd_atomic(qp, psn, wqe);

	/* Find the work request opcode corresponding to the given PSN. */
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0) {
			/* Point wqe back to the previous one */
			wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
			break;
		}
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}

		update_num_rd_atomic(qp, psn, wqe);
	}
	opcode = wqe->wr.opcode;

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_TID_RDMA_WRITE:
		qp->s_state = TID_OP(WRITE_RESP);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	case IB_WR_TID_RDMA_READ:
		qp->s_state = TID_OP(READ_RESP);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one PSN per request.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~HFI1_S_AHG_VALID;
	trace_hfi1_sender_reset_psn(qp);
}

/*
 * Back up the requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	trace_hfi1_sender_restart_rc(qp);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			/*
			 * We need special handling for the OPFN request WQEs as
			 * they are not allowed to generate real user errors.
			 */
			if (wqe->wr.opcode == IB_WR_OPFN) {
				struct hfi1_ibport *ibp =
					to_iport(qp->ibqp.device, qp->port_num);
				/*
				 * Call opfn_conn_reply() with capcode and
				 * remaining data as 0 to close out the
				 * current request.
				 */
				opfn_conn_reply(qp, priv->opfn.curr);
				wqe = do_rc_completion(qp, wqe, ibp);
				qp->s_flags &= ~RVT_S_WAIT_ACK;
			} else {
				trace_hfi1_tid_write_sender_restart_rc(qp, 0);
				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
					struct tid_rdma_request *req;

					req = wqe_to_tid_req(wqe);
					hfi1_kern_exp_rcv_clear_all(req);
					hfi1_kern_clear_hw_flow(priv->rcd, qp);
				}

				hfi1_trdma_send_complete(qp, wqe,
							 IB_WC_RETRY_EXC_ERR);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ ||
	    wqe->wr.opcode == IB_WR_TID_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads or TID RDMA ops
 * are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ ||
			    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
			    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/**
 * hfi1_rc_verbs_aborted - handle abort status
 * @qp: the QP
 * @opah: the opa header
 *
 * This code modifies both the ACK bit in BTH[2]
 * and the s_flags to go into send one mode.
 *
 * This serves to throttle the send engine to only
 * send a single packet in the likely case that
 * a link has gone down.
1715 */ 1716 void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah) 1717 { 1718 struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah); 1719 u8 opcode = ib_bth_get_opcode(ohdr); 1720 u32 psn; 1721 1722 /* ignore responses */ 1723 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) && 1724 opcode <= OP(ATOMIC_ACKNOWLEDGE)) || 1725 opcode == TID_OP(READ_RESP) || 1726 opcode == TID_OP(WRITE_RESP)) 1727 return; 1728 1729 psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK; 1730 ohdr->bth[2] = cpu_to_be32(psn); 1731 qp->s_flags |= RVT_S_SEND_ONE; 1732 } 1733 1734 /* 1735 * This should be called with the QP s_lock held and interrupts disabled. 1736 */ 1737 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) 1738 { 1739 struct ib_other_headers *ohdr; 1740 struct hfi1_qp_priv *priv = qp->priv; 1741 struct rvt_swqe *wqe; 1742 u32 opcode, head, tail; 1743 u32 psn; 1744 struct tid_rdma_request *req; 1745 1746 lockdep_assert_held(&qp->s_lock); 1747 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK)) 1748 return; 1749 1750 ohdr = hfi1_get_rc_ohdr(opah); 1751 opcode = ib_bth_get_opcode(ohdr); 1752 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) && 1753 opcode <= OP(ATOMIC_ACKNOWLEDGE)) || 1754 opcode == TID_OP(READ_RESP) || 1755 opcode == TID_OP(WRITE_RESP)) { 1756 WARN_ON(!qp->s_rdma_ack_cnt); 1757 qp->s_rdma_ack_cnt--; 1758 return; 1759 } 1760 1761 psn = ib_bth_get_psn(ohdr); 1762 /* 1763 * Don't attempt to reset the sending PSN for packets in the 1764 * KDETH PSN space since the PSN does not match anything. 1765 */ 1766 if (opcode != TID_OP(WRITE_DATA) && 1767 opcode != TID_OP(WRITE_DATA_LAST) && 1768 opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC)) 1769 reset_sending_psn(qp, psn); 1770 1771 /* Handle TID RDMA WRITE packets differently */ 1772 if (opcode >= TID_OP(WRITE_REQ) && 1773 opcode <= TID_OP(WRITE_DATA_LAST)) { 1774 head = priv->s_tid_head; 1775 tail = priv->s_tid_cur; 1776 /* 1777 * s_tid_cur is set to s_tid_head in the case, where 1778 * a new TID RDMA request is being started and all 1779 * previous ones have been completed. 1780 * Therefore, we need to do a secondary check in order 1781 * to properly determine whether we should start the 1782 * RC timer. 1783 */ 1784 wqe = rvt_get_swqe_ptr(qp, tail); 1785 req = wqe_to_tid_req(wqe); 1786 if (head == tail && req->comp_seg < req->total_segs) { 1787 if (tail == 0) 1788 tail = qp->s_size - 1; 1789 else 1790 tail -= 1; 1791 } 1792 } else { 1793 head = qp->s_tail; 1794 tail = qp->s_acked; 1795 } 1796 1797 /* 1798 * Start timer after a packet requesting an ACK has been sent and 1799 * there are still requests that haven't been acked. 1800 */ 1801 if ((psn & IB_BTH_REQ_ACK) && tail != head && 1802 opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) && 1803 opcode != TID_OP(RESYNC) && 1804 !(qp->s_flags & 1805 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && 1806 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { 1807 if (opcode == TID_OP(READ_REQ)) 1808 rvt_add_retry_timer_ext(qp, priv->timeout_shift); 1809 else 1810 rvt_add_retry_timer(qp); 1811 } 1812 1813 /* Start TID RDMA ACK timer */ 1814 if ((opcode == TID_OP(WRITE_DATA) || 1815 opcode == TID_OP(WRITE_DATA_LAST) || 1816 opcode == TID_OP(RESYNC)) && 1817 (psn & IB_BTH_REQ_ACK) && 1818 !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) && 1819 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { 1820 /* 1821 * The TID RDMA ACK packet could be received before this 1822 * function is called. 
Therefore, add the timer only if TID 1823 * RDMA ACK packets are actually pending. 1824 */ 1825 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 1826 req = wqe_to_tid_req(wqe); 1827 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && 1828 req->ack_seg < req->cur_seg) 1829 hfi1_add_tid_retry_timer(qp); 1830 } 1831 1832 while (qp->s_last != qp->s_acked) { 1833 u32 s_last; 1834 1835 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 1836 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && 1837 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) 1838 break; 1839 trdma_clean_swqe(qp, wqe); 1840 rvt_qp_wqe_unreserve(qp, wqe); 1841 s_last = qp->s_last; 1842 trace_hfi1_qp_send_completion(qp, wqe, s_last); 1843 if (++s_last >= qp->s_size) 1844 s_last = 0; 1845 qp->s_last = s_last; 1846 /* see post_send() */ 1847 barrier(); 1848 rvt_put_qp_swqe(qp, wqe); 1849 rvt_qp_swqe_complete(qp, 1850 wqe, 1851 ib_hfi1_wc_opcode[wqe->wr.opcode], 1852 IB_WC_SUCCESS); 1853 } 1854 /* 1855 * If we were waiting for sends to complete before re-sending, 1856 * and they are now complete, restart sending. 1857 */ 1858 trace_hfi1_sendcomplete(qp, psn); 1859 if (qp->s_flags & RVT_S_WAIT_PSN && 1860 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { 1861 qp->s_flags &= ~RVT_S_WAIT_PSN; 1862 qp->s_sending_psn = qp->s_psn; 1863 qp->s_sending_hpsn = qp->s_psn - 1; 1864 hfi1_schedule_send(qp); 1865 } 1866 } 1867 1868 static inline void update_last_psn(struct rvt_qp *qp, u32 psn) 1869 { 1870 qp->s_last_psn = psn; 1871 } 1872 1873 /* 1874 * Generate a SWQE completion. 1875 * This is similar to hfi1_send_complete but has to check to be sure 1876 * that the SGEs are not being referenced if the SWQE is being resent. 1877 */ 1878 struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, 1879 struct rvt_swqe *wqe, 1880 struct hfi1_ibport *ibp) 1881 { 1882 struct hfi1_qp_priv *priv = qp->priv; 1883 1884 lockdep_assert_held(&qp->s_lock); 1885 /* 1886 * Don't decrement refcount and don't generate a 1887 * completion if the SWQE is being resent until the send 1888 * is finished. 1889 */ 1890 trace_hfi1_rc_completion(qp, wqe->lpsn); 1891 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || 1892 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { 1893 u32 s_last; 1894 1895 trdma_clean_swqe(qp, wqe); 1896 rvt_put_qp_swqe(qp, wqe); 1897 rvt_qp_wqe_unreserve(qp, wqe); 1898 s_last = qp->s_last; 1899 trace_hfi1_qp_send_completion(qp, wqe, s_last); 1900 if (++s_last >= qp->s_size) 1901 s_last = 0; 1902 qp->s_last = s_last; 1903 /* see post_send() */ 1904 barrier(); 1905 rvt_qp_swqe_complete(qp, 1906 wqe, 1907 ib_hfi1_wc_opcode[wqe->wr.opcode], 1908 IB_WC_SUCCESS); 1909 } else { 1910 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1911 1912 this_cpu_inc(*ibp->rvp.rc_delayed_comp); 1913 /* 1914 * If send progress not running attempt to progress 1915 * SDMA queue. 1916 */ 1917 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) { 1918 struct sdma_engine *engine; 1919 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr); 1920 u8 sc5; 1921 1922 /* For now use sc to find engine */ 1923 sc5 = ibp->sl_to_sc[sl]; 1924 engine = qp_to_sdma_engine(qp, sc5); 1925 sdma_engine_progress_schedule(engine); 1926 } 1927 } 1928 1929 qp->s_retry = qp->s_retry_cnt; 1930 /* 1931 * Don't update the last PSN if the request being completed is 1932 * a TID RDMA WRITE request. 1933 * Completion of the TID RDMA WRITE requests are done by the 1934 * TID RDMA ACKs and as such could be for a request that has 1935 * already been ACKed as far as the IB state machine is 1936 * concerned. 
1937 */ 1938 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 1939 update_last_psn(qp, wqe->lpsn); 1940 1941 /* 1942 * If we are completing a request which is in the process of 1943 * being resent, we can stop re-sending it since we know the 1944 * responder has already seen it. 1945 */ 1946 if (qp->s_acked == qp->s_cur) { 1947 if (++qp->s_cur >= qp->s_size) 1948 qp->s_cur = 0; 1949 qp->s_acked = qp->s_cur; 1950 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); 1951 if (qp->s_acked != qp->s_tail) { 1952 qp->s_state = OP(SEND_LAST); 1953 qp->s_psn = wqe->psn; 1954 } 1955 } else { 1956 if (++qp->s_acked >= qp->s_size) 1957 qp->s_acked = 0; 1958 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) 1959 qp->s_draining = 0; 1960 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 1961 } 1962 if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) { 1963 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK; 1964 hfi1_schedule_send(qp); 1965 } 1966 return wqe; 1967 } 1968 1969 static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd) 1970 { 1971 /* Retry this request. */ 1972 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { 1973 qp->r_flags |= RVT_R_RDMAR_SEQ; 1974 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0); 1975 if (list_empty(&qp->rspwait)) { 1976 qp->r_flags |= RVT_R_RSP_SEND; 1977 rvt_get_qp(qp); 1978 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); 1979 } 1980 } 1981 } 1982 1983 /** 1984 * update_qp_retry_state - Update qp retry state. 1985 * @qp: the QP 1986 * @psn: the packet sequence number of the TID RDMA WRITE RESP. 1987 * @spsn: The start psn for the given TID RDMA WRITE swqe. 1988 * @lpsn: The last psn for the given TID RDMA WRITE swqe. 1989 * 1990 * This function is called to update the qp retry state upon 1991 * receiving a TID WRITE RESP after the qp is scheduled to retry 1992 * a request. 1993 */ 1994 static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn, 1995 u32 lpsn) 1996 { 1997 struct hfi1_qp_priv *qpriv = qp->priv; 1998 1999 qp->s_psn = psn + 1; 2000 /* 2001 * If this is the first TID RDMA WRITE RESP packet for the current 2002 * request, change the s_state so that the retry will be processed 2003 * correctly. Similarly, if this is the last TID RDMA WRITE RESP 2004 * packet, change the s_state and advance the s_cur. 2005 */ 2006 if (cmp_psn(psn, lpsn) >= 0) { 2007 qp->s_cur = qpriv->s_tid_cur + 1; 2008 if (qp->s_cur >= qp->s_size) 2009 qp->s_cur = 0; 2010 qp->s_state = TID_OP(WRITE_REQ); 2011 } else if (!cmp_psn(psn, spsn)) { 2012 qp->s_cur = qpriv->s_tid_cur; 2013 qp->s_state = TID_OP(WRITE_RESP); 2014 } 2015 } 2016 2017 /** 2018 * do_rc_ack - process an incoming RC ACK 2019 * @qp: the QP the ACK came in on 2020 * @psn: the packet sequence number of the ACK 2021 * @opcode: the opcode of the request that resulted in the ACK 2022 * 2023 * This is called from rc_rcv_resp() to process an incoming RC ACK 2024 * for the given QP. 2025 * May be called at interrupt level, with the QP s_lock held. 2026 * Returns 1 if OK, 0 if current operation should be aborted (NAK). 
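The AETH NAK field (aeth >> IB_AETH_NAK_SHIFT) selects the handling below: 0 is an ACK, 1 an RNR NAK, and 3 a NAK whose code is taken from (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK; the remaining value is reserved and ignored.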
2027 */ 2028 int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, 2029 u64 val, struct hfi1_ctxtdata *rcd) 2030 { 2031 struct hfi1_ibport *ibp; 2032 enum ib_wc_status status; 2033 struct hfi1_qp_priv *qpriv = qp->priv; 2034 struct rvt_swqe *wqe; 2035 int ret = 0; 2036 u32 ack_psn; 2037 int diff; 2038 struct rvt_dev_info *rdi; 2039 2040 lockdep_assert_held(&qp->s_lock); 2041 /* 2042 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 2043 * requests and implicitly NAK RDMA read and atomic requests issued 2044 * before the NAK'ed request. The MSN won't include the NAK'ed 2045 * request but will include an ACK'ed request(s). 2046 */ 2047 ack_psn = psn; 2048 if (aeth >> IB_AETH_NAK_SHIFT) 2049 ack_psn--; 2050 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2051 ibp = rcd_to_iport(rcd); 2052 2053 /* 2054 * The MSN might be for a later WQE than the PSN indicates so 2055 * only complete WQEs that the PSN finishes. 2056 */ 2057 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) { 2058 /* 2059 * RDMA_READ_RESPONSE_ONLY is a special case since 2060 * we want to generate completion events for everything 2061 * before the RDMA read, copy the data, then generate 2062 * the completion for the read. 2063 */ 2064 if (wqe->wr.opcode == IB_WR_RDMA_READ && 2065 opcode == OP(RDMA_READ_RESPONSE_ONLY) && 2066 diff == 0) { 2067 ret = 1; 2068 goto bail_stop; 2069 } 2070 /* 2071 * If this request is a RDMA read or atomic, and the ACK is 2072 * for a later operation, this ACK NAKs the RDMA read or 2073 * atomic. In other words, only a RDMA_READ_LAST or ONLY 2074 * can ACK a RDMA read and likewise for atomic ops. Note 2075 * that the NAK case can only happen if relaxed ordering is 2076 * used and requests are sent after an RDMA read or atomic 2077 * is sent but before the response is received. 2078 */ 2079 if ((wqe->wr.opcode == IB_WR_RDMA_READ && 2080 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) || 2081 (wqe->wr.opcode == IB_WR_TID_RDMA_READ && 2082 (opcode != TID_OP(READ_RESP) || diff != 0)) || 2083 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2084 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && 2085 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) || 2086 (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && 2087 (delta_psn(psn, qp->s_last_psn) != 1))) { 2088 set_restart_qp(qp, rcd); 2089 /* 2090 * No need to process the ACK/NAK since we are 2091 * restarting an earlier request. 2092 */ 2093 goto bail_stop; 2094 } 2095 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2096 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2097 u64 *vaddr = wqe->sg_list[0].vaddr; 2098 *vaddr = val; 2099 } 2100 if (wqe->wr.opcode == IB_WR_OPFN) 2101 opfn_conn_reply(qp, val); 2102 2103 if (qp->s_num_rd_atomic && 2104 (wqe->wr.opcode == IB_WR_RDMA_READ || 2105 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2106 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { 2107 qp->s_num_rd_atomic--; 2108 /* Restart sending task if fence is complete */ 2109 if ((qp->s_flags & RVT_S_WAIT_FENCE) && 2110 !qp->s_num_rd_atomic) { 2111 qp->s_flags &= ~(RVT_S_WAIT_FENCE | 2112 RVT_S_WAIT_ACK); 2113 hfi1_schedule_send(qp); 2114 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) { 2115 qp->s_flags &= ~(RVT_S_WAIT_RDMAR | 2116 RVT_S_WAIT_ACK); 2117 hfi1_schedule_send(qp); 2118 } 2119 } 2120 2121 /* 2122 * TID RDMA WRITE requests will be completed by the TID RDMA 2123 * ACK packet handler (see tid_rdma.c). 
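Stop the completion loop here so that do_rc_completion() is not called for them.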
2124 */ 2125 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) 2126 break; 2127 2128 wqe = do_rc_completion(qp, wqe, ibp); 2129 if (qp->s_acked == qp->s_tail) 2130 break; 2131 } 2132 2133 trace_hfi1_rc_ack_do(qp, aeth, psn, wqe); 2134 trace_hfi1_sender_do_rc_ack(qp); 2135 switch (aeth >> IB_AETH_NAK_SHIFT) { 2136 case 0: /* ACK */ 2137 this_cpu_inc(*ibp->rvp.rc_acks); 2138 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { 2139 if (wqe_to_tid_req(wqe)->ack_pending) 2140 rvt_mod_retry_timer_ext(qp, 2141 qpriv->timeout_shift); 2142 else 2143 rvt_stop_rc_timers(qp); 2144 } else if (qp->s_acked != qp->s_tail) { 2145 struct rvt_swqe *__w = NULL; 2146 2147 if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID) 2148 __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur); 2149 2150 /* 2151 * Stop timers if we've received all of the TID RDMA 2152 * WRITE * responses. 2153 */ 2154 if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE && 2155 opcode == TID_OP(WRITE_RESP)) { 2156 /* 2157 * Normally, the loop above would correctly 2158 * process all WQEs from s_acked onward and 2159 * either complete them or check for correct 2160 * PSN sequencing. 2161 * However, for TID RDMA, due to pipelining, 2162 * the response may not be for the request at 2163 * s_acked so the above look would just be 2164 * skipped. This does not allow for checking 2165 * the PSN sequencing. It has to be done 2166 * separately. 2167 */ 2168 if (cmp_psn(psn, qp->s_last_psn + 1)) { 2169 set_restart_qp(qp, rcd); 2170 goto bail_stop; 2171 } 2172 /* 2173 * If the psn is being resent, stop the 2174 * resending. 2175 */ 2176 if (qp->s_cur != qp->s_tail && 2177 cmp_psn(qp->s_psn, psn) <= 0) 2178 update_qp_retry_state(qp, psn, 2179 __w->psn, 2180 __w->lpsn); 2181 else if (--qpriv->pending_tid_w_resp) 2182 rvt_mod_retry_timer(qp); 2183 else 2184 rvt_stop_rc_timers(qp); 2185 } else { 2186 /* 2187 * We are expecting more ACKs so 2188 * mod the retry timer. 2189 */ 2190 rvt_mod_retry_timer(qp); 2191 /* 2192 * We can stop re-sending the earlier packets 2193 * and continue with the next packet the 2194 * receiver wants. 2195 */ 2196 if (cmp_psn(qp->s_psn, psn) <= 0) 2197 reset_psn(qp, psn + 1); 2198 } 2199 } else { 2200 /* No more acks - kill all timers */ 2201 rvt_stop_rc_timers(qp); 2202 if (cmp_psn(qp->s_psn, psn) <= 0) { 2203 qp->s_state = OP(SEND_LAST); 2204 qp->s_psn = psn + 1; 2205 } 2206 } 2207 if (qp->s_flags & RVT_S_WAIT_ACK) { 2208 qp->s_flags &= ~RVT_S_WAIT_ACK; 2209 hfi1_schedule_send(qp); 2210 } 2211 rvt_get_credit(qp, aeth); 2212 qp->s_rnr_retry = qp->s_rnr_retry_cnt; 2213 qp->s_retry = qp->s_retry_cnt; 2214 /* 2215 * If the current request is a TID RDMA WRITE request and the 2216 * response is not a TID RDMA WRITE RESP packet, s_last_psn 2217 * can't be advanced. 2218 */ 2219 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && 2220 opcode != TID_OP(WRITE_RESP) && 2221 cmp_psn(psn, wqe->psn) >= 0) 2222 return 1; 2223 update_last_psn(qp, psn); 2224 return 1; 2225 2226 case 1: /* RNR NAK */ 2227 ibp->rvp.n_rnr_naks++; 2228 if (qp->s_acked == qp->s_tail) 2229 goto bail_stop; 2230 if (qp->s_flags & RVT_S_WAIT_RNR) 2231 goto bail_stop; 2232 rdi = ib_to_rvt(qp->ibqp.device); 2233 if (qp->s_rnr_retry == 0 && 2234 !((rdi->post_parms[wqe->wr.opcode].flags & 2235 RVT_OPERATION_IGN_RNR_CNT) && 2236 qp->s_rnr_retry_cnt == 0)) { 2237 status = IB_WC_RNR_RETRY_EXC_ERR; 2238 goto class_b; 2239 } 2240 if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0) 2241 qp->s_rnr_retry--; 2242 2243 /* 2244 * The last valid PSN is the previous PSN. 
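The retry therefore resumes from the NAK'ed PSN itself.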
For TID RDMA WRITE 2245 * request, s_last_psn should be incremented only when a TID 2246 * RDMA WRITE RESP is received to avoid skipping lost TID RDMA 2247 * WRITE RESP packets. 2248 */ 2249 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) { 2250 reset_psn(qp, qp->s_last_psn + 1); 2251 } else { 2252 update_last_psn(qp, psn - 1); 2253 reset_psn(qp, psn); 2254 } 2255 2256 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); 2257 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); 2258 rvt_stop_rc_timers(qp); 2259 rvt_add_rnr_timer(qp, aeth); 2260 return 0; 2261 2262 case 3: /* NAK */ 2263 if (qp->s_acked == qp->s_tail) 2264 goto bail_stop; 2265 /* The last valid PSN is the previous PSN. */ 2266 update_last_psn(qp, psn - 1); 2267 switch ((aeth >> IB_AETH_CREDIT_SHIFT) & 2268 IB_AETH_CREDIT_MASK) { 2269 case 0: /* PSN sequence error */ 2270 ibp->rvp.n_seq_naks++; 2271 /* 2272 * Back up to the responder's expected PSN. 2273 * Note that we might get a NAK in the middle of an 2274 * RDMA READ response which terminates the RDMA 2275 * READ. 2276 */ 2277 hfi1_restart_rc(qp, psn, 0); 2278 hfi1_schedule_send(qp); 2279 break; 2280 2281 case 1: /* Invalid Request */ 2282 status = IB_WC_REM_INV_REQ_ERR; 2283 ibp->rvp.n_other_naks++; 2284 goto class_b; 2285 2286 case 2: /* Remote Access Error */ 2287 status = IB_WC_REM_ACCESS_ERR; 2288 ibp->rvp.n_other_naks++; 2289 goto class_b; 2290 2291 case 3: /* Remote Operation Error */ 2292 status = IB_WC_REM_OP_ERR; 2293 ibp->rvp.n_other_naks++; 2294 class_b: 2295 if (qp->s_last == qp->s_acked) { 2296 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) 2297 hfi1_kern_read_tid_flow_free(qp); 2298 2299 hfi1_trdma_send_complete(qp, wqe, status); 2300 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 2301 } 2302 break; 2303 2304 default: 2305 /* Ignore other reserved NAK error codes */ 2306 goto reserved; 2307 } 2308 qp->s_retry = qp->s_retry_cnt; 2309 qp->s_rnr_retry = qp->s_rnr_retry_cnt; 2310 goto bail_stop; 2311 2312 default: /* 2: reserved */ 2313 reserved: 2314 /* Ignore reserved NAK codes. */ 2315 goto bail_stop; 2316 } 2317 /* cannot be reached */ 2318 bail_stop: 2319 rvt_stop_rc_timers(qp); 2320 return ret; 2321 } 2322 2323 /* 2324 * We have seen an out of sequence RDMA read middle or last packet. 2325 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. 2326 */ 2327 static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, 2328 struct hfi1_ctxtdata *rcd) 2329 { 2330 struct rvt_swqe *wqe; 2331 2332 lockdep_assert_held(&qp->s_lock); 2333 /* Remove QP from retry timer */ 2334 rvt_stop_rc_timers(qp); 2335 2336 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2337 2338 while (cmp_psn(psn, wqe->lpsn) > 0) { 2339 if (wqe->wr.opcode == IB_WR_RDMA_READ || 2340 wqe->wr.opcode == IB_WR_TID_RDMA_READ || 2341 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE || 2342 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2343 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) 2344 break; 2345 wqe = do_rc_completion(qp, wqe, ibp); 2346 } 2347 2348 ibp->rvp.n_rdma_seq++; 2349 qp->r_flags |= RVT_R_RDMAR_SEQ; 2350 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0); 2351 if (list_empty(&qp->rspwait)) { 2352 qp->r_flags |= RVT_R_RSP_SEND; 2353 rvt_get_qp(qp); 2354 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); 2355 } 2356 } 2357 2358 /** 2359 * rc_rcv_resp - process an incoming RC response packet 2360 * @packet: data packet information 2361 * 2362 * This is called from hfi1_rc_rcv() to process an incoming RC response 2363 * packet for the given QP. 2364 * Called at interrupt level. 
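The QP s_lock is acquired and released internally.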
2365 */ 2366 static void rc_rcv_resp(struct hfi1_packet *packet) 2367 { 2368 struct hfi1_ctxtdata *rcd = packet->rcd; 2369 void *data = packet->payload; 2370 u32 tlen = packet->tlen; 2371 struct rvt_qp *qp = packet->qp; 2372 struct hfi1_ibport *ibp; 2373 struct ib_other_headers *ohdr = packet->ohdr; 2374 struct rvt_swqe *wqe; 2375 enum ib_wc_status status; 2376 unsigned long flags; 2377 int diff; 2378 u64 val; 2379 u32 aeth; 2380 u32 psn = ib_bth_get_psn(packet->ohdr); 2381 u32 pmtu = qp->pmtu; 2382 u16 hdrsize = packet->hlen; 2383 u8 opcode = packet->opcode; 2384 u8 pad = packet->pad; 2385 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); 2386 2387 spin_lock_irqsave(&qp->s_lock, flags); 2388 trace_hfi1_ack(qp, psn); 2389 2390 /* Ignore invalid responses. */ 2391 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0) 2392 goto ack_done; 2393 2394 /* Ignore duplicate responses. */ 2395 diff = cmp_psn(psn, qp->s_last_psn); 2396 if (unlikely(diff <= 0)) { 2397 /* Update credits for "ghost" ACKs */ 2398 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) { 2399 aeth = be32_to_cpu(ohdr->u.aeth); 2400 if ((aeth >> IB_AETH_NAK_SHIFT) == 0) 2401 rvt_get_credit(qp, aeth); 2402 } 2403 goto ack_done; 2404 } 2405 2406 /* 2407 * Skip everything other than the PSN we expect, if we are waiting 2408 * for a reply to a restarted RDMA read or atomic op. 2409 */ 2410 if (qp->r_flags & RVT_R_RDMAR_SEQ) { 2411 if (cmp_psn(psn, qp->s_last_psn + 1) != 0) 2412 goto ack_done; 2413 qp->r_flags &= ~RVT_R_RDMAR_SEQ; 2414 } 2415 2416 if (unlikely(qp->s_acked == qp->s_tail)) 2417 goto ack_done; 2418 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2419 status = IB_WC_SUCCESS; 2420 2421 switch (opcode) { 2422 case OP(ACKNOWLEDGE): 2423 case OP(ATOMIC_ACKNOWLEDGE): 2424 case OP(RDMA_READ_RESPONSE_FIRST): 2425 aeth = be32_to_cpu(ohdr->u.aeth); 2426 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) 2427 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth); 2428 else 2429 val = 0; 2430 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || 2431 opcode != OP(RDMA_READ_RESPONSE_FIRST)) 2432 goto ack_done; 2433 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2434 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) 2435 goto ack_op_err; 2436 /* 2437 * If this is a response to a resent RDMA read, we 2438 * have to be careful to copy the data to the right 2439 * location. 2440 */ 2441 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, 2442 wqe, psn, pmtu); 2443 goto read_middle; 2444 2445 case OP(RDMA_READ_RESPONSE_MIDDLE): 2446 /* no AETH, no ACK */ 2447 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) 2448 goto ack_seq_err; 2449 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) 2450 goto ack_op_err; 2451 read_middle: 2452 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) 2453 goto ack_len_err; 2454 if (unlikely(pmtu >= qp->s_rdma_read_len)) 2455 goto ack_len_err; 2456 2457 /* 2458 * We got a response so update the timeout. 2459 * 4.096 usec. * (1 << qp->timeout) 2460 */ 2461 rvt_mod_retry_timer(qp); 2462 if (qp->s_flags & RVT_S_WAIT_ACK) { 2463 qp->s_flags &= ~RVT_S_WAIT_ACK; 2464 hfi1_schedule_send(qp); 2465 } 2466 2467 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE)) 2468 qp->s_retry = qp->s_retry_cnt; 2469 2470 /* 2471 * Update the RDMA receive state but do the copy w/o 2472 * holding the locks and blocking interrupts. 
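The s_lock is dropped before rvt_copy_sge() and the function exits through the bail label without retaking it.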
2473 */ 2474 qp->s_rdma_read_len -= pmtu; 2475 update_last_psn(qp, psn); 2476 spin_unlock_irqrestore(&qp->s_lock, flags); 2477 rvt_copy_sge(qp, &qp->s_rdma_read_sge, 2478 data, pmtu, false, false); 2479 goto bail; 2480 2481 case OP(RDMA_READ_RESPONSE_ONLY): 2482 aeth = be32_to_cpu(ohdr->u.aeth); 2483 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) 2484 goto ack_done; 2485 /* 2486 * Check that the data size is >= 0 && <= pmtu. 2487 * Remember to account for ICRC (4). 2488 */ 2489 if (unlikely(tlen < (hdrsize + extra_bytes))) 2490 goto ack_len_err; 2491 /* 2492 * If this is a response to a resent RDMA read, we 2493 * have to be careful to copy the data to the right 2494 * location. 2495 */ 2496 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2497 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, 2498 wqe, psn, pmtu); 2499 goto read_last; 2500 2501 case OP(RDMA_READ_RESPONSE_LAST): 2502 /* ACKs READ req. */ 2503 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) 2504 goto ack_seq_err; 2505 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) 2506 goto ack_op_err; 2507 /* 2508 * Check that the data size is >= 1 && <= pmtu. 2509 * Remember to account for ICRC (4). 2510 */ 2511 if (unlikely(tlen <= (hdrsize + extra_bytes))) 2512 goto ack_len_err; 2513 read_last: 2514 tlen -= hdrsize + extra_bytes; 2515 if (unlikely(tlen != qp->s_rdma_read_len)) 2516 goto ack_len_err; 2517 aeth = be32_to_cpu(ohdr->u.aeth); 2518 rvt_copy_sge(qp, &qp->s_rdma_read_sge, 2519 data, tlen, false, false); 2520 WARN_ON(qp->s_rdma_read_sge.num_sge); 2521 (void)do_rc_ack(qp, aeth, psn, 2522 OP(RDMA_READ_RESPONSE_LAST), 0, rcd); 2523 goto ack_done; 2524 } 2525 2526 ack_op_err: 2527 status = IB_WC_LOC_QP_OP_ERR; 2528 goto ack_err; 2529 2530 ack_seq_err: 2531 ibp = rcd_to_iport(rcd); 2532 rdma_seq_err(qp, ibp, psn, rcd); 2533 goto ack_done; 2534 2535 ack_len_err: 2536 status = IB_WC_LOC_LEN_ERR; 2537 ack_err: 2538 if (qp->s_last == qp->s_acked) { 2539 rvt_send_complete(qp, wqe, status); 2540 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 2541 } 2542 ack_done: 2543 spin_unlock_irqrestore(&qp->s_lock, flags); 2544 bail: 2545 return; 2546 } 2547 2548 static inline void rc_cancel_ack(struct rvt_qp *qp) 2549 { 2550 qp->r_adefered = 0; 2551 if (list_empty(&qp->rspwait)) 2552 return; 2553 list_del_init(&qp->rspwait); 2554 qp->r_flags &= ~RVT_R_RSP_NAK; 2555 rvt_put_qp(qp); 2556 } 2557 2558 /** 2559 * rc_rcv_error - process an incoming duplicate or error RC packet 2560 * @ohdr: the other headers for this packet 2561 * @data: the packet data 2562 * @qp: the QP for this packet 2563 * @opcode: the opcode for this packet 2564 * @psn: the packet sequence number for this packet 2565 * @diff: the difference between the PSN and the expected PSN 2566 * 2567 * This is called from hfi1_rc_rcv() to process an unexpected 2568 * incoming RC packet for the given QP. 2569 * Called at interrupt level. 2570 * Return 1 if no more processing is needed; otherwise return 0 to 2571 * schedule a response to be sent. 2572 */ 2573 static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data, 2574 struct rvt_qp *qp, u32 opcode, u32 psn, 2575 int diff, struct hfi1_ctxtdata *rcd) 2576 { 2577 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 2578 struct rvt_ack_entry *e; 2579 unsigned long flags; 2580 u8 prev; 2581 u8 mra; /* most recent ACK */ 2582 bool old_req; 2583 2584 trace_hfi1_rcv_error(qp, psn); 2585 if (diff > 0) { 2586 /* 2587 * Packet sequence error. 2588 * A NAK will ACK earlier sends and RDMA writes. 2589 * Don't queue the NAK if we already sent one. 
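A non-zero qp->r_nak_state means one is already outstanding.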
2590 */ 2591 if (!qp->r_nak_state) { 2592 ibp->rvp.n_rc_seqnak++; 2593 qp->r_nak_state = IB_NAK_PSN_ERROR; 2594 /* Use the expected PSN. */ 2595 qp->r_ack_psn = qp->r_psn; 2596 /* 2597 * Wait to send the sequence NAK until all packets 2598 * in the receive queue have been processed. 2599 * Otherwise, we end up propagating congestion. 2600 */ 2601 rc_defered_ack(rcd, qp); 2602 } 2603 goto done; 2604 } 2605 2606 /* 2607 * Handle a duplicate request. Don't re-execute SEND, RDMA 2608 * write or atomic op. Don't NAK errors, just silently drop 2609 * the duplicate request. Note that r_sge, r_len, and 2610 * r_rcv_len may be in use so don't modify them. 2611 * 2612 * We are supposed to ACK the earliest duplicate PSN but we 2613 * can coalesce an outstanding duplicate ACK. We have to 2614 * send the earliest so that RDMA reads can be restarted at 2615 * the requester's expected PSN. 2616 * 2617 * First, find where this duplicate PSN falls within the 2618 * ACKs previously sent. 2619 * old_req is true if there is an older response that is scheduled 2620 * to be sent before sending this one. 2621 */ 2622 e = NULL; 2623 old_req = 1; 2624 ibp->rvp.n_rc_dupreq++; 2625 2626 spin_lock_irqsave(&qp->s_lock, flags); 2627 2628 e = find_prev_entry(qp, psn, &prev, &mra, &old_req); 2629 2630 switch (opcode) { 2631 case OP(RDMA_READ_REQUEST): { 2632 struct ib_reth *reth; 2633 u32 offset; 2634 u32 len; 2635 2636 /* 2637 * If we didn't find the RDMA read request in the ack queue, 2638 * we can ignore this request. 2639 */ 2640 if (!e || e->opcode != OP(RDMA_READ_REQUEST)) 2641 goto unlock_done; 2642 /* RETH comes after BTH */ 2643 reth = &ohdr->u.rc.reth; 2644 /* 2645 * Address range must be a subset of the original 2646 * request and start on pmtu boundaries. 2647 * We reuse the old ack_queue slot since the requester 2648 * should not back up and request an earlier PSN for the 2649 * same request. 2650 */ 2651 offset = delta_psn(psn, e->psn) * qp->pmtu; 2652 len = be32_to_cpu(reth->length); 2653 if (unlikely(offset + len != e->rdma_sge.sge_length)) 2654 goto unlock_done; 2655 release_rdma_sge_mr(e); 2656 if (len != 0) { 2657 u32 rkey = be32_to_cpu(reth->rkey); 2658 u64 vaddr = get_ib_reth_vaddr(reth); 2659 int ok; 2660 2661 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, 2662 IB_ACCESS_REMOTE_READ); 2663 if (unlikely(!ok)) 2664 goto unlock_done; 2665 } else { 2666 e->rdma_sge.vaddr = NULL; 2667 e->rdma_sge.length = 0; 2668 e->rdma_sge.sge_length = 0; 2669 } 2670 e->psn = psn; 2671 if (old_req) 2672 goto unlock_done; 2673 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) 2674 qp->s_acked_ack_queue = prev; 2675 qp->s_tail_ack_queue = prev; 2676 break; 2677 } 2678 2679 case OP(COMPARE_SWAP): 2680 case OP(FETCH_ADD): { 2681 /* 2682 * If we didn't find the atomic request in the ack queue 2683 * or the send engine is already backed up to send an 2684 * earlier entry, we can ignore this request. 2685 */ 2686 if (!e || e->opcode != (u8)opcode || old_req) 2687 goto unlock_done; 2688 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue) 2689 qp->s_acked_ack_queue = prev; 2690 qp->s_tail_ack_queue = prev; 2691 break; 2692 } 2693 2694 default: 2695 /* 2696 * Ignore this operation if it doesn't request an ACK 2697 * or an earlier RDMA read or atomic is going to be resent. 2698 */ 2699 if (!(psn & IB_BTH_REQ_ACK) || old_req) 2700 goto unlock_done; 2701 /* 2702 * Resend the most recent ACK if this request is 2703 * after all the previous RDMA reads and atomics. 
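This is detected below by the most recent ACK index (mra) being equal to qp->r_head_ack_queue.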
2704 */ 2705 if (mra == qp->r_head_ack_queue) { 2706 spin_unlock_irqrestore(&qp->s_lock, flags); 2707 qp->r_nak_state = 0; 2708 qp->r_ack_psn = qp->r_psn - 1; 2709 goto send_ack; 2710 } 2711 2712 /* 2713 * Resend the RDMA read or atomic op which 2714 * ACKs this duplicate request. 2715 */ 2716 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue) 2717 qp->s_acked_ack_queue = mra; 2718 qp->s_tail_ack_queue = mra; 2719 break; 2720 } 2721 qp->s_ack_state = OP(ACKNOWLEDGE); 2722 qp->s_flags |= RVT_S_RESP_PENDING; 2723 qp->r_nak_state = 0; 2724 hfi1_schedule_send(qp); 2725 2726 unlock_done: 2727 spin_unlock_irqrestore(&qp->s_lock, flags); 2728 done: 2729 return 1; 2730 2731 send_ack: 2732 return 0; 2733 } 2734 2735 static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, 2736 u32 lqpn, u32 rqpn, u8 svc_type) 2737 { 2738 struct opa_hfi1_cong_log_event_internal *cc_event; 2739 unsigned long flags; 2740 2741 if (sl >= OPA_MAX_SLS) 2742 return; 2743 2744 spin_lock_irqsave(&ppd->cc_log_lock, flags); 2745 2746 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8); 2747 ppd->threshold_event_counter++; 2748 2749 cc_event = &ppd->cc_events[ppd->cc_log_idx++]; 2750 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS) 2751 ppd->cc_log_idx = 0; 2752 cc_event->lqpn = lqpn & RVT_QPN_MASK; 2753 cc_event->rqpn = rqpn & RVT_QPN_MASK; 2754 cc_event->sl = sl; 2755 cc_event->svc_type = svc_type; 2756 cc_event->rlid = rlid; 2757 /* keep timestamp in units of 1.024 usec */ 2758 cc_event->timestamp = ktime_get_ns() / 1024; 2759 2760 spin_unlock_irqrestore(&ppd->cc_log_lock, flags); 2761 } 2762 2763 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn, 2764 u32 rqpn, u8 svc_type) 2765 { 2766 struct cca_timer *cca_timer; 2767 u16 ccti, ccti_incr, ccti_timer, ccti_limit; 2768 u8 trigger_threshold; 2769 struct cc_state *cc_state; 2770 unsigned long flags; 2771 2772 if (sl >= OPA_MAX_SLS) 2773 return; 2774 2775 cc_state = get_cc_state(ppd); 2776 2777 if (!cc_state) 2778 return; 2779 2780 /* 2781 * 1) increase CCTI (for this SL) 2782 * 2) select IPG (i.e., call set_link_ipg()) 2783 * 3) start timer 2784 */ 2785 ccti_limit = cc_state->cct.ccti_limit; 2786 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase; 2787 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer; 2788 trigger_threshold = 2789 cc_state->cong_setting.entries[sl].trigger_threshold; 2790 2791 spin_lock_irqsave(&ppd->cca_timer_lock, flags); 2792 2793 cca_timer = &ppd->cca_timer[sl]; 2794 if (cca_timer->ccti < ccti_limit) { 2795 if (cca_timer->ccti + ccti_incr <= ccti_limit) 2796 cca_timer->ccti += ccti_incr; 2797 else 2798 cca_timer->ccti = ccti_limit; 2799 set_link_ipg(ppd); 2800 } 2801 2802 ccti = cca_timer->ccti; 2803 2804 if (!hrtimer_active(&cca_timer->hrtimer)) { 2805 /* ccti_timer is in units of 1.024 usec */ 2806 unsigned long nsec = 1024 * ccti_timer; 2807 2808 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec), 2809 HRTIMER_MODE_REL_PINNED); 2810 } 2811 2812 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags); 2813 2814 if ((trigger_threshold != 0) && (ccti >= trigger_threshold)) 2815 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type); 2816 } 2817 2818 /** 2819 * hfi1_rc_rcv - process an incoming RC packet 2820 * @packet: data packet information 2821 * 2822 * This is called from qp_rcv() to process an incoming RC packet 2823 * for the given QP. 2824 * May be called at interrupt level. 
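Must be called with the QP r_lock held (see the lockdep assertion below).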
2825 */ 2826 void hfi1_rc_rcv(struct hfi1_packet *packet) 2827 { 2828 struct hfi1_ctxtdata *rcd = packet->rcd; 2829 void *data = packet->payload; 2830 u32 tlen = packet->tlen; 2831 struct rvt_qp *qp = packet->qp; 2832 struct hfi1_qp_priv *qpriv = qp->priv; 2833 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 2834 struct ib_other_headers *ohdr = packet->ohdr; 2835 u32 opcode = packet->opcode; 2836 u32 hdrsize = packet->hlen; 2837 u32 psn = ib_bth_get_psn(packet->ohdr); 2838 u32 pad = packet->pad; 2839 struct ib_wc wc; 2840 u32 pmtu = qp->pmtu; 2841 int diff; 2842 struct ib_reth *reth; 2843 unsigned long flags; 2844 int ret; 2845 bool copy_last = false, fecn; 2846 u32 rkey; 2847 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); 2848 2849 lockdep_assert_held(&qp->r_lock); 2850 2851 if (hfi1_ruc_check_hdr(ibp, packet)) 2852 return; 2853 2854 fecn = process_ecn(qp, packet); 2855 opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1])); 2856 2857 /* 2858 * Process responses (ACKs) before anything else. Note that the 2859 * packet sequence number will be for something in the send work 2860 * queue rather than the expected receive packet sequence number. 2861 * In other words, this QP is the requester. 2862 */ 2863 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && 2864 opcode <= OP(ATOMIC_ACKNOWLEDGE)) { 2865 rc_rcv_resp(packet); 2866 return; 2867 } 2868 2869 /* Compute 24 bits worth of difference. */ 2870 diff = delta_psn(psn, qp->r_psn); 2871 if (unlikely(diff)) { 2872 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) 2873 return; 2874 goto send_ack; 2875 } 2876 2877 /* Check for opcode sequence errors. */ 2878 switch (qp->r_state) { 2879 case OP(SEND_FIRST): 2880 case OP(SEND_MIDDLE): 2881 if (opcode == OP(SEND_MIDDLE) || 2882 opcode == OP(SEND_LAST) || 2883 opcode == OP(SEND_LAST_WITH_IMMEDIATE) || 2884 opcode == OP(SEND_LAST_WITH_INVALIDATE)) 2885 break; 2886 goto nack_inv; 2887 2888 case OP(RDMA_WRITE_FIRST): 2889 case OP(RDMA_WRITE_MIDDLE): 2890 if (opcode == OP(RDMA_WRITE_MIDDLE) || 2891 opcode == OP(RDMA_WRITE_LAST) || 2892 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) 2893 break; 2894 goto nack_inv; 2895 2896 default: 2897 if (opcode == OP(SEND_MIDDLE) || 2898 opcode == OP(SEND_LAST) || 2899 opcode == OP(SEND_LAST_WITH_IMMEDIATE) || 2900 opcode == OP(SEND_LAST_WITH_INVALIDATE) || 2901 opcode == OP(RDMA_WRITE_MIDDLE) || 2902 opcode == OP(RDMA_WRITE_LAST) || 2903 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) 2904 goto nack_inv; 2905 /* 2906 * Note that it is up to the requester to not send a new 2907 * RDMA read or atomic operation before receiving an ACK 2908 * for the previous operation. 2909 */ 2910 break; 2911 } 2912 2913 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) 2914 rvt_comm_est(qp); 2915 2916 /* OK, process the packet. */ 2917 switch (opcode) { 2918 case OP(SEND_FIRST): 2919 ret = rvt_get_rwqe(qp, false); 2920 if (ret < 0) 2921 goto nack_op_err; 2922 if (!ret) 2923 goto rnr_nak; 2924 qp->r_rcv_len = 0; 2925 /* FALLTHROUGH */ 2926 case OP(SEND_MIDDLE): 2927 case OP(RDMA_WRITE_MIDDLE): 2928 send_middle: 2929 /* Check for invalid length PMTU or posted rwqe len. 
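A middle packet must carry exactly one PMTU of payload and must not overrun the posted receive length.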
*/ 2930 /* 2931 * There will be no padding for 9B packet but 16B packets 2932 * will come in with some padding since we always add 2933 * CRC and LT bytes which will need to be flit aligned 2934 */ 2935 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) 2936 goto nack_inv; 2937 qp->r_rcv_len += pmtu; 2938 if (unlikely(qp->r_rcv_len > qp->r_len)) 2939 goto nack_inv; 2940 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false); 2941 break; 2942 2943 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): 2944 /* consume RWQE */ 2945 ret = rvt_get_rwqe(qp, true); 2946 if (ret < 0) 2947 goto nack_op_err; 2948 if (!ret) 2949 goto rnr_nak; 2950 goto send_last_imm; 2951 2952 case OP(SEND_ONLY): 2953 case OP(SEND_ONLY_WITH_IMMEDIATE): 2954 case OP(SEND_ONLY_WITH_INVALIDATE): 2955 ret = rvt_get_rwqe(qp, false); 2956 if (ret < 0) 2957 goto nack_op_err; 2958 if (!ret) 2959 goto rnr_nak; 2960 qp->r_rcv_len = 0; 2961 if (opcode == OP(SEND_ONLY)) 2962 goto no_immediate_data; 2963 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE)) 2964 goto send_last_inv; 2965 /* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */ 2966 case OP(SEND_LAST_WITH_IMMEDIATE): 2967 send_last_imm: 2968 wc.ex.imm_data = ohdr->u.imm_data; 2969 wc.wc_flags = IB_WC_WITH_IMM; 2970 goto send_last; 2971 case OP(SEND_LAST_WITH_INVALIDATE): 2972 send_last_inv: 2973 rkey = be32_to_cpu(ohdr->u.ieth); 2974 if (rvt_invalidate_rkey(qp, rkey)) 2975 goto no_immediate_data; 2976 wc.ex.invalidate_rkey = rkey; 2977 wc.wc_flags = IB_WC_WITH_INVALIDATE; 2978 goto send_last; 2979 case OP(RDMA_WRITE_LAST): 2980 copy_last = rvt_is_user_qp(qp); 2981 /* fall through */ 2982 case OP(SEND_LAST): 2983 no_immediate_data: 2984 wc.wc_flags = 0; 2985 wc.ex.imm_data = 0; 2986 send_last: 2987 /* Check for invalid length. */ 2988 /* LAST len should be >= 1 */ 2989 if (unlikely(tlen < (hdrsize + extra_bytes))) 2990 goto nack_inv; 2991 /* Don't count the CRC(and padding and LT byte for 16B). */ 2992 tlen -= (hdrsize + extra_bytes); 2993 wc.byte_len = tlen + qp->r_rcv_len; 2994 if (unlikely(wc.byte_len > qp->r_len)) 2995 goto nack_inv; 2996 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last); 2997 rvt_put_ss(&qp->r_sge); 2998 qp->r_msn++; 2999 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) 3000 break; 3001 wc.wr_id = qp->r_wr_id; 3002 wc.status = IB_WC_SUCCESS; 3003 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || 3004 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) 3005 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; 3006 else 3007 wc.opcode = IB_WC_RECV; 3008 wc.qp = &qp->ibqp; 3009 wc.src_qp = qp->remote_qpn; 3010 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX; 3011 /* 3012 * It seems that IB mandates the presence of an SL in a 3013 * work completion only for the UD transport (see section 3014 * 11.4.2 of IBTA Vol. 1). 3015 * 3016 * However, the way the SL is chosen below is consistent 3017 * with the way that IB/qib works and is trying avoid 3018 * introducing incompatibilities. 3019 * 3020 * See also OPA Vol. 1, section 9.7.6, and table 9-17. 3021 */ 3022 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr); 3023 /* zero fields that are N/A */ 3024 wc.vendor_err = 0; 3025 wc.pkey_index = 0; 3026 wc.dlid_path_bits = 0; 3027 wc.port_num = 0; 3028 /* Signal completion event if the solicited bit is set. 
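The BTH solicited bit is passed to rvt_cq_enter() below to make that decision.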
*/ 3029 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 3030 ib_bth_is_solicited(ohdr)); 3031 break; 3032 3033 case OP(RDMA_WRITE_ONLY): 3034 copy_last = rvt_is_user_qp(qp); 3035 /* fall through */ 3036 case OP(RDMA_WRITE_FIRST): 3037 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): 3038 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) 3039 goto nack_inv; 3040 /* consume RWQE */ 3041 reth = &ohdr->u.rc.reth; 3042 qp->r_len = be32_to_cpu(reth->length); 3043 qp->r_rcv_len = 0; 3044 qp->r_sge.sg_list = NULL; 3045 if (qp->r_len != 0) { 3046 u32 rkey = be32_to_cpu(reth->rkey); 3047 u64 vaddr = get_ib_reth_vaddr(reth); 3048 int ok; 3049 3050 /* Check rkey & NAK */ 3051 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, 3052 rkey, IB_ACCESS_REMOTE_WRITE); 3053 if (unlikely(!ok)) 3054 goto nack_acc; 3055 qp->r_sge.num_sge = 1; 3056 } else { 3057 qp->r_sge.num_sge = 0; 3058 qp->r_sge.sge.mr = NULL; 3059 qp->r_sge.sge.vaddr = NULL; 3060 qp->r_sge.sge.length = 0; 3061 qp->r_sge.sge.sge_length = 0; 3062 } 3063 if (opcode == OP(RDMA_WRITE_FIRST)) 3064 goto send_middle; 3065 else if (opcode == OP(RDMA_WRITE_ONLY)) 3066 goto no_immediate_data; 3067 ret = rvt_get_rwqe(qp, true); 3068 if (ret < 0) 3069 goto nack_op_err; 3070 if (!ret) { 3071 /* peer will send again */ 3072 rvt_put_ss(&qp->r_sge); 3073 goto rnr_nak; 3074 } 3075 wc.ex.imm_data = ohdr->u.rc.imm_data; 3076 wc.wc_flags = IB_WC_WITH_IMM; 3077 goto send_last; 3078 3079 case OP(RDMA_READ_REQUEST): { 3080 struct rvt_ack_entry *e; 3081 u32 len; 3082 u8 next; 3083 3084 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) 3085 goto nack_inv; 3086 next = qp->r_head_ack_queue + 1; 3087 /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */ 3088 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) 3089 next = 0; 3090 spin_lock_irqsave(&qp->s_lock, flags); 3091 if (unlikely(next == qp->s_acked_ack_queue)) { 3092 if (!qp->s_ack_queue[next].sent) 3093 goto nack_inv_unlck; 3094 update_ack_queue(qp, next); 3095 } 3096 e = &qp->s_ack_queue[qp->r_head_ack_queue]; 3097 release_rdma_sge_mr(e); 3098 reth = &ohdr->u.rc.reth; 3099 len = be32_to_cpu(reth->length); 3100 if (len) { 3101 u32 rkey = be32_to_cpu(reth->rkey); 3102 u64 vaddr = get_ib_reth_vaddr(reth); 3103 int ok; 3104 3105 /* Check rkey & NAK */ 3106 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, 3107 rkey, IB_ACCESS_REMOTE_READ); 3108 if (unlikely(!ok)) 3109 goto nack_acc_unlck; 3110 /* 3111 * Update the next expected PSN. We add 1 later 3112 * below, so only add the remainder here. 3113 */ 3114 qp->r_psn += rvt_div_mtu(qp, len - 1); 3115 } else { 3116 e->rdma_sge.mr = NULL; 3117 e->rdma_sge.vaddr = NULL; 3118 e->rdma_sge.length = 0; 3119 e->rdma_sge.sge_length = 0; 3120 } 3121 e->opcode = opcode; 3122 e->sent = 0; 3123 e->psn = psn; 3124 e->lpsn = qp->r_psn; 3125 /* 3126 * We need to increment the MSN here instead of when we 3127 * finish sending the result since a duplicate request would 3128 * increment it more than once. 3129 */ 3130 qp->r_msn++; 3131 qp->r_psn++; 3132 qp->r_state = opcode; 3133 qp->r_nak_state = 0; 3134 qp->r_head_ack_queue = next; 3135 qpriv->r_tid_alloc = qp->r_head_ack_queue; 3136 3137 /* Schedule the send engine. 
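Setting RVT_S_RESP_PENDING tells the send engine that a response is waiting in the ack queue.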
*/ 3138 qp->s_flags |= RVT_S_RESP_PENDING; 3139 if (fecn) 3140 qp->s_flags |= RVT_S_ECN; 3141 hfi1_schedule_send(qp); 3142 3143 spin_unlock_irqrestore(&qp->s_lock, flags); 3144 return; 3145 } 3146 3147 case OP(COMPARE_SWAP): 3148 case OP(FETCH_ADD): { 3149 struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth; 3150 u64 vaddr = get_ib_ateth_vaddr(ateth); 3151 bool opfn = opcode == OP(COMPARE_SWAP) && 3152 vaddr == HFI1_VERBS_E_ATOMIC_VADDR; 3153 struct rvt_ack_entry *e; 3154 atomic64_t *maddr; 3155 u64 sdata; 3156 u32 rkey; 3157 u8 next; 3158 3159 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) && 3160 !opfn)) 3161 goto nack_inv; 3162 next = qp->r_head_ack_queue + 1; 3163 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) 3164 next = 0; 3165 spin_lock_irqsave(&qp->s_lock, flags); 3166 if (unlikely(next == qp->s_acked_ack_queue)) { 3167 if (!qp->s_ack_queue[next].sent) 3168 goto nack_inv_unlck; 3169 update_ack_queue(qp, next); 3170 } 3171 e = &qp->s_ack_queue[qp->r_head_ack_queue]; 3172 release_rdma_sge_mr(e); 3173 /* Process OPFN special virtual address */ 3174 if (opfn) { 3175 opfn_conn_response(qp, e, ateth); 3176 goto ack; 3177 } 3178 if (unlikely(vaddr & (sizeof(u64) - 1))) 3179 goto nack_inv_unlck; 3180 rkey = be32_to_cpu(ateth->rkey); 3181 /* Check rkey & NAK */ 3182 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), 3183 vaddr, rkey, 3184 IB_ACCESS_REMOTE_ATOMIC))) 3185 goto nack_acc_unlck; 3186 /* Perform atomic OP and save result. */ 3187 maddr = (atomic64_t *)qp->r_sge.sge.vaddr; 3188 sdata = get_ib_ateth_swap(ateth); 3189 e->atomic_data = (opcode == OP(FETCH_ADD)) ? 3190 (u64)atomic64_add_return(sdata, maddr) - sdata : 3191 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, 3192 get_ib_ateth_compare(ateth), 3193 sdata); 3194 rvt_put_mr(qp->r_sge.sge.mr); 3195 qp->r_sge.num_sge = 0; 3196 ack: 3197 e->opcode = opcode; 3198 e->sent = 0; 3199 e->psn = psn; 3200 e->lpsn = psn; 3201 qp->r_msn++; 3202 qp->r_psn++; 3203 qp->r_state = opcode; 3204 qp->r_nak_state = 0; 3205 qp->r_head_ack_queue = next; 3206 qpriv->r_tid_alloc = qp->r_head_ack_queue; 3207 3208 /* Schedule the send engine. */ 3209 qp->s_flags |= RVT_S_RESP_PENDING; 3210 if (fecn) 3211 qp->s_flags |= RVT_S_ECN; 3212 hfi1_schedule_send(qp); 3213 3214 spin_unlock_irqrestore(&qp->s_lock, flags); 3215 return; 3216 } 3217 3218 default: 3219 /* NAK unknown opcodes. */ 3220 goto nack_inv; 3221 } 3222 qp->r_psn++; 3223 qp->r_state = opcode; 3224 qp->r_ack_psn = psn; 3225 qp->r_nak_state = 0; 3226 /* Send an ACK if requested or required. 
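The checks below either send the ACK immediately (packet->numpkt == 0, a FECN was seen, or HFI1_PSN_CREDIT ACKs have already been deferred) or defer it via rc_defered_ack().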
*/ 3227 if (psn & IB_BTH_REQ_ACK || fecn) { 3228 if (packet->numpkt == 0 || fecn || 3229 qp->r_adefered >= HFI1_PSN_CREDIT) { 3230 rc_cancel_ack(qp); 3231 goto send_ack; 3232 } 3233 qp->r_adefered++; 3234 rc_defered_ack(rcd, qp); 3235 } 3236 return; 3237 3238 rnr_nak: 3239 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK; 3240 qp->r_ack_psn = qp->r_psn; 3241 /* Queue RNR NAK for later */ 3242 rc_defered_ack(rcd, qp); 3243 return; 3244 3245 nack_op_err: 3246 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 3247 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; 3248 qp->r_ack_psn = qp->r_psn; 3249 /* Queue NAK for later */ 3250 rc_defered_ack(rcd, qp); 3251 return; 3252 3253 nack_inv_unlck: 3254 spin_unlock_irqrestore(&qp->s_lock, flags); 3255 nack_inv: 3256 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 3257 qp->r_nak_state = IB_NAK_INVALID_REQUEST; 3258 qp->r_ack_psn = qp->r_psn; 3259 /* Queue NAK for later */ 3260 rc_defered_ack(rcd, qp); 3261 return; 3262 3263 nack_acc_unlck: 3264 spin_unlock_irqrestore(&qp->s_lock, flags); 3265 nack_acc: 3266 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); 3267 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; 3268 qp->r_ack_psn = qp->r_psn; 3269 send_ack: 3270 hfi1_send_rc_ack(packet, fecn); 3271 } 3272 3273 void hfi1_rc_hdrerr( 3274 struct hfi1_ctxtdata *rcd, 3275 struct hfi1_packet *packet, 3276 struct rvt_qp *qp) 3277 { 3278 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 3279 int diff; 3280 u32 opcode; 3281 u32 psn; 3282 3283 if (hfi1_ruc_check_hdr(ibp, packet)) 3284 return; 3285 3286 psn = ib_bth_get_psn(packet->ohdr); 3287 opcode = ib_bth_get_opcode(packet->ohdr); 3288 3289 /* Only deal with RDMA Writes for now */ 3290 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { 3291 diff = delta_psn(psn, qp->r_psn); 3292 if (!qp->r_nak_state && diff >= 0) { 3293 ibp->rvp.n_rc_seqnak++; 3294 qp->r_nak_state = IB_NAK_PSN_ERROR; 3295 /* Use the expected PSN. */ 3296 qp->r_ack_psn = qp->r_psn; 3297 /* 3298 * Wait to send the sequence 3299 * NAK until all packets 3300 * in the receive queue have 3301 * been processed. 3302 * Otherwise, we end up 3303 * propagating congestion. 3304 */ 3305 rc_defered_ack(rcd, qp); 3306 } /* Out of sequence NAK */ 3307 } /* QP Request NAKs */ 3308 } 3309
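/*
 * Note on the PSN arithmetic used throughout this file: PSNs are 24-bit
 * values that wrap, so cmp_psn()/delta_psn() effectively order them by
 * the sign-extended difference of the low 24 bits.  A minimal
 * illustration (a sketch for reference only, not the rdmavt
 * implementation):
 *
 *	static int psn_cmp_example(u32 a, u32 b)
 *	{
 *		/* shift left then arithmetic right to sign-extend bit 23 */
 *		return (((int)a - (int)b) << 8) >> 8;
 *	}
 *
 * For example, psn_cmp_example(0x000001, 0xffffff) > 0: PSN 1 is
 * "after" PSN 0xffffff across the wrap.
 */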