1 /* 2 * Copyright(c) 2015 - 2018 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * 46 */ 47 48 #include <linux/io.h> 49 #include <rdma/rdma_vt.h> 50 #include <rdma/rdmavt_qp.h> 51 52 #include "hfi.h" 53 #include "qp.h" 54 #include "rc.h" 55 #include "verbs_txreq.h" 56 #include "trace.h" 57 58 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev, 59 u8 *prev_ack, bool *scheduled) 60 __must_hold(&qp->s_lock) 61 { 62 struct rvt_ack_entry *e = NULL; 63 u8 i, p; 64 bool s = true; 65 66 for (i = qp->r_head_ack_queue; ; i = p) { 67 if (i == qp->s_tail_ack_queue) 68 s = false; 69 if (i) 70 p = i - 1; 71 else 72 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); 73 if (p == qp->r_head_ack_queue) { 74 e = NULL; 75 break; 76 } 77 e = &qp->s_ack_queue[p]; 78 if (!e->opcode) { 79 e = NULL; 80 break; 81 } 82 if (cmp_psn(psn, e->psn) >= 0) { 83 if (p == qp->s_tail_ack_queue && 84 cmp_psn(psn, e->lpsn) <= 0) 85 s = false; 86 break; 87 } 88 } 89 if (prev) 90 *prev = p; 91 if (prev_ack) 92 *prev_ack = i; 93 if (scheduled) 94 *scheduled = s; 95 return e; 96 } 97 98 /** 99 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read) 100 * @dev: the device for this QP 101 * @qp: a pointer to the QP 102 * @ohdr: a pointer to the IB header being constructed 103 * @ps: the xmit packet state 104 * 105 * Return 1 if constructed; otherwise, return 0. 
106 * Note that we are in the responder's side of the QP context. 107 * Note the QP s_lock must be held. 108 */ 109 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, 110 struct ib_other_headers *ohdr, 111 struct hfi1_pkt_state *ps) 112 { 113 struct rvt_ack_entry *e; 114 u32 hwords, hdrlen; 115 u32 len = 0; 116 u32 bth0 = 0, bth2 = 0; 117 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT); 118 int middle = 0; 119 u32 pmtu = qp->pmtu; 120 struct hfi1_qp_priv *qpriv = qp->priv; 121 bool last_pkt; 122 u32 delta; 123 u8 next = qp->s_tail_ack_queue; 124 struct tid_rdma_request *req; 125 126 trace_hfi1_rsp_make_rc_ack(qp, 0); 127 lockdep_assert_held(&qp->s_lock); 128 /* Don't send an ACK if we aren't supposed to. */ 129 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) 130 goto bail; 131 132 if (qpriv->hdr_type == HFI1_PKT_TYPE_9B) 133 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 134 hwords = 5; 135 else 136 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */ 137 hwords = 7; 138 139 switch (qp->s_ack_state) { 140 case OP(RDMA_READ_RESPONSE_LAST): 141 case OP(RDMA_READ_RESPONSE_ONLY): 142 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 143 release_rdma_sge_mr(e); 144 fallthrough; 145 case OP(ATOMIC_ACKNOWLEDGE): 146 /* 147 * We can increment the tail pointer now that the last 148 * response has been sent instead of only being 149 * constructed. 150 */ 151 if (++next > rvt_size_atomic(&dev->rdi)) 152 next = 0; 153 /* 154 * Only advance the s_acked_ack_queue pointer if there 155 * have been no TID RDMA requests. 156 */ 157 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 158 if (e->opcode != TID_OP(WRITE_REQ) && 159 qp->s_acked_ack_queue == qp->s_tail_ack_queue) 160 qp->s_acked_ack_queue = next; 161 qp->s_tail_ack_queue = next; 162 trace_hfi1_rsp_make_rc_ack(qp, e->psn); 163 fallthrough; 164 case OP(SEND_ONLY): 165 case OP(ACKNOWLEDGE): 166 /* Check for no next entry in the queue. */ 167 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { 168 if (qp->s_flags & RVT_S_ACK_PENDING) 169 goto normal; 170 goto bail; 171 } 172 173 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 174 /* Check for tid write fence */ 175 if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) || 176 hfi1_tid_rdma_ack_interlock(qp, e)) { 177 iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB); 178 goto bail; 179 } 180 if (e->opcode == OP(RDMA_READ_REQUEST)) { 181 /* 182 * If a RDMA read response is being resent and 183 * we haven't seen the duplicate request yet, 184 * then stop sending the remaining responses the 185 * responder has seen until the requester re-sends it. 
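* (This state is detected below by a non-zero rdma_sge.sge_length with a NULL rdma_sge.mr; both ack-queue pointers are moved to r_head_ack_queue so no further responses go out until the duplicate request arrives.)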
186 */ 187 len = e->rdma_sge.sge_length; 188 if (len && !e->rdma_sge.mr) { 189 if (qp->s_acked_ack_queue == 190 qp->s_tail_ack_queue) 191 qp->s_acked_ack_queue = 192 qp->r_head_ack_queue; 193 qp->s_tail_ack_queue = qp->r_head_ack_queue; 194 goto bail; 195 } 196 /* Copy SGE state in case we need to resend */ 197 ps->s_txreq->mr = e->rdma_sge.mr; 198 if (ps->s_txreq->mr) 199 rvt_get_mr(ps->s_txreq->mr); 200 qp->s_ack_rdma_sge.sge = e->rdma_sge; 201 qp->s_ack_rdma_sge.num_sge = 1; 202 ps->s_txreq->ss = &qp->s_ack_rdma_sge; 203 if (len > pmtu) { 204 len = pmtu; 205 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); 206 } else { 207 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); 208 e->sent = 1; 209 } 210 ohdr->u.aeth = rvt_compute_aeth(qp); 211 hwords++; 212 qp->s_ack_rdma_psn = e->psn; 213 bth2 = mask_psn(qp->s_ack_rdma_psn++); 214 } else if (e->opcode == TID_OP(WRITE_REQ)) { 215 /* 216 * If a TID RDMA WRITE RESP is being resent, we have to 217 * wait for the actual request. All requests that are to 218 * be resent will have their state set to 219 * TID_REQUEST_RESEND. When the new request arrives, the 220 * state will be changed to TID_REQUEST_RESEND_ACTIVE. 221 */ 222 req = ack_to_tid_req(e); 223 if (req->state == TID_REQUEST_RESEND || 224 req->state == TID_REQUEST_INIT_RESEND) 225 goto bail; 226 qp->s_ack_state = TID_OP(WRITE_RESP); 227 qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg); 228 goto write_resp; 229 } else if (e->opcode == TID_OP(READ_REQ)) { 230 /* 231 * If a TID RDMA read response is being resent and 232 * we haven't seen the duplicate request yet, 233 * then stop sending the remaining responses the 234 * responder has seen until the requester re-sends it. 235 */ 236 len = e->rdma_sge.sge_length; 237 if (len && !e->rdma_sge.mr) { 238 if (qp->s_acked_ack_queue == 239 qp->s_tail_ack_queue) 240 qp->s_acked_ack_queue = 241 qp->r_head_ack_queue; 242 qp->s_tail_ack_queue = qp->r_head_ack_queue; 243 goto bail; 244 } 245 /* Copy SGE state in case we need to resend */ 246 ps->s_txreq->mr = e->rdma_sge.mr; 247 if (ps->s_txreq->mr) 248 rvt_get_mr(ps->s_txreq->mr); 249 qp->s_ack_rdma_sge.sge = e->rdma_sge; 250 qp->s_ack_rdma_sge.num_sge = 1; 251 qp->s_ack_state = TID_OP(READ_RESP); 252 goto read_resp; 253 } else { 254 /* COMPARE_SWAP or FETCH_ADD */ 255 ps->s_txreq->ss = NULL; 256 len = 0; 257 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); 258 ohdr->u.at.aeth = rvt_compute_aeth(qp); 259 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth); 260 hwords += sizeof(ohdr->u.at) / sizeof(u32); 261 bth2 = mask_psn(e->psn); 262 e->sent = 1; 263 } 264 trace_hfi1_tid_write_rsp_make_rc_ack(qp); 265 bth0 = qp->s_ack_state << 24; 266 break; 267 268 case OP(RDMA_READ_RESPONSE_FIRST): 269 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); 270 fallthrough; 271 case OP(RDMA_READ_RESPONSE_MIDDLE): 272 ps->s_txreq->ss = &qp->s_ack_rdma_sge; 273 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr; 274 if (ps->s_txreq->mr) 275 rvt_get_mr(ps->s_txreq->mr); 276 len = qp->s_ack_rdma_sge.sge.sge_length; 277 if (len > pmtu) { 278 len = pmtu; 279 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 280 } else { 281 ohdr->u.aeth = rvt_compute_aeth(qp); 282 hwords++; 283 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); 284 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 285 e->sent = 1; 286 } 287 bth0 = qp->s_ack_state << 24; 288 bth2 = mask_psn(qp->s_ack_rdma_psn++); 289 break; 290 291 case TID_OP(WRITE_RESP): 292 write_resp: 293 /* 294 * 1. Check if RVT_S_ACK_PENDING is set. If yes, 295 * goto normal. 296 * 2. Attempt to allocate TID resources. 
297 * 3. Remove RVT_S_RESP_PENDING flags from s_flags 298 * 4. If resources not available: 299 * 4.1 Set RVT_S_WAIT_TID_SPACE 300 * 4.2 Queue QP on RCD TID queue 301 * 4.3 Put QP on iowait list. 302 * 4.4 Build IB RNR NAK with appropriate timeout value 303 * 4.5 Return indication progress made. 304 * 5. If resources are available: 305 * 5.1 Program HW flow CSRs 306 * 5.2 Build TID RDMA WRITE RESP packet 307 * 5.3 If more resources needed, do 2.1 - 2.3. 308 * 5.4 Wake up next QP on RCD TID queue. 309 * 5.5 Return indication progress made. 310 */ 311 312 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 313 req = ack_to_tid_req(e); 314 315 /* 316 * Send scheduled RNR NAK's. RNR NAK's need to be sent at 317 * segment boundaries, not at request boundaries. Don't change 318 * s_ack_state because we are still in the middle of a request 319 */ 320 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND && 321 qp->s_tail_ack_queue == qpriv->r_tid_alloc && 322 req->cur_seg == req->alloc_seg) { 323 qpriv->rnr_nak_state = TID_RNR_NAK_SENT; 324 goto normal_no_state; 325 } 326 327 bth2 = mask_psn(qp->s_ack_rdma_psn); 328 hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1, 329 bth2, &len, 330 &ps->s_txreq->ss); 331 if (!hdrlen) 332 return 0; 333 334 hwords += hdrlen; 335 bth0 = qp->s_ack_state << 24; 336 qp->s_ack_rdma_psn++; 337 trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn, 338 e->lpsn, req); 339 if (req->cur_seg != req->total_segs) 340 break; 341 342 e->sent = 1; 343 /* Do not free e->rdma_sge until all data are received */ 344 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); 345 break; 346 347 case TID_OP(READ_RESP): 348 read_resp: 349 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 350 ps->s_txreq->ss = &qp->s_ack_rdma_sge; 351 delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0, 352 &bth1, &bth2, &len, 353 &last_pkt); 354 if (delta == 0) 355 goto error_qp; 356 hwords += delta; 357 if (last_pkt) { 358 e->sent = 1; 359 /* 360 * Increment qp->s_tail_ack_queue through s_ack_state 361 * transition. 362 */ 363 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); 364 } 365 break; 366 case TID_OP(READ_REQ): 367 goto bail; 368 369 default: 370 normal: 371 /* 372 * Send a regular ACK. 373 * Set the s_ack_state so we wait until after sending 374 * the ACK before setting s_ack_state to ACKNOWLEDGE 375 * (see above). 
376 */ 377 qp->s_ack_state = OP(SEND_ONLY); 378 normal_no_state: 379 if (qp->s_nak_state) 380 ohdr->u.aeth = 381 cpu_to_be32((qp->r_msn & IB_MSN_MASK) | 382 (qp->s_nak_state << 383 IB_AETH_CREDIT_SHIFT)); 384 else 385 ohdr->u.aeth = rvt_compute_aeth(qp); 386 hwords++; 387 len = 0; 388 bth0 = OP(ACKNOWLEDGE) << 24; 389 bth2 = mask_psn(qp->s_ack_psn); 390 qp->s_flags &= ~RVT_S_ACK_PENDING; 391 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP; 392 ps->s_txreq->ss = NULL; 393 } 394 qp->s_rdma_ack_cnt++; 395 ps->s_txreq->sde = qpriv->s_sde; 396 ps->s_txreq->s_cur_size = len; 397 ps->s_txreq->hdr_dwords = hwords; 398 hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps); 399 return 1; 400 error_qp: 401 spin_unlock_irqrestore(&qp->s_lock, ps->flags); 402 spin_lock_irqsave(&qp->r_lock, ps->flags); 403 spin_lock(&qp->s_lock); 404 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 405 spin_unlock(&qp->s_lock); 406 spin_unlock_irqrestore(&qp->r_lock, ps->flags); 407 spin_lock_irqsave(&qp->s_lock, ps->flags); 408 bail: 409 qp->s_ack_state = OP(ACKNOWLEDGE); 410 /* 411 * Ensure s_rdma_ack_cnt changes are committed prior to resetting 412 * RVT_S_RESP_PENDING 413 */ 414 smp_wmb(); 415 qp->s_flags &= ~(RVT_S_RESP_PENDING 416 | RVT_S_ACK_PENDING 417 | HFI1_S_AHG_VALID); 418 return 0; 419 } 420 421 /** 422 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) 423 * @qp: a pointer to the QP 424 * @ps: the current packet state 425 * 426 * Assumes s_lock is held. 427 * 428 * Return 1 if constructed; otherwise, return 0. 429 */ 430 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) 431 { 432 struct hfi1_qp_priv *priv = qp->priv; 433 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); 434 struct ib_other_headers *ohdr; 435 struct rvt_sge_state *ss = NULL; 436 struct rvt_swqe *wqe; 437 struct hfi1_swqe_priv *wpriv; 438 struct tid_rdma_request *req = NULL; 439 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 440 u32 hwords = 5; 441 u32 len = 0; 442 u32 bth0 = 0, bth2 = 0; 443 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT); 444 u32 pmtu = qp->pmtu; 445 char newreq; 446 int middle = 0; 447 int delta; 448 struct tid_rdma_flow *flow = NULL; 449 struct tid_rdma_params *remote; 450 451 trace_hfi1_sender_make_rc_req(qp); 452 lockdep_assert_held(&qp->s_lock); 453 ps->s_txreq = get_txreq(ps->dev, qp); 454 if (!ps->s_txreq) 455 goto bail_no_tx; 456 457 if (priv->hdr_type == HFI1_PKT_TYPE_9B) { 458 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 459 hwords = 5; 460 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) 461 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; 462 else 463 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth; 464 } else { 465 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */ 466 hwords = 7; 467 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && 468 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr)))) 469 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; 470 else 471 ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth; 472 } 473 474 /* Sending responses has higher priority over sending requests. */ 475 if ((qp->s_flags & RVT_S_RESP_PENDING) && 476 make_rc_ack(dev, qp, ohdr, ps)) 477 return 1; 478 479 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { 480 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) 481 goto bail; 482 /* We are in the error state, flush the work request. */ 483 if (qp->s_last == READ_ONCE(qp->s_head)) 484 goto bail; 485 /* If DMAs are in progress, we can't flush immediately. 
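* The SDMA engine may still reference this WQE's buffers, so set RVT_S_WAIT_DMA and bail; the flush is retried once the pending descriptors complete.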
*/ 486 if (iowait_sdma_pending(&priv->s_iowait)) { 487 qp->s_flags |= RVT_S_WAIT_DMA; 488 goto bail; 489 } 490 clear_ahg(qp); 491 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 492 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? 493 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); 494 /* will get called again */ 495 goto done_free_tx; 496 } 497 498 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT)) 499 goto bail; 500 501 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) { 502 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { 503 qp->s_flags |= RVT_S_WAIT_PSN; 504 goto bail; 505 } 506 qp->s_sending_psn = qp->s_psn; 507 qp->s_sending_hpsn = qp->s_psn - 1; 508 } 509 510 /* Send a request. */ 511 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); 512 check_s_state: 513 switch (qp->s_state) { 514 default: 515 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) 516 goto bail; 517 /* 518 * Resend an old request or start a new one. 519 * 520 * We keep track of the current SWQE so that 521 * we don't reset the "furthest progress" state 522 * if we need to back up. 523 */ 524 newreq = 0; 525 if (qp->s_cur == qp->s_tail) { 526 /* Check if send work queue is empty. */ 527 if (qp->s_tail == READ_ONCE(qp->s_head)) { 528 clear_ahg(qp); 529 goto bail; 530 } 531 /* 532 * If a fence is requested, wait for previous 533 * RDMA read and atomic operations to finish. 534 * However, there is no need to guard against 535 * TID RDMA READ after TID RDMA READ. 536 */ 537 if ((wqe->wr.send_flags & IB_SEND_FENCE) && 538 qp->s_num_rd_atomic && 539 (wqe->wr.opcode != IB_WR_TID_RDMA_READ || 540 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) { 541 qp->s_flags |= RVT_S_WAIT_FENCE; 542 goto bail; 543 } 544 /* 545 * Local operations are processed immediately 546 * after all prior requests have completed 547 */ 548 if (wqe->wr.opcode == IB_WR_REG_MR || 549 wqe->wr.opcode == IB_WR_LOCAL_INV) { 550 int local_ops = 0; 551 int err = 0; 552 553 if (qp->s_last != qp->s_cur) 554 goto bail; 555 if (++qp->s_cur == qp->s_size) 556 qp->s_cur = 0; 557 if (++qp->s_tail == qp->s_size) 558 qp->s_tail = 0; 559 if (!(wqe->wr.send_flags & 560 RVT_SEND_COMPLETION_ONLY)) { 561 err = rvt_invalidate_rkey( 562 qp, 563 wqe->wr.ex.invalidate_rkey); 564 local_ops = 1; 565 } 566 rvt_send_complete(qp, wqe, 567 err ? IB_WC_LOC_PROT_ERR 568 : IB_WC_SUCCESS); 569 if (local_ops) 570 atomic_dec(&qp->local_ops_pending); 571 goto done_free_tx; 572 } 573 574 newreq = 1; 575 qp->s_psn = wqe->psn; 576 } 577 /* 578 * Note that we have to be careful not to modify the 579 * original work request since we may need to resend 580 * it. 581 */ 582 len = wqe->length; 583 ss = &qp->s_sge; 584 bth2 = mask_psn(qp->s_psn); 585 586 /* 587 * Interlock between various IB requests and TID RDMA 588 * if necessary. 589 */ 590 if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) || 591 hfi1_tid_rdma_wqe_interlock(qp, wqe)) 592 goto bail; 593 594 switch (wqe->wr.opcode) { 595 case IB_WR_SEND: 596 case IB_WR_SEND_WITH_IMM: 597 case IB_WR_SEND_WITH_INV: 598 /* If no credit, return. 
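* rvt_rc_credit_avail() sets RVT_S_WAIT_SSN_CREDIT when the SSN credit limit has been reached, so the QP is rescheduled once credit is returned.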
*/ 599 if (!rvt_rc_credit_avail(qp, wqe)) 600 goto bail; 601 if (len > pmtu) { 602 qp->s_state = OP(SEND_FIRST); 603 len = pmtu; 604 break; 605 } 606 if (wqe->wr.opcode == IB_WR_SEND) { 607 qp->s_state = OP(SEND_ONLY); 608 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 609 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); 610 /* Immediate data comes after the BTH */ 611 ohdr->u.imm_data = wqe->wr.ex.imm_data; 612 hwords += 1; 613 } else { 614 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE); 615 /* Invalidate rkey comes after the BTH */ 616 ohdr->u.ieth = cpu_to_be32( 617 wqe->wr.ex.invalidate_rkey); 618 hwords += 1; 619 } 620 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 621 bth0 |= IB_BTH_SOLICITED; 622 bth2 |= IB_BTH_REQ_ACK; 623 if (++qp->s_cur == qp->s_size) 624 qp->s_cur = 0; 625 break; 626 627 case IB_WR_RDMA_WRITE: 628 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 629 qp->s_lsn++; 630 goto no_flow_control; 631 case IB_WR_RDMA_WRITE_WITH_IMM: 632 /* If no credit, return. */ 633 if (!rvt_rc_credit_avail(qp, wqe)) 634 goto bail; 635 no_flow_control: 636 put_ib_reth_vaddr( 637 wqe->rdma_wr.remote_addr, 638 &ohdr->u.rc.reth); 639 ohdr->u.rc.reth.rkey = 640 cpu_to_be32(wqe->rdma_wr.rkey); 641 ohdr->u.rc.reth.length = cpu_to_be32(len); 642 hwords += sizeof(struct ib_reth) / sizeof(u32); 643 if (len > pmtu) { 644 qp->s_state = OP(RDMA_WRITE_FIRST); 645 len = pmtu; 646 break; 647 } 648 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { 649 qp->s_state = OP(RDMA_WRITE_ONLY); 650 } else { 651 qp->s_state = 652 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 653 /* Immediate data comes after RETH */ 654 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; 655 hwords += 1; 656 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 657 bth0 |= IB_BTH_SOLICITED; 658 } 659 bth2 |= IB_BTH_REQ_ACK; 660 if (++qp->s_cur == qp->s_size) 661 qp->s_cur = 0; 662 break; 663 664 case IB_WR_TID_RDMA_WRITE: 665 if (newreq) { 666 /* 667 * Limit the number of TID RDMA WRITE requests. 668 */ 669 if (atomic_read(&priv->n_tid_requests) >= 670 HFI1_TID_RDMA_WRITE_CNT) 671 goto bail; 672 673 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 674 qp->s_lsn++; 675 } 676 677 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, 678 &bth1, &bth2, 679 &len); 680 ss = NULL; 681 if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) { 682 priv->s_tid_cur = qp->s_cur; 683 if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) { 684 priv->s_tid_tail = qp->s_cur; 685 priv->s_state = TID_OP(WRITE_RESP); 686 } 687 } else if (priv->s_tid_cur == priv->s_tid_head) { 688 struct rvt_swqe *__w; 689 struct tid_rdma_request *__r; 690 691 __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur); 692 __r = wqe_to_tid_req(__w); 693 694 /* 695 * The s_tid_cur pointer is advanced to s_cur if 696 * any of the following conditions about the WQE 697 * to which s_ti_cur currently points to are 698 * satisfied: 699 * 1. The request is not a TID RDMA WRITE 700 * request, 701 * 2. The request is in the INACTIVE or 702 * COMPLETE states (TID RDMA READ requests 703 * stay at INACTIVE and TID RDMA WRITE 704 * transition to COMPLETE when done), 705 * 3. The request is in the ACTIVE or SYNC 706 * state and the number of completed 707 * segments is equal to the total segment 708 * count. 709 * (If ACTIVE, the request is waiting for 710 * ACKs. If SYNC, the request has not 711 * received any responses because it's 712 * waiting on a sync point.) 
713 */ 714 if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE || 715 __r->state == TID_REQUEST_INACTIVE || 716 __r->state == TID_REQUEST_COMPLETE || 717 ((__r->state == TID_REQUEST_ACTIVE || 718 __r->state == TID_REQUEST_SYNC) && 719 __r->comp_seg == __r->total_segs)) { 720 if (priv->s_tid_tail == 721 priv->s_tid_cur && 722 priv->s_state == 723 TID_OP(WRITE_DATA_LAST)) { 724 priv->s_tid_tail = qp->s_cur; 725 priv->s_state = 726 TID_OP(WRITE_RESP); 727 } 728 priv->s_tid_cur = qp->s_cur; 729 } 730 /* 731 * A corner case: when the last TID RDMA WRITE 732 * request was completed, s_tid_head, 733 * s_tid_cur, and s_tid_tail all point to the 734 * same location. Other requests are posted and 735 * s_cur wraps around to the same location, 736 * where a new TID RDMA WRITE is posted. In 737 * this case, none of the indices need to be 738 * updated. However, the priv->s_state should. 739 */ 740 if (priv->s_tid_tail == qp->s_cur && 741 priv->s_state == TID_OP(WRITE_DATA_LAST)) 742 priv->s_state = TID_OP(WRITE_RESP); 743 } 744 req = wqe_to_tid_req(wqe); 745 if (newreq) { 746 priv->s_tid_head = qp->s_cur; 747 priv->pending_tid_w_resp += req->total_segs; 748 atomic_inc(&priv->n_tid_requests); 749 atomic_dec(&priv->n_requests); 750 } else { 751 req->state = TID_REQUEST_RESEND; 752 req->comp_seg = delta_psn(bth2, wqe->psn); 753 /* 754 * Pull back any segments since we are going 755 * to re-receive them. 756 */ 757 req->setup_head = req->clear_tail; 758 priv->pending_tid_w_resp += 759 delta_psn(wqe->lpsn, bth2) + 1; 760 } 761 762 trace_hfi1_tid_write_sender_make_req(qp, newreq); 763 trace_hfi1_tid_req_make_req_write(qp, newreq, 764 wqe->wr.opcode, 765 wqe->psn, wqe->lpsn, 766 req); 767 if (++qp->s_cur == qp->s_size) 768 qp->s_cur = 0; 769 break; 770 771 case IB_WR_RDMA_READ: 772 /* 773 * Don't allow more operations to be started 774 * than the QP limits allow. 775 */ 776 if (qp->s_num_rd_atomic >= 777 qp->s_max_rd_atomic) { 778 qp->s_flags |= RVT_S_WAIT_RDMAR; 779 goto bail; 780 } 781 qp->s_num_rd_atomic++; 782 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 783 qp->s_lsn++; 784 put_ib_reth_vaddr( 785 wqe->rdma_wr.remote_addr, 786 &ohdr->u.rc.reth); 787 ohdr->u.rc.reth.rkey = 788 cpu_to_be32(wqe->rdma_wr.rkey); 789 ohdr->u.rc.reth.length = cpu_to_be32(len); 790 qp->s_state = OP(RDMA_READ_REQUEST); 791 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 792 ss = NULL; 793 len = 0; 794 bth2 |= IB_BTH_REQ_ACK; 795 if (++qp->s_cur == qp->s_size) 796 qp->s_cur = 0; 797 break; 798 799 case IB_WR_TID_RDMA_READ: 800 trace_hfi1_tid_read_sender_make_req(qp, newreq); 801 wpriv = wqe->priv; 802 req = wqe_to_tid_req(wqe); 803 trace_hfi1_tid_req_make_req_read(qp, newreq, 804 wqe->wr.opcode, 805 wqe->psn, wqe->lpsn, 806 req); 807 delta = cmp_psn(qp->s_psn, wqe->psn); 808 809 /* 810 * Don't allow more operations to be started 811 * than the QP limits allow. We could get here under 812 * three conditions; (1) It's a new request; (2) We are 813 * sending the second or later segment of a request, 814 * but the qp->s_state is set to OP(RDMA_READ_REQUEST) 815 * when the last segment of a previous request is 816 * received just before this; (3) We are re-sending a 817 * request. 818 */ 819 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { 820 qp->s_flags |= RVT_S_WAIT_RDMAR; 821 goto bail; 822 } 823 if (newreq) { 824 struct tid_rdma_flow *flow = 825 &req->flows[req->setup_head]; 826 827 /* 828 * Set up s_sge as it is needed for TID 829 * allocation. However, if the pages have been 830 * walked and mapped, skip it. 
An earlier try 831 * has failed to allocate the TID entries. 832 */ 833 if (!flow->npagesets) { 834 qp->s_sge.sge = wqe->sg_list[0]; 835 qp->s_sge.sg_list = wqe->sg_list + 1; 836 qp->s_sge.num_sge = wqe->wr.num_sge; 837 qp->s_sge.total_len = wqe->length; 838 qp->s_len = wqe->length; 839 req->isge = 0; 840 req->clear_tail = req->setup_head; 841 req->flow_idx = req->setup_head; 842 req->state = TID_REQUEST_ACTIVE; 843 } 844 } else if (delta == 0) { 845 /* Re-send a request */ 846 req->cur_seg = 0; 847 req->comp_seg = 0; 848 req->ack_pending = 0; 849 req->flow_idx = req->clear_tail; 850 req->state = TID_REQUEST_RESEND; 851 } 852 req->s_next_psn = qp->s_psn; 853 /* Read one segment at a time */ 854 len = min_t(u32, req->seg_len, 855 wqe->length - req->seg_len * req->cur_seg); 856 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, 857 &bth1, &bth2, 858 &len); 859 if (delta <= 0) { 860 /* Wait for TID space */ 861 goto bail; 862 } 863 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 864 qp->s_lsn++; 865 hwords += delta; 866 ss = &wpriv->ss; 867 /* Check if this is the last segment */ 868 if (req->cur_seg >= req->total_segs && 869 ++qp->s_cur == qp->s_size) 870 qp->s_cur = 0; 871 break; 872 873 case IB_WR_ATOMIC_CMP_AND_SWP: 874 case IB_WR_ATOMIC_FETCH_AND_ADD: 875 /* 876 * Don't allow more operations to be started 877 * than the QP limits allow. 878 */ 879 if (qp->s_num_rd_atomic >= 880 qp->s_max_rd_atomic) { 881 qp->s_flags |= RVT_S_WAIT_RDMAR; 882 goto bail; 883 } 884 qp->s_num_rd_atomic++; 885 fallthrough; 886 case IB_WR_OPFN: 887 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) 888 qp->s_lsn++; 889 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 890 wqe->wr.opcode == IB_WR_OPFN) { 891 qp->s_state = OP(COMPARE_SWAP); 892 put_ib_ateth_swap(wqe->atomic_wr.swap, 893 &ohdr->u.atomic_eth); 894 put_ib_ateth_compare(wqe->atomic_wr.compare_add, 895 &ohdr->u.atomic_eth); 896 } else { 897 qp->s_state = OP(FETCH_ADD); 898 put_ib_ateth_swap(wqe->atomic_wr.compare_add, 899 &ohdr->u.atomic_eth); 900 put_ib_ateth_compare(0, &ohdr->u.atomic_eth); 901 } 902 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr, 903 &ohdr->u.atomic_eth); 904 ohdr->u.atomic_eth.rkey = cpu_to_be32( 905 wqe->atomic_wr.rkey); 906 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); 907 ss = NULL; 908 len = 0; 909 bth2 |= IB_BTH_REQ_ACK; 910 if (++qp->s_cur == qp->s_size) 911 qp->s_cur = 0; 912 break; 913 914 default: 915 goto bail; 916 } 917 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) { 918 qp->s_sge.sge = wqe->sg_list[0]; 919 qp->s_sge.sg_list = wqe->sg_list + 1; 920 qp->s_sge.num_sge = wqe->wr.num_sge; 921 qp->s_sge.total_len = wqe->length; 922 qp->s_len = wqe->length; 923 } 924 if (newreq) { 925 qp->s_tail++; 926 if (qp->s_tail >= qp->s_size) 927 qp->s_tail = 0; 928 } 929 if (wqe->wr.opcode == IB_WR_RDMA_READ || 930 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) 931 qp->s_psn = wqe->lpsn + 1; 932 else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) 933 qp->s_psn = req->s_next_psn; 934 else 935 qp->s_psn++; 936 break; 937 938 case OP(RDMA_READ_RESPONSE_FIRST): 939 /* 940 * qp->s_state is normally set to the opcode of the 941 * last packet constructed for new requests and therefore 942 * is never set to RDMA read response. 943 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing 944 * thread to indicate a SEND needs to be restarted from an 945 * earlier PSN without interfering with the sending thread. 946 * See restart_rc(). 
947 */ 948 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); 949 fallthrough; 950 case OP(SEND_FIRST): 951 qp->s_state = OP(SEND_MIDDLE); 952 fallthrough; 953 case OP(SEND_MIDDLE): 954 bth2 = mask_psn(qp->s_psn++); 955 ss = &qp->s_sge; 956 len = qp->s_len; 957 if (len > pmtu) { 958 len = pmtu; 959 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 960 break; 961 } 962 if (wqe->wr.opcode == IB_WR_SEND) { 963 qp->s_state = OP(SEND_LAST); 964 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 965 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 966 /* Immediate data comes after the BTH */ 967 ohdr->u.imm_data = wqe->wr.ex.imm_data; 968 hwords += 1; 969 } else { 970 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE); 971 /* invalidate data comes after the BTH */ 972 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey); 973 hwords += 1; 974 } 975 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 976 bth0 |= IB_BTH_SOLICITED; 977 bth2 |= IB_BTH_REQ_ACK; 978 qp->s_cur++; 979 if (qp->s_cur >= qp->s_size) 980 qp->s_cur = 0; 981 break; 982 983 case OP(RDMA_READ_RESPONSE_LAST): 984 /* 985 * qp->s_state is normally set to the opcode of the 986 * last packet constructed for new requests and therefore 987 * is never set to RDMA read response. 988 * RDMA_READ_RESPONSE_LAST is used by the ACK processing 989 * thread to indicate a RDMA write needs to be restarted from 990 * an earlier PSN without interfering with the sending thread. 991 * See restart_rc(). 992 */ 993 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); 994 fallthrough; 995 case OP(RDMA_WRITE_FIRST): 996 qp->s_state = OP(RDMA_WRITE_MIDDLE); 997 fallthrough; 998 case OP(RDMA_WRITE_MIDDLE): 999 bth2 = mask_psn(qp->s_psn++); 1000 ss = &qp->s_sge; 1001 len = qp->s_len; 1002 if (len > pmtu) { 1003 len = pmtu; 1004 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 1005 break; 1006 } 1007 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { 1008 qp->s_state = OP(RDMA_WRITE_LAST); 1009 } else { 1010 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 1011 /* Immediate data comes after the BTH */ 1012 ohdr->u.imm_data = wqe->wr.ex.imm_data; 1013 hwords += 1; 1014 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 1015 bth0 |= IB_BTH_SOLICITED; 1016 } 1017 bth2 |= IB_BTH_REQ_ACK; 1018 qp->s_cur++; 1019 if (qp->s_cur >= qp->s_size) 1020 qp->s_cur = 0; 1021 break; 1022 1023 case OP(RDMA_READ_RESPONSE_MIDDLE): 1024 /* 1025 * qp->s_state is normally set to the opcode of the 1026 * last packet constructed for new requests and therefore 1027 * is never set to RDMA read response. 1028 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing 1029 * thread to indicate a RDMA read needs to be restarted from 1030 * an earlier PSN without interfering with the sending thread. 1031 * See restart_rc(). 1032 */ 1033 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; 1034 put_ib_reth_vaddr( 1035 wqe->rdma_wr.remote_addr + len, 1036 &ohdr->u.rc.reth); 1037 ohdr->u.rc.reth.rkey = 1038 cpu_to_be32(wqe->rdma_wr.rkey); 1039 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); 1040 qp->s_state = OP(RDMA_READ_REQUEST); 1041 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 1042 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK; 1043 qp->s_psn = wqe->lpsn + 1; 1044 ss = NULL; 1045 len = 0; 1046 qp->s_cur++; 1047 if (qp->s_cur == qp->s_size) 1048 qp->s_cur = 0; 1049 break; 1050 1051 case TID_OP(WRITE_RESP): 1052 /* 1053 * This value for s_state is used for restarting a TID RDMA 1054 * WRITE request. See comment in OP(RDMA_READ_RESPONSE_MIDDLE 1055 * for more). 
1056 */ 1057 req = wqe_to_tid_req(wqe); 1058 req->state = TID_REQUEST_RESEND; 1059 rcu_read_lock(); 1060 remote = rcu_dereference(priv->tid_rdma.remote); 1061 req->comp_seg = delta_psn(qp->s_psn, wqe->psn); 1062 len = wqe->length - (req->comp_seg * remote->max_len); 1063 rcu_read_unlock(); 1064 1065 bth2 = mask_psn(qp->s_psn); 1066 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1, 1067 &bth2, &len); 1068 qp->s_psn = wqe->lpsn + 1; 1069 ss = NULL; 1070 qp->s_state = TID_OP(WRITE_REQ); 1071 priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1; 1072 priv->s_tid_cur = qp->s_cur; 1073 if (++qp->s_cur == qp->s_size) 1074 qp->s_cur = 0; 1075 trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode, 1076 wqe->psn, wqe->lpsn, req); 1077 break; 1078 1079 case TID_OP(READ_RESP): 1080 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) 1081 goto bail; 1082 /* This is used to restart a TID read request */ 1083 req = wqe_to_tid_req(wqe); 1084 wpriv = wqe->priv; 1085 /* 1086 * Back down. The field qp->s_psn has been set to the psn with 1087 * which the request should be restarted. It's OK to use division 1088 * as this is on the retry path. 1089 */ 1090 req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps; 1091 1092 /* 1093 * The following function needs to be redefined to return the 1094 * status to make sure that we find the flow. At the same 1095 * time, we can use the req->state change to check if the 1096 * call succeeds or not. 1097 */ 1098 req->state = TID_REQUEST_RESEND; 1099 hfi1_tid_rdma_restart_req(qp, wqe, &bth2); 1100 if (req->state != TID_REQUEST_ACTIVE) { 1101 /* 1102 * Failed to find the flow. Release all allocated tid 1103 * resources. 1104 */ 1105 hfi1_kern_exp_rcv_clear_all(req); 1106 hfi1_kern_clear_hw_flow(priv->rcd, qp); 1107 1108 hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR); 1109 goto bail; 1110 } 1111 req->state = TID_REQUEST_RESEND; 1112 len = min_t(u32, req->seg_len, 1113 wqe->length - req->seg_len * req->cur_seg); 1114 flow = &req->flows[req->flow_idx]; 1115 len -= flow->sent; 1116 req->s_next_psn = flow->flow_state.ib_lpsn + 1; 1117 delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1, 1118 &bth2, &len); 1119 if (delta <= 0) { 1120 /* Wait for TID space */ 1121 goto bail; 1122 } 1123 hwords += delta; 1124 ss = &wpriv->ss; 1125 /* Check if this is the last segment */ 1126 if (req->cur_seg >= req->total_segs && 1127 ++qp->s_cur == qp->s_size) 1128 qp->s_cur = 0; 1129 qp->s_psn = req->s_next_psn; 1130 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode, 1131 wqe->psn, wqe->lpsn, req); 1132 break; 1133 case TID_OP(READ_REQ): 1134 req = wqe_to_tid_req(wqe); 1135 delta = cmp_psn(qp->s_psn, wqe->psn); 1136 /* 1137 * If the current WR is not TID RDMA READ, or this is the start 1138 * of a new request, we need to change the qp->s_state so that 1139 * the request can be set up properly.
1140 */ 1141 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 || 1142 qp->s_cur == qp->s_tail) { 1143 qp->s_state = OP(RDMA_READ_REQUEST); 1144 if (delta == 0 || qp->s_cur == qp->s_tail) 1145 goto check_s_state; 1146 else 1147 goto bail; 1148 } 1149 1150 /* Rate limiting */ 1151 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { 1152 qp->s_flags |= RVT_S_WAIT_RDMAR; 1153 goto bail; 1154 } 1155 1156 wpriv = wqe->priv; 1157 /* Read one segment at a time */ 1158 len = min_t(u32, req->seg_len, 1159 wqe->length - req->seg_len * req->cur_seg); 1160 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1, 1161 &bth2, &len); 1162 if (delta <= 0) { 1163 /* Wait for TID space */ 1164 goto bail; 1165 } 1166 hwords += delta; 1167 ss = &wpriv->ss; 1168 /* Check if this is the last segment */ 1169 if (req->cur_seg >= req->total_segs && 1170 ++qp->s_cur == qp->s_size) 1171 qp->s_cur = 0; 1172 qp->s_psn = req->s_next_psn; 1173 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode, 1174 wqe->psn, wqe->lpsn, req); 1175 break; 1176 } 1177 qp->s_sending_hpsn = bth2; 1178 delta = delta_psn(bth2, wqe->psn); 1179 if (delta && delta % HFI1_PSN_CREDIT == 0 && 1180 wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 1181 bth2 |= IB_BTH_REQ_ACK; 1182 if (qp->s_flags & RVT_S_SEND_ONE) { 1183 qp->s_flags &= ~RVT_S_SEND_ONE; 1184 qp->s_flags |= RVT_S_WAIT_ACK; 1185 bth2 |= IB_BTH_REQ_ACK; 1186 } 1187 qp->s_len -= len; 1188 ps->s_txreq->hdr_dwords = hwords; 1189 ps->s_txreq->sde = priv->s_sde; 1190 ps->s_txreq->ss = ss; 1191 ps->s_txreq->s_cur_size = len; 1192 hfi1_make_ruc_header( 1193 qp, 1194 ohdr, 1195 bth0 | (qp->s_state << 24), 1196 bth1, 1197 bth2, 1198 middle, 1199 ps); 1200 return 1; 1201 1202 done_free_tx: 1203 hfi1_put_txreq(ps->s_txreq); 1204 ps->s_txreq = NULL; 1205 return 1; 1206 1207 bail: 1208 hfi1_put_txreq(ps->s_txreq); 1209 1210 bail_no_tx: 1211 ps->s_txreq = NULL; 1212 qp->s_flags &= ~RVT_S_BUSY; 1213 /* 1214 * If we didn't get a txreq, the QP will be woken up later to try 1215 * again. Set the flags to indicate which work item to wake 1216 * up. 1217 */ 1218 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB); 1219 return 0; 1220 } 1221 1222 static inline void hfi1_make_bth_aeth(struct rvt_qp *qp, 1223 struct ib_other_headers *ohdr, 1224 u32 bth0, u32 bth1) 1225 { 1226 if (qp->r_nak_state) 1227 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) | 1228 (qp->r_nak_state << 1229 IB_AETH_CREDIT_SHIFT)); 1230 else 1231 ohdr->u.aeth = rvt_compute_aeth(qp); 1232 1233 ohdr->bth[0] = cpu_to_be32(bth0); 1234 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn); 1235 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn)); 1236 } 1237 1238 static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn) 1239 { 1240 struct rvt_qp *qp = packet->qp; 1241 struct hfi1_ibport *ibp; 1242 unsigned long flags; 1243 1244 spin_lock_irqsave(&qp->s_lock, flags); 1245 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) 1246 goto unlock; 1247 ibp = rcd_to_iport(packet->rcd); 1248 this_cpu_inc(*ibp->rvp.rc_qacks); 1249 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; 1250 qp->s_nak_state = qp->r_nak_state; 1251 qp->s_ack_psn = qp->r_ack_psn; 1252 if (is_fecn) 1253 qp->s_flags |= RVT_S_ECN; 1254 1255 /* Schedule the send tasklet. 
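* RVT_S_ACK_PENDING and RVT_S_RESP_PENDING were set above, so the send engine will build the queued ACK in make_rc_ack().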
*/ 1256 hfi1_schedule_send(qp); 1257 unlock: 1258 spin_unlock_irqrestore(&qp->s_lock, flags); 1259 } 1260 1261 static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet, 1262 struct hfi1_opa_header *opa_hdr, 1263 u8 sc5, bool is_fecn, 1264 u64 *pbc_flags, u32 *hwords, 1265 u32 *nwords) 1266 { 1267 struct rvt_qp *qp = packet->qp; 1268 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 1269 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1270 struct ib_header *hdr = &opa_hdr->ibh; 1271 struct ib_other_headers *ohdr; 1272 u16 lrh0 = HFI1_LRH_BTH; 1273 u16 pkey; 1274 u32 bth0, bth1; 1275 1276 opa_hdr->hdr_type = HFI1_PKT_TYPE_9B; 1277 ohdr = &hdr->u.oth; 1278 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */ 1279 *hwords = 6; 1280 1281 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) { 1282 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, 1283 rdma_ah_read_grh(&qp->remote_ah_attr), 1284 *hwords - 2, SIZE_OF_CRC); 1285 ohdr = &hdr->u.l.oth; 1286 lrh0 = HFI1_LRH_GRH; 1287 } 1288 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ 1289 *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT); 1290 1291 /* read pkey_index w/o lock (its atomic) */ 1292 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); 1293 1294 lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT | 1295 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) << 1296 IB_SL_SHIFT; 1297 1298 hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC, 1299 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B), 1300 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr)); 1301 1302 bth0 = pkey | (OP(ACKNOWLEDGE) << 24); 1303 if (qp->s_mig_state == IB_MIG_MIGRATED) 1304 bth0 |= IB_BTH_MIG_REQ; 1305 bth1 = (!!is_fecn) << IB_BECN_SHIFT; 1306 /* 1307 * Inline ACKs go out without the use of the Verbs send engine, so 1308 * we need to set the STL Verbs Extended bit here 1309 */ 1310 bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT; 1311 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); 1312 } 1313 1314 static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet, 1315 struct hfi1_opa_header *opa_hdr, 1316 u8 sc5, bool is_fecn, 1317 u64 *pbc_flags, u32 *hwords, 1318 u32 *nwords) 1319 { 1320 struct rvt_qp *qp = packet->qp; 1321 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 1322 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1323 struct hfi1_16b_header *hdr = &opa_hdr->opah; 1324 struct ib_other_headers *ohdr; 1325 u32 bth0, bth1 = 0; 1326 u16 len, pkey; 1327 bool becn = is_fecn; 1328 u8 l4 = OPA_16B_L4_IB_LOCAL; 1329 u8 extra_bytes; 1330 1331 opa_hdr->hdr_type = HFI1_PKT_TYPE_16B; 1332 ohdr = &hdr->u.oth; 1333 /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */ 1334 *hwords = 8; 1335 extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0); 1336 *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2); 1337 1338 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) && 1339 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) { 1340 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, 1341 rdma_ah_read_grh(&qp->remote_ah_attr), 1342 *hwords - 4, *nwords); 1343 ohdr = &hdr->u.l.oth; 1344 l4 = OPA_16B_L4_IB_GLOBAL; 1345 } 1346 *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; 1347 1348 /* read pkey_index w/o lock (its atomic) */ 1349 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index); 1350 1351 /* Convert dwords to flits */ 1352 len = (*hwords + *nwords) >> 1; 1353 1354 hfi1_make_16b_hdr(hdr, ppd->lid | 1355 (rdma_ah_get_path_bits(&qp->remote_ah_attr) & 1356 ((1 << ppd->lmc) - 1)), 1357 
opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 1358 16B), len, pkey, becn, 0, l4, sc5); 1359 1360 bth0 = pkey | (OP(ACKNOWLEDGE) << 24); 1361 bth0 |= extra_bytes << 20; 1362 if (qp->s_mig_state == IB_MIG_MIGRATED) 1363 bth1 = OPA_BTH_MIG_REQ; 1364 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1); 1365 } 1366 1367 typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet, 1368 struct hfi1_opa_header *opa_hdr, 1369 u8 sc5, bool is_fecn, 1370 u64 *pbc_flags, u32 *hwords, 1371 u32 *nwords); 1372 1373 /* We support only two types - 9B and 16B for now */ 1374 static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = { 1375 [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B, 1376 [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B 1377 }; 1378 1379 /* 1380 * hfi1_send_rc_ack - Construct an ACK packet and send it 1381 * 1382 * This is called from hfi1_rc_rcv() and handle_receive_interrupt(). 1383 * Note that RDMA reads and atomics are handled in the 1384 * send side QP state and send engine. 1385 */ 1386 void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn) 1387 { 1388 struct hfi1_ctxtdata *rcd = packet->rcd; 1389 struct rvt_qp *qp = packet->qp; 1390 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 1391 struct hfi1_qp_priv *priv = qp->priv; 1392 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1393 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; 1394 u64 pbc, pbc_flags = 0; 1395 u32 hwords = 0; 1396 u32 nwords = 0; 1397 u32 plen; 1398 struct pio_buf *pbuf; 1399 struct hfi1_opa_header opa_hdr; 1400 1401 /* clear the defer count */ 1402 qp->r_adefered = 0; 1403 1404 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ 1405 if (qp->s_flags & RVT_S_RESP_PENDING) { 1406 hfi1_queue_rc_ack(packet, is_fecn); 1407 return; 1408 } 1409 1410 /* Ensure s_rdma_ack_cnt changes are committed */ 1411 if (qp->s_rdma_ack_cnt) { 1412 hfi1_queue_rc_ack(packet, is_fecn); 1413 return; 1414 } 1415 1416 /* Don't try to send ACKs if the link isn't ACTIVE */ 1417 if (driver_lstate(ppd) != IB_PORT_ACTIVE) 1418 return; 1419 1420 /* Make the appropriate header */ 1421 hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn, 1422 &pbc_flags, &hwords, &nwords); 1423 1424 plen = 2 /* PBC */ + hwords + nwords; 1425 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, 1426 sc_to_vlt(ppd->dd, sc5), plen); 1427 pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL); 1428 if (IS_ERR_OR_NULL(pbuf)) { 1429 /* 1430 * We have no room to send at the moment. Pass 1431 * responsibility for sending the ACK to the send engine 1432 * so that when enough buffer space becomes available, 1433 * the ACK is sent ahead of other outgoing packets. 1434 */ 1435 hfi1_queue_rc_ack(packet, is_fecn); 1436 return; 1437 } 1438 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), 1439 &opa_hdr, ib_is_sc5(sc5)); 1440 1441 /* write the pbc and data */ 1442 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, 1443 (priv->hdr_type == HFI1_PKT_TYPE_9B ? 1444 (void *)&opa_hdr.ibh : 1445 (void *)&opa_hdr.opah), hwords); 1446 return; 1447 } 1448 1449 /** 1450 * update_num_rd_atomic - update the qp->s_num_rd_atomic 1451 * @qp: the QP 1452 * @psn: the packet sequence number to restart at 1453 * @wqe: the wqe 1454 * 1455 * This is called from reset_psn() to update qp->s_num_rd_atomic 1456 * for the current wqe. 1457 * Called at interrupt level with the QP s_lock held. 
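* For TID RDMA READ, each outstanding segment counts against s_num_rd_atomic rather than the request as a whole.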
1458 */ 1459 static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn, 1460 struct rvt_swqe *wqe) 1461 { 1462 u32 opcode = wqe->wr.opcode; 1463 1464 if (opcode == IB_WR_RDMA_READ || 1465 opcode == IB_WR_ATOMIC_CMP_AND_SWP || 1466 opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 1467 qp->s_num_rd_atomic++; 1468 } else if (opcode == IB_WR_TID_RDMA_READ) { 1469 struct tid_rdma_request *req = wqe_to_tid_req(wqe); 1470 struct hfi1_qp_priv *priv = qp->priv; 1471 1472 if (cmp_psn(psn, wqe->lpsn) <= 0) { 1473 u32 cur_seg; 1474 1475 cur_seg = (psn - wqe->psn) / priv->pkts_ps; 1476 req->ack_pending = cur_seg - req->comp_seg; 1477 priv->pending_tid_r_segs += req->ack_pending; 1478 qp->s_num_rd_atomic += req->ack_pending; 1479 trace_hfi1_tid_req_update_num_rd_atomic(qp, 0, 1480 wqe->wr.opcode, 1481 wqe->psn, 1482 wqe->lpsn, 1483 req); 1484 } else { 1485 priv->pending_tid_r_segs += req->total_segs; 1486 qp->s_num_rd_atomic += req->total_segs; 1487 } 1488 } 1489 } 1490 1491 /** 1492 * reset_psn - reset the QP state to send starting from PSN 1493 * @qp: the QP 1494 * @psn: the packet sequence number to restart at 1495 * 1496 * This is called from hfi1_rc_rcv() to process an incoming RC ACK 1497 * for the given QP. 1498 * Called at interrupt level with the QP s_lock held. 1499 */ 1500 static void reset_psn(struct rvt_qp *qp, u32 psn) 1501 { 1502 u32 n = qp->s_acked; 1503 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); 1504 u32 opcode; 1505 struct hfi1_qp_priv *priv = qp->priv; 1506 1507 lockdep_assert_held(&qp->s_lock); 1508 qp->s_cur = n; 1509 priv->pending_tid_r_segs = 0; 1510 priv->pending_tid_w_resp = 0; 1511 qp->s_num_rd_atomic = 0; 1512 1513 /* 1514 * If we are starting the request from the beginning, 1515 * let the normal send code handle initialization. 1516 */ 1517 if (cmp_psn(psn, wqe->psn) <= 0) { 1518 qp->s_state = OP(SEND_LAST); 1519 goto done; 1520 } 1521 update_num_rd_atomic(qp, psn, wqe); 1522 1523 /* Find the work request opcode corresponding to the given PSN. */ 1524 for (;;) { 1525 int diff; 1526 1527 if (++n == qp->s_size) 1528 n = 0; 1529 if (n == qp->s_tail) 1530 break; 1531 wqe = rvt_get_swqe_ptr(qp, n); 1532 diff = cmp_psn(psn, wqe->psn); 1533 if (diff < 0) { 1534 /* Point wqe back to the previous one*/ 1535 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); 1536 break; 1537 } 1538 qp->s_cur = n; 1539 /* 1540 * If we are starting the request from the beginning, 1541 * let the normal send code handle initialization. 1542 */ 1543 if (diff == 0) { 1544 qp->s_state = OP(SEND_LAST); 1545 goto done; 1546 } 1547 1548 update_num_rd_atomic(qp, psn, wqe); 1549 } 1550 opcode = wqe->wr.opcode; 1551 1552 /* 1553 * Set the state to restart in the middle of a request. 1554 * Don't change the s_sge, s_cur_sge, or s_cur_size. 1555 * See hfi1_make_rc_req(). 1556 */ 1557 switch (opcode) { 1558 case IB_WR_SEND: 1559 case IB_WR_SEND_WITH_IMM: 1560 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); 1561 break; 1562 1563 case IB_WR_RDMA_WRITE: 1564 case IB_WR_RDMA_WRITE_WITH_IMM: 1565 qp->s_state = OP(RDMA_READ_RESPONSE_LAST); 1566 break; 1567 1568 case IB_WR_TID_RDMA_WRITE: 1569 qp->s_state = TID_OP(WRITE_RESP); 1570 break; 1571 1572 case IB_WR_RDMA_READ: 1573 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); 1574 break; 1575 1576 case IB_WR_TID_RDMA_READ: 1577 qp->s_state = TID_OP(READ_RESP); 1578 break; 1579 1580 default: 1581 /* 1582 * This case shouldn't happen since its only 1583 * one PSN per req. 
1584 */ 1585 qp->s_state = OP(SEND_LAST); 1586 } 1587 done: 1588 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK; 1589 qp->s_psn = psn; 1590 /* 1591 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer 1592 * asynchronously before the send engine can get scheduled. 1593 * Doing it in hfi1_make_rc_req() is too late. 1594 */ 1595 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) && 1596 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) 1597 qp->s_flags |= RVT_S_WAIT_PSN; 1598 qp->s_flags &= ~HFI1_S_AHG_VALID; 1599 trace_hfi1_sender_reset_psn(qp); 1600 } 1601 1602 /* 1603 * Back up requester to resend the last un-ACKed request. 1604 * The QP r_lock and s_lock should be held and interrupts disabled. 1605 */ 1606 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait) 1607 { 1608 struct hfi1_qp_priv *priv = qp->priv; 1609 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 1610 struct hfi1_ibport *ibp; 1611 1612 lockdep_assert_held(&qp->r_lock); 1613 lockdep_assert_held(&qp->s_lock); 1614 trace_hfi1_sender_restart_rc(qp); 1615 if (qp->s_retry == 0) { 1616 if (qp->s_mig_state == IB_MIG_ARMED) { 1617 hfi1_migrate_qp(qp); 1618 qp->s_retry = qp->s_retry_cnt; 1619 } else if (qp->s_last == qp->s_acked) { 1620 /* 1621 * We need special handling for the OPFN request WQEs as 1622 * they are not allowed to generate real user errors 1623 */ 1624 if (wqe->wr.opcode == IB_WR_OPFN) { 1625 struct hfi1_ibport *ibp = 1626 to_iport(qp->ibqp.device, qp->port_num); 1627 /* 1628 * Call opfn_conn_reply() with capcode and 1629 * remaining data as 0 to close out the 1630 * current request 1631 */ 1632 opfn_conn_reply(qp, priv->opfn.curr); 1633 wqe = do_rc_completion(qp, wqe, ibp); 1634 qp->s_flags &= ~RVT_S_WAIT_ACK; 1635 } else { 1636 trace_hfi1_tid_write_sender_restart_rc(qp, 0); 1637 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { 1638 struct tid_rdma_request *req; 1639 1640 req = wqe_to_tid_req(wqe); 1641 hfi1_kern_exp_rcv_clear_all(req); 1642 hfi1_kern_clear_hw_flow(priv->rcd, qp); 1643 } 1644 1645 hfi1_trdma_send_complete(qp, wqe, 1646 IB_WC_RETRY_EXC_ERR); 1647 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1648 } 1649 return; 1650 } else { /* need to handle delayed completion */ 1651 return; 1652 } 1653 } else { 1654 qp->s_retry--; 1655 } 1656 1657 ibp = to_iport(qp->ibqp.device, qp->port_num); 1658 if (wqe->wr.opcode == IB_WR_RDMA_READ || 1659 wqe->wr.opcode == IB_WR_TID_RDMA_READ) 1660 ibp->rvp.n_rc_resends++; 1661 else 1662 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); 1663 1664 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | 1665 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN | 1666 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP); 1667 if (wait) 1668 qp->s_flags |= RVT_S_SEND_ONE; 1669 reset_psn(qp, psn); 1670 } 1671 1672 /* 1673 * Set qp->s_sending_psn to the next PSN after the given one. 1674 * This would be psn+1 except when RDMA reads or TID RDMA ops 1675 * are present. 1676 */ 1677 static void reset_sending_psn(struct rvt_qp *qp, u32 psn) 1678 { 1679 struct rvt_swqe *wqe; 1680 u32 n = qp->s_last; 1681 1682 lockdep_assert_held(&qp->s_lock); 1683 /* Find the work request corresponding to the given PSN. 
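* Scan forward from s_last and stop at s_tail if the PSN is past every posted WQE.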
*/ 1684 for (;;) { 1685 wqe = rvt_get_swqe_ptr(qp, n); 1686 if (cmp_psn(psn, wqe->lpsn) <= 0) { 1687 if (wqe->wr.opcode == IB_WR_RDMA_READ || 1688 wqe->wr.opcode == IB_WR_TID_RDMA_READ || 1689 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) 1690 qp->s_sending_psn = wqe->lpsn + 1; 1691 else 1692 qp->s_sending_psn = psn + 1; 1693 break; 1694 } 1695 if (++n == qp->s_size) 1696 n = 0; 1697 if (n == qp->s_tail) 1698 break; 1699 } 1700 } 1701 1702 /** 1703 * hfi1_rc_verbs_aborted - handle abort status 1704 * @qp: the QP 1705 * @opah: the opa header 1706 * 1707 * This code modifies both ACK bit in BTH[2] 1708 * and the s_flags to go into send one mode. 1709 * 1710 * This serves to throttle the send engine to only 1711 * send a single packet in the likely case the 1712 * a link has gone down. 1713 */ 1714 void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah) 1715 { 1716 struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah); 1717 u8 opcode = ib_bth_get_opcode(ohdr); 1718 u32 psn; 1719 1720 /* ignore responses */ 1721 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) && 1722 opcode <= OP(ATOMIC_ACKNOWLEDGE)) || 1723 opcode == TID_OP(READ_RESP) || 1724 opcode == TID_OP(WRITE_RESP)) 1725 return; 1726 1727 psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK; 1728 ohdr->bth[2] = cpu_to_be32(psn); 1729 qp->s_flags |= RVT_S_SEND_ONE; 1730 } 1731 1732 /* 1733 * This should be called with the QP s_lock held and interrupts disabled. 1734 */ 1735 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) 1736 { 1737 struct ib_other_headers *ohdr; 1738 struct hfi1_qp_priv *priv = qp->priv; 1739 struct rvt_swqe *wqe; 1740 u32 opcode, head, tail; 1741 u32 psn; 1742 struct tid_rdma_request *req; 1743 1744 lockdep_assert_held(&qp->s_lock); 1745 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK)) 1746 return; 1747 1748 ohdr = hfi1_get_rc_ohdr(opah); 1749 opcode = ib_bth_get_opcode(ohdr); 1750 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) && 1751 opcode <= OP(ATOMIC_ACKNOWLEDGE)) || 1752 opcode == TID_OP(READ_RESP) || 1753 opcode == TID_OP(WRITE_RESP)) { 1754 WARN_ON(!qp->s_rdma_ack_cnt); 1755 qp->s_rdma_ack_cnt--; 1756 return; 1757 } 1758 1759 psn = ib_bth_get_psn(ohdr); 1760 /* 1761 * Don't attempt to reset the sending PSN for packets in the 1762 * KDETH PSN space since the PSN does not match anything. 1763 */ 1764 if (opcode != TID_OP(WRITE_DATA) && 1765 opcode != TID_OP(WRITE_DATA_LAST) && 1766 opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC)) 1767 reset_sending_psn(qp, psn); 1768 1769 /* Handle TID RDMA WRITE packets differently */ 1770 if (opcode >= TID_OP(WRITE_REQ) && 1771 opcode <= TID_OP(WRITE_DATA_LAST)) { 1772 head = priv->s_tid_head; 1773 tail = priv->s_tid_cur; 1774 /* 1775 * s_tid_cur is set to s_tid_head in the case, where 1776 * a new TID RDMA request is being started and all 1777 * previous ones have been completed. 1778 * Therefore, we need to do a secondary check in order 1779 * to properly determine whether we should start the 1780 * RC timer. 1781 */ 1782 wqe = rvt_get_swqe_ptr(qp, tail); 1783 req = wqe_to_tid_req(wqe); 1784 if (head == tail && req->comp_seg < req->total_segs) { 1785 if (tail == 0) 1786 tail = qp->s_size - 1; 1787 else 1788 tail -= 1; 1789 } 1790 } else { 1791 head = qp->s_tail; 1792 tail = qp->s_acked; 1793 } 1794 1795 /* 1796 * Start timer after a packet requesting an ACK has been sent and 1797 * there are still requests that haven't been acked. 
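* TID RDMA WRITE DATA and RESYNC packets are excluded from this check; their retries are driven by the TID RDMA ACK timer started below.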
1798 */ 1799 if ((psn & IB_BTH_REQ_ACK) && tail != head && 1800 opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) && 1801 opcode != TID_OP(RESYNC) && 1802 !(qp->s_flags & 1803 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && 1804 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { 1805 if (opcode == TID_OP(READ_REQ)) 1806 rvt_add_retry_timer_ext(qp, priv->timeout_shift); 1807 else 1808 rvt_add_retry_timer(qp); 1809 } 1810 1811 /* Start TID RDMA ACK timer */ 1812 if ((opcode == TID_OP(WRITE_DATA) || 1813 opcode == TID_OP(WRITE_DATA_LAST) || 1814 opcode == TID_OP(RESYNC)) && 1815 (psn & IB_BTH_REQ_ACK) && 1816 !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) && 1817 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { 1818 /* 1819 * The TID RDMA ACK packet could be received before this 1820 * function is called. Therefore, add the timer only if TID 1821 * RDMA ACK packets are actually pending. 1822 */ 1823 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 1824 req = wqe_to_tid_req(wqe); 1825 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && 1826 req->ack_seg < req->cur_seg) 1827 hfi1_add_tid_retry_timer(qp); 1828 } 1829 1830 while (qp->s_last != qp->s_acked) { 1831 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 1832 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && 1833 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) 1834 break; 1835 trdma_clean_swqe(qp, wqe); 1836 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); 1837 rvt_qp_complete_swqe(qp, 1838 wqe, 1839 ib_hfi1_wc_opcode[wqe->wr.opcode], 1840 IB_WC_SUCCESS); 1841 } 1842 /* 1843 * If we were waiting for sends to complete before re-sending, 1844 * and they are now complete, restart sending. 1845 */ 1846 trace_hfi1_sendcomplete(qp, psn); 1847 if (qp->s_flags & RVT_S_WAIT_PSN && 1848 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { 1849 qp->s_flags &= ~RVT_S_WAIT_PSN; 1850 qp->s_sending_psn = qp->s_psn; 1851 qp->s_sending_hpsn = qp->s_psn - 1; 1852 hfi1_schedule_send(qp); 1853 } 1854 } 1855 1856 static inline void update_last_psn(struct rvt_qp *qp, u32 psn) 1857 { 1858 qp->s_last_psn = psn; 1859 } 1860 1861 /* 1862 * Generate a SWQE completion. 1863 * This is similar to hfi1_send_complete but has to check to be sure 1864 * that the SGEs are not being referenced if the SWQE is being resent. 1865 */ 1866 struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, 1867 struct rvt_swqe *wqe, 1868 struct hfi1_ibport *ibp) 1869 { 1870 struct hfi1_qp_priv *priv = qp->priv; 1871 1872 lockdep_assert_held(&qp->s_lock); 1873 /* 1874 * Don't decrement refcount and don't generate a 1875 * completion if the SWQE is being resent until the send 1876 * is finished. 1877 */ 1878 trace_hfi1_rc_completion(qp, wqe->lpsn); 1879 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || 1880 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { 1881 trdma_clean_swqe(qp, wqe); 1882 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); 1883 rvt_qp_complete_swqe(qp, 1884 wqe, 1885 ib_hfi1_wc_opcode[wqe->wr.opcode], 1886 IB_WC_SUCCESS); 1887 } else { 1888 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 1889 1890 this_cpu_inc(*ibp->rvp.rc_delayed_comp); 1891 /* 1892 * If send progress not running attempt to progress 1893 * SDMA queue. 
1894 */ 1895 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) { 1896 struct sdma_engine *engine; 1897 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr); 1898 u8 sc5; 1899 1900 /* For now use sc to find engine */ 1901 sc5 = ibp->sl_to_sc[sl]; 1902 engine = qp_to_sdma_engine(qp, sc5); 1903 sdma_engine_progress_schedule(engine); 1904 } 1905 } 1906 1907 qp->s_retry = qp->s_retry_cnt; 1908 /* 1909 * Don't update the last PSN if the request being completed is 1910 * a TID RDMA WRITE request. 1911 * Completion of the TID RDMA WRITE requests are done by the 1912 * TID RDMA ACKs and as such could be for a request that has 1913 * already been ACKed as far as the IB state machine is 1914 * concerned. 1915 */ 1916 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 1917 update_last_psn(qp, wqe->lpsn); 1918 1919 /* 1920 * If we are completing a request which is in the process of 1921 * being resent, we can stop re-sending it since we know the 1922 * responder has already seen it. 1923 */ 1924 if (qp->s_acked == qp->s_cur) { 1925 if (++qp->s_cur >= qp->s_size) 1926 qp->s_cur = 0; 1927 qp->s_acked = qp->s_cur; 1928 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); 1929 if (qp->s_acked != qp->s_tail) { 1930 qp->s_state = OP(SEND_LAST); 1931 qp->s_psn = wqe->psn; 1932 } 1933 } else { 1934 if (++qp->s_acked >= qp->s_size) 1935 qp->s_acked = 0; 1936 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) 1937 qp->s_draining = 0; 1938 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 1939 } 1940 if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) { 1941 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK; 1942 hfi1_schedule_send(qp); 1943 } 1944 return wqe; 1945 } 1946 1947 static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd) 1948 { 1949 /* Retry this request. */ 1950 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { 1951 qp->r_flags |= RVT_R_RDMAR_SEQ; 1952 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0); 1953 if (list_empty(&qp->rspwait)) { 1954 qp->r_flags |= RVT_R_RSP_SEND; 1955 rvt_get_qp(qp); 1956 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); 1957 } 1958 } 1959 } 1960 1961 /** 1962 * update_qp_retry_state - Update qp retry state. 1963 * @qp: the QP 1964 * @psn: the packet sequence number of the TID RDMA WRITE RESP. 1965 * @spsn: The start psn for the given TID RDMA WRITE swqe. 1966 * @lpsn: The last psn for the given TID RDMA WRITE swqe. 1967 * 1968 * This function is called to update the qp retry state upon 1969 * receiving a TID WRITE RESP after the qp is scheduled to retry 1970 * a request. 1971 */ 1972 static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn, 1973 u32 lpsn) 1974 { 1975 struct hfi1_qp_priv *qpriv = qp->priv; 1976 1977 qp->s_psn = psn + 1; 1978 /* 1979 * If this is the first TID RDMA WRITE RESP packet for the current 1980 * request, change the s_state so that the retry will be processed 1981 * correctly. Similarly, if this is the last TID RDMA WRITE RESP 1982 * packet, change the s_state and advance the s_cur. 1983 */ 1984 if (cmp_psn(psn, lpsn) >= 0) { 1985 qp->s_cur = qpriv->s_tid_cur + 1; 1986 if (qp->s_cur >= qp->s_size) 1987 qp->s_cur = 0; 1988 qp->s_state = TID_OP(WRITE_REQ); 1989 } else if (!cmp_psn(psn, spsn)) { 1990 qp->s_cur = qpriv->s_tid_cur; 1991 qp->s_state = TID_OP(WRITE_RESP); 1992 } 1993 } 1994 1995 /* 1996 * do_rc_ack - process an incoming RC ACK 1997 * @qp: the QP the ACK came in on 1998 * @psn: the packet sequence number of the ACK 1999 * @opcode: the opcode of the request that resulted in the ACK 2000 * 2001 * This is called from rc_rcv_resp() to process an incoming RC ACK 2002 * for the given QP. 
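 *
 * Illustrative note: PSNs here are 24-bit serial numbers. Assuming the
 * usual rdmavt helpers, cmp_psn()/delta_psn() sign-extend the 24-bit
 * difference, e.g. delta_psn(0x000001, 0xffffff) == 2, which is what
 * allows ack_psn and wqe->lpsn to be compared correctly across a PSN
 * wrap-around.
 *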
2003 * May be called at interrupt level, with the QP s_lock held. 2004 * Returns 1 if OK, 0 if current operation should be aborted (NAK). 2005 */ 2006 int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, 2007 u64 val, struct hfi1_ctxtdata *rcd) 2008 { 2009 struct hfi1_ibport *ibp; 2010 enum ib_wc_status status; 2011 struct hfi1_qp_priv *qpriv = qp->priv; 2012 struct rvt_swqe *wqe; 2013 int ret = 0; 2014 u32 ack_psn; 2015 int diff; 2016 struct rvt_dev_info *rdi; 2017 2018 lockdep_assert_held(&qp->s_lock); 2019 /* 2020 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 2021 * requests and implicitly NAK RDMA read and atomic requests issued 2022 * before the NAK'ed request. The MSN won't include the NAK'ed 2023 * request but will include an ACK'ed request(s). 2024 */ 2025 ack_psn = psn; 2026 if (aeth >> IB_AETH_NAK_SHIFT) 2027 ack_psn--; 2028 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2029 ibp = rcd_to_iport(rcd); 2030 2031 /* 2032 * The MSN might be for a later WQE than the PSN indicates so 2033 * only complete WQEs that the PSN finishes. 2034 */ 2035 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) { 2036 /* 2037 * RDMA_READ_RESPONSE_ONLY is a special case since 2038 * we want to generate completion events for everything 2039 * before the RDMA read, copy the data, then generate 2040 * the completion for the read. 2041 */ 2042 if (wqe->wr.opcode == IB_WR_RDMA_READ && 2043 opcode == OP(RDMA_READ_RESPONSE_ONLY) && 2044 diff == 0) { 2045 ret = 1; 2046 goto bail_stop; 2047 } 2048 /* 2049 * If this request is a RDMA read or atomic, and the ACK is 2050 * for a later operation, this ACK NAKs the RDMA read or 2051 * atomic. In other words, only a RDMA_READ_LAST or ONLY 2052 * can ACK a RDMA read and likewise for atomic ops. Note 2053 * that the NAK case can only happen if relaxed ordering is 2054 * used and requests are sent after an RDMA read or atomic 2055 * is sent but before the response is received. 2056 */ 2057 if ((wqe->wr.opcode == IB_WR_RDMA_READ && 2058 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) || 2059 (wqe->wr.opcode == IB_WR_TID_RDMA_READ && 2060 (opcode != TID_OP(READ_RESP) || diff != 0)) || 2061 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2062 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && 2063 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) || 2064 (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && 2065 (delta_psn(psn, qp->s_last_psn) != 1))) { 2066 set_restart_qp(qp, rcd); 2067 /* 2068 * No need to process the ACK/NAK since we are 2069 * restarting an earlier request. 2070 */ 2071 goto bail_stop; 2072 } 2073 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2074 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2075 u64 *vaddr = wqe->sg_list[0].vaddr; 2076 *vaddr = val; 2077 } 2078 if (wqe->wr.opcode == IB_WR_OPFN) 2079 opfn_conn_reply(qp, val); 2080 2081 if (qp->s_num_rd_atomic && 2082 (wqe->wr.opcode == IB_WR_RDMA_READ || 2083 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2084 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { 2085 qp->s_num_rd_atomic--; 2086 /* Restart sending task if fence is complete */ 2087 if ((qp->s_flags & RVT_S_WAIT_FENCE) && 2088 !qp->s_num_rd_atomic) { 2089 qp->s_flags &= ~(RVT_S_WAIT_FENCE | 2090 RVT_S_WAIT_ACK); 2091 hfi1_schedule_send(qp); 2092 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) { 2093 qp->s_flags &= ~(RVT_S_WAIT_RDMAR | 2094 RVT_S_WAIT_ACK); 2095 hfi1_schedule_send(qp); 2096 } 2097 } 2098 2099 /* 2100 * TID RDMA WRITE requests will be completed by the TID RDMA 2101 * ACK packet handler (see tid_rdma.c). 
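 *
 * Illustrative note: this is also why the loop breaks at this point
 * instead of calling do_rc_completion(). The switch further below only
 * decodes the AETH: (aeth >> IB_AETH_NAK_SHIFT) selects ACK (0),
 * RNR NAK (1) or NAK (3), and the IB_AETH_CREDIT field carries either
 * the credit count or the NAK reason code.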
2102 */ 2103 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) 2104 break; 2105 2106 wqe = do_rc_completion(qp, wqe, ibp); 2107 if (qp->s_acked == qp->s_tail) 2108 break; 2109 } 2110 2111 trace_hfi1_rc_ack_do(qp, aeth, psn, wqe); 2112 trace_hfi1_sender_do_rc_ack(qp); 2113 switch (aeth >> IB_AETH_NAK_SHIFT) { 2114 case 0: /* ACK */ 2115 this_cpu_inc(*ibp->rvp.rc_acks); 2116 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { 2117 if (wqe_to_tid_req(wqe)->ack_pending) 2118 rvt_mod_retry_timer_ext(qp, 2119 qpriv->timeout_shift); 2120 else 2121 rvt_stop_rc_timers(qp); 2122 } else if (qp->s_acked != qp->s_tail) { 2123 struct rvt_swqe *__w = NULL; 2124 2125 if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID) 2126 __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur); 2127 2128 /* 2129 * Stop timers if we've received all of the TID RDMA 2130 * WRITE * responses. 2131 */ 2132 if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE && 2133 opcode == TID_OP(WRITE_RESP)) { 2134 /* 2135 * Normally, the loop above would correctly 2136 * process all WQEs from s_acked onward and 2137 * either complete them or check for correct 2138 * PSN sequencing. 2139 * However, for TID RDMA, due to pipelining, 2140 * the response may not be for the request at 2141 * s_acked so the above look would just be 2142 * skipped. This does not allow for checking 2143 * the PSN sequencing. It has to be done 2144 * separately. 2145 */ 2146 if (cmp_psn(psn, qp->s_last_psn + 1)) { 2147 set_restart_qp(qp, rcd); 2148 goto bail_stop; 2149 } 2150 /* 2151 * If the psn is being resent, stop the 2152 * resending. 2153 */ 2154 if (qp->s_cur != qp->s_tail && 2155 cmp_psn(qp->s_psn, psn) <= 0) 2156 update_qp_retry_state(qp, psn, 2157 __w->psn, 2158 __w->lpsn); 2159 else if (--qpriv->pending_tid_w_resp) 2160 rvt_mod_retry_timer(qp); 2161 else 2162 rvt_stop_rc_timers(qp); 2163 } else { 2164 /* 2165 * We are expecting more ACKs so 2166 * mod the retry timer. 2167 */ 2168 rvt_mod_retry_timer(qp); 2169 /* 2170 * We can stop re-sending the earlier packets 2171 * and continue with the next packet the 2172 * receiver wants. 2173 */ 2174 if (cmp_psn(qp->s_psn, psn) <= 0) 2175 reset_psn(qp, psn + 1); 2176 } 2177 } else { 2178 /* No more acks - kill all timers */ 2179 rvt_stop_rc_timers(qp); 2180 if (cmp_psn(qp->s_psn, psn) <= 0) { 2181 qp->s_state = OP(SEND_LAST); 2182 qp->s_psn = psn + 1; 2183 } 2184 } 2185 if (qp->s_flags & RVT_S_WAIT_ACK) { 2186 qp->s_flags &= ~RVT_S_WAIT_ACK; 2187 hfi1_schedule_send(qp); 2188 } 2189 rvt_get_credit(qp, aeth); 2190 qp->s_rnr_retry = qp->s_rnr_retry_cnt; 2191 qp->s_retry = qp->s_retry_cnt; 2192 /* 2193 * If the current request is a TID RDMA WRITE request and the 2194 * response is not a TID RDMA WRITE RESP packet, s_last_psn 2195 * can't be advanced. 2196 */ 2197 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE && 2198 opcode != TID_OP(WRITE_RESP) && 2199 cmp_psn(psn, wqe->psn) >= 0) 2200 return 1; 2201 update_last_psn(qp, psn); 2202 return 1; 2203 2204 case 1: /* RNR NAK */ 2205 ibp->rvp.n_rnr_naks++; 2206 if (qp->s_acked == qp->s_tail) 2207 goto bail_stop; 2208 if (qp->s_flags & RVT_S_WAIT_RNR) 2209 goto bail_stop; 2210 rdi = ib_to_rvt(qp->ibqp.device); 2211 if (!(rdi->post_parms[wqe->wr.opcode].flags & 2212 RVT_OPERATION_IGN_RNR_CNT)) { 2213 if (qp->s_rnr_retry == 0) { 2214 status = IB_WC_RNR_RETRY_EXC_ERR; 2215 goto class_b; 2216 } 2217 if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0) 2218 qp->s_rnr_retry--; 2219 } 2220 2221 /* 2222 * The last valid PSN is the previous PSN. 
For TID RDMA WRITE 2223 * request, s_last_psn should be incremented only when a TID 2224 * RDMA WRITE RESP is received to avoid skipping lost TID RDMA 2225 * WRITE RESP packets. 2226 */ 2227 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) { 2228 reset_psn(qp, qp->s_last_psn + 1); 2229 } else { 2230 update_last_psn(qp, psn - 1); 2231 reset_psn(qp, psn); 2232 } 2233 2234 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); 2235 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); 2236 rvt_stop_rc_timers(qp); 2237 rvt_add_rnr_timer(qp, aeth); 2238 return 0; 2239 2240 case 3: /* NAK */ 2241 if (qp->s_acked == qp->s_tail) 2242 goto bail_stop; 2243 /* The last valid PSN is the previous PSN. */ 2244 update_last_psn(qp, psn - 1); 2245 switch ((aeth >> IB_AETH_CREDIT_SHIFT) & 2246 IB_AETH_CREDIT_MASK) { 2247 case 0: /* PSN sequence error */ 2248 ibp->rvp.n_seq_naks++; 2249 /* 2250 * Back up to the responder's expected PSN. 2251 * Note that we might get a NAK in the middle of an 2252 * RDMA READ response which terminates the RDMA 2253 * READ. 2254 */ 2255 hfi1_restart_rc(qp, psn, 0); 2256 hfi1_schedule_send(qp); 2257 break; 2258 2259 case 1: /* Invalid Request */ 2260 status = IB_WC_REM_INV_REQ_ERR; 2261 ibp->rvp.n_other_naks++; 2262 goto class_b; 2263 2264 case 2: /* Remote Access Error */ 2265 status = IB_WC_REM_ACCESS_ERR; 2266 ibp->rvp.n_other_naks++; 2267 goto class_b; 2268 2269 case 3: /* Remote Operation Error */ 2270 status = IB_WC_REM_OP_ERR; 2271 ibp->rvp.n_other_naks++; 2272 class_b: 2273 if (qp->s_last == qp->s_acked) { 2274 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) 2275 hfi1_kern_read_tid_flow_free(qp); 2276 2277 hfi1_trdma_send_complete(qp, wqe, status); 2278 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 2279 } 2280 break; 2281 2282 default: 2283 /* Ignore other reserved NAK error codes */ 2284 goto reserved; 2285 } 2286 qp->s_retry = qp->s_retry_cnt; 2287 qp->s_rnr_retry = qp->s_rnr_retry_cnt; 2288 goto bail_stop; 2289 2290 default: /* 2: reserved */ 2291 reserved: 2292 /* Ignore reserved NAK codes. */ 2293 goto bail_stop; 2294 } 2295 /* cannot be reached */ 2296 bail_stop: 2297 rvt_stop_rc_timers(qp); 2298 return ret; 2299 } 2300 2301 /* 2302 * We have seen an out of sequence RDMA read middle or last packet. 2303 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. 2304 */ 2305 static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, 2306 struct hfi1_ctxtdata *rcd) 2307 { 2308 struct rvt_swqe *wqe; 2309 2310 lockdep_assert_held(&qp->s_lock); 2311 /* Remove QP from retry timer */ 2312 rvt_stop_rc_timers(qp); 2313 2314 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2315 2316 while (cmp_psn(psn, wqe->lpsn) > 0) { 2317 if (wqe->wr.opcode == IB_WR_RDMA_READ || 2318 wqe->wr.opcode == IB_WR_TID_RDMA_READ || 2319 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE || 2320 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || 2321 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) 2322 break; 2323 wqe = do_rc_completion(qp, wqe, ibp); 2324 } 2325 2326 ibp->rvp.n_rdma_seq++; 2327 qp->r_flags |= RVT_R_RDMAR_SEQ; 2328 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0); 2329 if (list_empty(&qp->rspwait)) { 2330 qp->r_flags |= RVT_R_RSP_SEND; 2331 rvt_get_qp(qp); 2332 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); 2333 } 2334 } 2335 2336 /** 2337 * rc_rcv_resp - process an incoming RC response packet 2338 * @packet: data packet information 2339 * 2340 * This is called from hfi1_rc_rcv() to process an incoming RC response 2341 * packet for the given QP. 2342 * Called at interrupt level. 
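 *
 * Illustrative note: responses are filtered before any state changes:
 * a PSN at or beyond s_next_psn is dropped as invalid, a PSN at or
 * before s_last_psn is treated as a duplicate (at most refreshing
 * credits for a "ghost" ACK), and only then is the response matched
 * against the WQE at s_acked.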
2343 */ 2344 static void rc_rcv_resp(struct hfi1_packet *packet) 2345 { 2346 struct hfi1_ctxtdata *rcd = packet->rcd; 2347 void *data = packet->payload; 2348 u32 tlen = packet->tlen; 2349 struct rvt_qp *qp = packet->qp; 2350 struct hfi1_ibport *ibp; 2351 struct ib_other_headers *ohdr = packet->ohdr; 2352 struct rvt_swqe *wqe; 2353 enum ib_wc_status status; 2354 unsigned long flags; 2355 int diff; 2356 u64 val; 2357 u32 aeth; 2358 u32 psn = ib_bth_get_psn(packet->ohdr); 2359 u32 pmtu = qp->pmtu; 2360 u16 hdrsize = packet->hlen; 2361 u8 opcode = packet->opcode; 2362 u8 pad = packet->pad; 2363 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); 2364 2365 spin_lock_irqsave(&qp->s_lock, flags); 2366 trace_hfi1_ack(qp, psn); 2367 2368 /* Ignore invalid responses. */ 2369 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0) 2370 goto ack_done; 2371 2372 /* Ignore duplicate responses. */ 2373 diff = cmp_psn(psn, qp->s_last_psn); 2374 if (unlikely(diff <= 0)) { 2375 /* Update credits for "ghost" ACKs */ 2376 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) { 2377 aeth = be32_to_cpu(ohdr->u.aeth); 2378 if ((aeth >> IB_AETH_NAK_SHIFT) == 0) 2379 rvt_get_credit(qp, aeth); 2380 } 2381 goto ack_done; 2382 } 2383 2384 /* 2385 * Skip everything other than the PSN we expect, if we are waiting 2386 * for a reply to a restarted RDMA read or atomic op. 2387 */ 2388 if (qp->r_flags & RVT_R_RDMAR_SEQ) { 2389 if (cmp_psn(psn, qp->s_last_psn + 1) != 0) 2390 goto ack_done; 2391 qp->r_flags &= ~RVT_R_RDMAR_SEQ; 2392 } 2393 2394 if (unlikely(qp->s_acked == qp->s_tail)) 2395 goto ack_done; 2396 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2397 status = IB_WC_SUCCESS; 2398 2399 switch (opcode) { 2400 case OP(ACKNOWLEDGE): 2401 case OP(ATOMIC_ACKNOWLEDGE): 2402 case OP(RDMA_READ_RESPONSE_FIRST): 2403 aeth = be32_to_cpu(ohdr->u.aeth); 2404 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) 2405 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth); 2406 else 2407 val = 0; 2408 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || 2409 opcode != OP(RDMA_READ_RESPONSE_FIRST)) 2410 goto ack_done; 2411 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2412 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) 2413 goto ack_op_err; 2414 /* 2415 * If this is a response to a resent RDMA read, we 2416 * have to be careful to copy the data to the right 2417 * location. 2418 */ 2419 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, 2420 wqe, psn, pmtu); 2421 goto read_middle; 2422 2423 case OP(RDMA_READ_RESPONSE_MIDDLE): 2424 /* no AETH, no ACK */ 2425 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) 2426 goto ack_seq_err; 2427 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) 2428 goto ack_op_err; 2429 read_middle: 2430 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) 2431 goto ack_len_err; 2432 if (unlikely(pmtu >= qp->s_rdma_read_len)) 2433 goto ack_len_err; 2434 2435 /* 2436 * We got a response so update the timeout. 2437 * 4.096 usec. * (1 << qp->timeout) 2438 */ 2439 rvt_mod_retry_timer(qp); 2440 if (qp->s_flags & RVT_S_WAIT_ACK) { 2441 qp->s_flags &= ~RVT_S_WAIT_ACK; 2442 hfi1_schedule_send(qp); 2443 } 2444 2445 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE)) 2446 qp->s_retry = qp->s_retry_cnt; 2447 2448 /* 2449 * Update the RDMA receive state but do the copy w/o 2450 * holding the locks and blocking interrupts. 
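 *
 * Illustrative note: s_rdma_read_len and s_last_psn are updated while
 * the s_lock is still held; the lock is then dropped so the PMTU-sized
 * rvt_copy_sge() below runs without blocking interrupts.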
2451 */ 2452 qp->s_rdma_read_len -= pmtu; 2453 update_last_psn(qp, psn); 2454 spin_unlock_irqrestore(&qp->s_lock, flags); 2455 rvt_copy_sge(qp, &qp->s_rdma_read_sge, 2456 data, pmtu, false, false); 2457 goto bail; 2458 2459 case OP(RDMA_READ_RESPONSE_ONLY): 2460 aeth = be32_to_cpu(ohdr->u.aeth); 2461 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) 2462 goto ack_done; 2463 /* 2464 * Check that the data size is >= 0 && <= pmtu. 2465 * Remember to account for ICRC (4). 2466 */ 2467 if (unlikely(tlen < (hdrsize + extra_bytes))) 2468 goto ack_len_err; 2469 /* 2470 * If this is a response to a resent RDMA read, we 2471 * have to be careful to copy the data to the right 2472 * location. 2473 */ 2474 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 2475 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, 2476 wqe, psn, pmtu); 2477 goto read_last; 2478 2479 case OP(RDMA_READ_RESPONSE_LAST): 2480 /* ACKs READ req. */ 2481 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) 2482 goto ack_seq_err; 2483 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) 2484 goto ack_op_err; 2485 /* 2486 * Check that the data size is >= 1 && <= pmtu. 2487 * Remember to account for ICRC (4). 2488 */ 2489 if (unlikely(tlen <= (hdrsize + extra_bytes))) 2490 goto ack_len_err; 2491 read_last: 2492 tlen -= hdrsize + extra_bytes; 2493 if (unlikely(tlen != qp->s_rdma_read_len)) 2494 goto ack_len_err; 2495 aeth = be32_to_cpu(ohdr->u.aeth); 2496 rvt_copy_sge(qp, &qp->s_rdma_read_sge, 2497 data, tlen, false, false); 2498 WARN_ON(qp->s_rdma_read_sge.num_sge); 2499 (void)do_rc_ack(qp, aeth, psn, 2500 OP(RDMA_READ_RESPONSE_LAST), 0, rcd); 2501 goto ack_done; 2502 } 2503 2504 ack_op_err: 2505 status = IB_WC_LOC_QP_OP_ERR; 2506 goto ack_err; 2507 2508 ack_seq_err: 2509 ibp = rcd_to_iport(rcd); 2510 rdma_seq_err(qp, ibp, psn, rcd); 2511 goto ack_done; 2512 2513 ack_len_err: 2514 status = IB_WC_LOC_LEN_ERR; 2515 ack_err: 2516 if (qp->s_last == qp->s_acked) { 2517 rvt_send_complete(qp, wqe, status); 2518 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 2519 } 2520 ack_done: 2521 spin_unlock_irqrestore(&qp->s_lock, flags); 2522 bail: 2523 return; 2524 } 2525 2526 static inline void rc_cancel_ack(struct rvt_qp *qp) 2527 { 2528 qp->r_adefered = 0; 2529 if (list_empty(&qp->rspwait)) 2530 return; 2531 list_del_init(&qp->rspwait); 2532 qp->r_flags &= ~RVT_R_RSP_NAK; 2533 rvt_put_qp(qp); 2534 } 2535 2536 /** 2537 * rc_rcv_error - process an incoming duplicate or error RC packet 2538 * @ohdr: the other headers for this packet 2539 * @data: the packet data 2540 * @qp: the QP for this packet 2541 * @opcode: the opcode for this packet 2542 * @psn: the packet sequence number for this packet 2543 * @diff: the difference between the PSN and the expected PSN 2544 * @rcd: the receive context 2545 * 2546 * This is called from hfi1_rc_rcv() to process an unexpected 2547 * incoming RC packet for the given QP. 2548 * Called at interrupt level. 2549 * Return 1 if no more processing is needed; otherwise return 0 to 2550 * schedule a response to be sent. 2551 */ 2552 static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data, 2553 struct rvt_qp *qp, u32 opcode, u32 psn, 2554 int diff, struct hfi1_ctxtdata *rcd) 2555 { 2556 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 2557 struct rvt_ack_entry *e; 2558 unsigned long flags; 2559 u8 prev; 2560 u8 mra; /* most recent ACK */ 2561 bool old_req; 2562 2563 trace_hfi1_rcv_error(qp, psn); 2564 if (diff > 0) { 2565 /* 2566 * Packet sequence error. 2567 * A NAK will ACK earlier sends and RDMA writes. 
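 * Illustrative note: e.g. if r_psn is 10 and a packet with PSN 13
 * arrives, diff > 0 and the deferred NAK carries r_ack_psn == 10, so
 * the requester backs up to the first missing packet.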
2568 * Don't queue the NAK if we already sent one. 2569 */ 2570 if (!qp->r_nak_state) { 2571 ibp->rvp.n_rc_seqnak++; 2572 qp->r_nak_state = IB_NAK_PSN_ERROR; 2573 /* Use the expected PSN. */ 2574 qp->r_ack_psn = qp->r_psn; 2575 /* 2576 * Wait to send the sequence NAK until all packets 2577 * in the receive queue have been processed. 2578 * Otherwise, we end up propagating congestion. 2579 */ 2580 rc_defered_ack(rcd, qp); 2581 } 2582 goto done; 2583 } 2584 2585 /* 2586 * Handle a duplicate request. Don't re-execute SEND, RDMA 2587 * write or atomic op. Don't NAK errors, just silently drop 2588 * the duplicate request. Note that r_sge, r_len, and 2589 * r_rcv_len may be in use so don't modify them. 2590 * 2591 * We are supposed to ACK the earliest duplicate PSN but we 2592 * can coalesce an outstanding duplicate ACK. We have to 2593 * send the earliest so that RDMA reads can be restarted at 2594 * the requester's expected PSN. 2595 * 2596 * First, find where this duplicate PSN falls within the 2597 * ACKs previously sent. 2598 * old_req is true if there is an older response that is scheduled 2599 * to be sent before sending this one. 2600 */ 2601 e = NULL; 2602 old_req = true; 2603 ibp->rvp.n_rc_dupreq++; 2604 2605 spin_lock_irqsave(&qp->s_lock, flags); 2606 2607 e = find_prev_entry(qp, psn, &prev, &mra, &old_req); 2608 2609 switch (opcode) { 2610 case OP(RDMA_READ_REQUEST): { 2611 struct ib_reth *reth; 2612 u32 offset; 2613 u32 len; 2614 2615 /* 2616 * If we didn't find the RDMA read request in the ack queue, 2617 * we can ignore this request. 2618 */ 2619 if (!e || e->opcode != OP(RDMA_READ_REQUEST)) 2620 goto unlock_done; 2621 /* RETH comes after BTH */ 2622 reth = &ohdr->u.rc.reth; 2623 /* 2624 * Address range must be a subset of the original 2625 * request and start on pmtu boundaries. 2626 * We reuse the old ack_queue slot since the requester 2627 * should not back up and request an earlier PSN for the 2628 * same request. 2629 */ 2630 offset = delta_psn(psn, e->psn) * qp->pmtu; 2631 len = be32_to_cpu(reth->length); 2632 if (unlikely(offset + len != e->rdma_sge.sge_length)) 2633 goto unlock_done; 2634 release_rdma_sge_mr(e); 2635 if (len != 0) { 2636 u32 rkey = be32_to_cpu(reth->rkey); 2637 u64 vaddr = get_ib_reth_vaddr(reth); 2638 int ok; 2639 2640 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, 2641 IB_ACCESS_REMOTE_READ); 2642 if (unlikely(!ok)) 2643 goto unlock_done; 2644 } else { 2645 e->rdma_sge.vaddr = NULL; 2646 e->rdma_sge.length = 0; 2647 e->rdma_sge.sge_length = 0; 2648 } 2649 e->psn = psn; 2650 if (old_req) 2651 goto unlock_done; 2652 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) 2653 qp->s_acked_ack_queue = prev; 2654 qp->s_tail_ack_queue = prev; 2655 break; 2656 } 2657 2658 case OP(COMPARE_SWAP): 2659 case OP(FETCH_ADD): { 2660 /* 2661 * If we didn't find the atomic request in the ack queue 2662 * or the send engine is already backed up to send an 2663 * earlier entry, we can ignore this request. 2664 */ 2665 if (!e || e->opcode != (u8)opcode || old_req) 2666 goto unlock_done; 2667 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue) 2668 qp->s_acked_ack_queue = prev; 2669 qp->s_tail_ack_queue = prev; 2670 break; 2671 } 2672 2673 default: 2674 /* 2675 * Ignore this operation if it doesn't request an ACK 2676 * or an earlier RDMA read or atomic is going to be resent. 2677 */ 2678 if (!(psn & IB_BTH_REQ_ACK) || old_req) 2679 goto unlock_done; 2680 /* 2681 * Resend the most recent ACK if this request is 2682 * after all the previous RDMA reads and atomics. 
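 *
 * Illustrative note: "mra" is the most-recent-ACK slot returned by
 * find_prev_entry(); when it equals r_head_ack_queue there is nothing
 * queued behind it, so a coalesced ACK for r_psn - 1 is sent right away
 * instead of replaying an ack_queue entry.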
2683 */ 2684 if (mra == qp->r_head_ack_queue) { 2685 spin_unlock_irqrestore(&qp->s_lock, flags); 2686 qp->r_nak_state = 0; 2687 qp->r_ack_psn = qp->r_psn - 1; 2688 goto send_ack; 2689 } 2690 2691 /* 2692 * Resend the RDMA read or atomic op which 2693 * ACKs this duplicate request. 2694 */ 2695 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue) 2696 qp->s_acked_ack_queue = mra; 2697 qp->s_tail_ack_queue = mra; 2698 break; 2699 } 2700 qp->s_ack_state = OP(ACKNOWLEDGE); 2701 qp->s_flags |= RVT_S_RESP_PENDING; 2702 qp->r_nak_state = 0; 2703 hfi1_schedule_send(qp); 2704 2705 unlock_done: 2706 spin_unlock_irqrestore(&qp->s_lock, flags); 2707 done: 2708 return 1; 2709 2710 send_ack: 2711 return 0; 2712 } 2713 2714 static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, 2715 u32 lqpn, u32 rqpn, u8 svc_type) 2716 { 2717 struct opa_hfi1_cong_log_event_internal *cc_event; 2718 unsigned long flags; 2719 2720 if (sl >= OPA_MAX_SLS) 2721 return; 2722 2723 spin_lock_irqsave(&ppd->cc_log_lock, flags); 2724 2725 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8); 2726 ppd->threshold_event_counter++; 2727 2728 cc_event = &ppd->cc_events[ppd->cc_log_idx++]; 2729 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS) 2730 ppd->cc_log_idx = 0; 2731 cc_event->lqpn = lqpn & RVT_QPN_MASK; 2732 cc_event->rqpn = rqpn & RVT_QPN_MASK; 2733 cc_event->sl = sl; 2734 cc_event->svc_type = svc_type; 2735 cc_event->rlid = rlid; 2736 /* keep timestamp in units of 1.024 usec */ 2737 cc_event->timestamp = ktime_get_ns() / 1024; 2738 2739 spin_unlock_irqrestore(&ppd->cc_log_lock, flags); 2740 } 2741 2742 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn, 2743 u32 rqpn, u8 svc_type) 2744 { 2745 struct cca_timer *cca_timer; 2746 u16 ccti, ccti_incr, ccti_timer, ccti_limit; 2747 u8 trigger_threshold; 2748 struct cc_state *cc_state; 2749 unsigned long flags; 2750 2751 if (sl >= OPA_MAX_SLS) 2752 return; 2753 2754 cc_state = get_cc_state(ppd); 2755 2756 if (!cc_state) 2757 return; 2758 2759 /* 2760 * 1) increase CCTI (for this SL) 2761 * 2) select IPG (i.e., call set_link_ipg()) 2762 * 3) start timer 2763 */ 2764 ccti_limit = cc_state->cct.ccti_limit; 2765 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase; 2766 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer; 2767 trigger_threshold = 2768 cc_state->cong_setting.entries[sl].trigger_threshold; 2769 2770 spin_lock_irqsave(&ppd->cca_timer_lock, flags); 2771 2772 cca_timer = &ppd->cca_timer[sl]; 2773 if (cca_timer->ccti < ccti_limit) { 2774 if (cca_timer->ccti + ccti_incr <= ccti_limit) 2775 cca_timer->ccti += ccti_incr; 2776 else 2777 cca_timer->ccti = ccti_limit; 2778 set_link_ipg(ppd); 2779 } 2780 2781 ccti = cca_timer->ccti; 2782 2783 if (!hrtimer_active(&cca_timer->hrtimer)) { 2784 /* ccti_timer is in units of 1.024 usec */ 2785 unsigned long nsec = 1024 * ccti_timer; 2786 2787 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec), 2788 HRTIMER_MODE_REL_PINNED); 2789 } 2790 2791 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags); 2792 2793 if ((trigger_threshold != 0) && (ccti >= trigger_threshold)) 2794 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type); 2795 } 2796 2797 /** 2798 * hfi1_rc_rcv - process an incoming RC packet 2799 * @packet: data packet information 2800 * 2801 * This is called from qp_rcv() to process an incoming RC packet 2802 * for the given QP. 2803 * May be called at interrupt level. 
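 *
 * Illustrative note: the dispatch order below is: response opcodes go
 * to rc_rcv_resp() (this QP is the requester), packets whose PSN does
 * not match r_psn go to rc_rcv_error(), and everything else is checked
 * for a legal opcode sequence against r_state before the payload is
 * processed.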
2804 */ 2805 void hfi1_rc_rcv(struct hfi1_packet *packet) 2806 { 2807 struct hfi1_ctxtdata *rcd = packet->rcd; 2808 void *data = packet->payload; 2809 u32 tlen = packet->tlen; 2810 struct rvt_qp *qp = packet->qp; 2811 struct hfi1_qp_priv *qpriv = qp->priv; 2812 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 2813 struct ib_other_headers *ohdr = packet->ohdr; 2814 u32 opcode = packet->opcode; 2815 u32 hdrsize = packet->hlen; 2816 u32 psn = ib_bth_get_psn(packet->ohdr); 2817 u32 pad = packet->pad; 2818 struct ib_wc wc; 2819 u32 pmtu = qp->pmtu; 2820 int diff; 2821 struct ib_reth *reth; 2822 unsigned long flags; 2823 int ret; 2824 bool copy_last = false, fecn; 2825 u32 rkey; 2826 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2); 2827 2828 lockdep_assert_held(&qp->r_lock); 2829 2830 if (hfi1_ruc_check_hdr(ibp, packet)) 2831 return; 2832 2833 fecn = process_ecn(qp, packet); 2834 opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1])); 2835 2836 /* 2837 * Process responses (ACKs) before anything else. Note that the 2838 * packet sequence number will be for something in the send work 2839 * queue rather than the expected receive packet sequence number. 2840 * In other words, this QP is the requester. 2841 */ 2842 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && 2843 opcode <= OP(ATOMIC_ACKNOWLEDGE)) { 2844 rc_rcv_resp(packet); 2845 return; 2846 } 2847 2848 /* Compute 24 bits worth of difference. */ 2849 diff = delta_psn(psn, qp->r_psn); 2850 if (unlikely(diff)) { 2851 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) 2852 return; 2853 goto send_ack; 2854 } 2855 2856 /* Check for opcode sequence errors. */ 2857 switch (qp->r_state) { 2858 case OP(SEND_FIRST): 2859 case OP(SEND_MIDDLE): 2860 if (opcode == OP(SEND_MIDDLE) || 2861 opcode == OP(SEND_LAST) || 2862 opcode == OP(SEND_LAST_WITH_IMMEDIATE) || 2863 opcode == OP(SEND_LAST_WITH_INVALIDATE)) 2864 break; 2865 goto nack_inv; 2866 2867 case OP(RDMA_WRITE_FIRST): 2868 case OP(RDMA_WRITE_MIDDLE): 2869 if (opcode == OP(RDMA_WRITE_MIDDLE) || 2870 opcode == OP(RDMA_WRITE_LAST) || 2871 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) 2872 break; 2873 goto nack_inv; 2874 2875 default: 2876 if (opcode == OP(SEND_MIDDLE) || 2877 opcode == OP(SEND_LAST) || 2878 opcode == OP(SEND_LAST_WITH_IMMEDIATE) || 2879 opcode == OP(SEND_LAST_WITH_INVALIDATE) || 2880 opcode == OP(RDMA_WRITE_MIDDLE) || 2881 opcode == OP(RDMA_WRITE_LAST) || 2882 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) 2883 goto nack_inv; 2884 /* 2885 * Note that it is up to the requester to not send a new 2886 * RDMA read or atomic operation before receiving an ACK 2887 * for the previous operation. 2888 */ 2889 break; 2890 } 2891 2892 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) 2893 rvt_comm_est(qp); 2894 2895 /* OK, process the packet. */ 2896 switch (opcode) { 2897 case OP(SEND_FIRST): 2898 ret = rvt_get_rwqe(qp, false); 2899 if (ret < 0) 2900 goto nack_op_err; 2901 if (!ret) 2902 goto rnr_nak; 2903 qp->r_rcv_len = 0; 2904 fallthrough; 2905 case OP(SEND_MIDDLE): 2906 case OP(RDMA_WRITE_MIDDLE): 2907 send_middle: 2908 /* Check for invalid length PMTU or posted rwqe len. 
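 * Illustrative note: a SEND/RDMA WRITE "middle" packet must carry
 * exactly one PMTU of payload, so tlen is required to equal
 * hdrsize + pmtu + extra_bytes, where extra_bytes covers the pad, the
 * ICRC (SIZE_OF_CRC << 2, i.e. 4 bytes) and, for 16B packets, the LT
 * byte.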
*/ 2909 /* 2910 * There will be no padding for 9B packet but 16B packets 2911 * will come in with some padding since we always add 2912 * CRC and LT bytes which will need to be flit aligned 2913 */ 2914 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes))) 2915 goto nack_inv; 2916 qp->r_rcv_len += pmtu; 2917 if (unlikely(qp->r_rcv_len > qp->r_len)) 2918 goto nack_inv; 2919 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false); 2920 break; 2921 2922 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): 2923 /* consume RWQE */ 2924 ret = rvt_get_rwqe(qp, true); 2925 if (ret < 0) 2926 goto nack_op_err; 2927 if (!ret) 2928 goto rnr_nak; 2929 goto send_last_imm; 2930 2931 case OP(SEND_ONLY): 2932 case OP(SEND_ONLY_WITH_IMMEDIATE): 2933 case OP(SEND_ONLY_WITH_INVALIDATE): 2934 ret = rvt_get_rwqe(qp, false); 2935 if (ret < 0) 2936 goto nack_op_err; 2937 if (!ret) 2938 goto rnr_nak; 2939 qp->r_rcv_len = 0; 2940 if (opcode == OP(SEND_ONLY)) 2941 goto no_immediate_data; 2942 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE)) 2943 goto send_last_inv; 2944 fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */ 2945 case OP(SEND_LAST_WITH_IMMEDIATE): 2946 send_last_imm: 2947 wc.ex.imm_data = ohdr->u.imm_data; 2948 wc.wc_flags = IB_WC_WITH_IMM; 2949 goto send_last; 2950 case OP(SEND_LAST_WITH_INVALIDATE): 2951 send_last_inv: 2952 rkey = be32_to_cpu(ohdr->u.ieth); 2953 if (rvt_invalidate_rkey(qp, rkey)) 2954 goto no_immediate_data; 2955 wc.ex.invalidate_rkey = rkey; 2956 wc.wc_flags = IB_WC_WITH_INVALIDATE; 2957 goto send_last; 2958 case OP(RDMA_WRITE_LAST): 2959 copy_last = rvt_is_user_qp(qp); 2960 fallthrough; 2961 case OP(SEND_LAST): 2962 no_immediate_data: 2963 wc.wc_flags = 0; 2964 wc.ex.imm_data = 0; 2965 send_last: 2966 /* Check for invalid length. */ 2967 /* LAST len should be >= 1 */ 2968 if (unlikely(tlen < (hdrsize + extra_bytes))) 2969 goto nack_inv; 2970 /* Don't count the CRC(and padding and LT byte for 16B). */ 2971 tlen -= (hdrsize + extra_bytes); 2972 wc.byte_len = tlen + qp->r_rcv_len; 2973 if (unlikely(wc.byte_len > qp->r_len)) 2974 goto nack_inv; 2975 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last); 2976 rvt_put_ss(&qp->r_sge); 2977 qp->r_msn++; 2978 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) 2979 break; 2980 wc.wr_id = qp->r_wr_id; 2981 wc.status = IB_WC_SUCCESS; 2982 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || 2983 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) 2984 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; 2985 else 2986 wc.opcode = IB_WC_RECV; 2987 wc.qp = &qp->ibqp; 2988 wc.src_qp = qp->remote_qpn; 2989 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX; 2990 /* 2991 * It seems that IB mandates the presence of an SL in a 2992 * work completion only for the UD transport (see section 2993 * 11.4.2 of IBTA Vol. 1). 2994 * 2995 * However, the way the SL is chosen below is consistent 2996 * with the way that IB/qib works and is trying avoid 2997 * introducing incompatibilities. 2998 * 2999 * See also OPA Vol. 1, section 9.7.6, and table 9-17. 3000 */ 3001 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr); 3002 /* zero fields that are N/A */ 3003 wc.vendor_err = 0; 3004 wc.pkey_index = 0; 3005 wc.dlid_path_bits = 0; 3006 wc.port_num = 0; 3007 /* Signal completion event if the solicited bit is set. 
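 * Illustrative note: rvt_recv_cq() posts the work completion to the
 * receive CQ; under the usual rdmavt CQ semantics the solicited flag
 * from the BTH only matters for a CQ armed with IB_CQ_SOLICITED.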
*/ 3008 rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr)); 3009 break; 3010 3011 case OP(RDMA_WRITE_ONLY): 3012 copy_last = rvt_is_user_qp(qp); 3013 fallthrough; 3014 case OP(RDMA_WRITE_FIRST): 3015 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): 3016 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) 3017 goto nack_inv; 3018 /* consume RWQE */ 3019 reth = &ohdr->u.rc.reth; 3020 qp->r_len = be32_to_cpu(reth->length); 3021 qp->r_rcv_len = 0; 3022 qp->r_sge.sg_list = NULL; 3023 if (qp->r_len != 0) { 3024 u32 rkey = be32_to_cpu(reth->rkey); 3025 u64 vaddr = get_ib_reth_vaddr(reth); 3026 int ok; 3027 3028 /* Check rkey & NAK */ 3029 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, 3030 rkey, IB_ACCESS_REMOTE_WRITE); 3031 if (unlikely(!ok)) 3032 goto nack_acc; 3033 qp->r_sge.num_sge = 1; 3034 } else { 3035 qp->r_sge.num_sge = 0; 3036 qp->r_sge.sge.mr = NULL; 3037 qp->r_sge.sge.vaddr = NULL; 3038 qp->r_sge.sge.length = 0; 3039 qp->r_sge.sge.sge_length = 0; 3040 } 3041 if (opcode == OP(RDMA_WRITE_FIRST)) 3042 goto send_middle; 3043 else if (opcode == OP(RDMA_WRITE_ONLY)) 3044 goto no_immediate_data; 3045 ret = rvt_get_rwqe(qp, true); 3046 if (ret < 0) 3047 goto nack_op_err; 3048 if (!ret) { 3049 /* peer will send again */ 3050 rvt_put_ss(&qp->r_sge); 3051 goto rnr_nak; 3052 } 3053 wc.ex.imm_data = ohdr->u.rc.imm_data; 3054 wc.wc_flags = IB_WC_WITH_IMM; 3055 goto send_last; 3056 3057 case OP(RDMA_READ_REQUEST): { 3058 struct rvt_ack_entry *e; 3059 u32 len; 3060 u8 next; 3061 3062 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) 3063 goto nack_inv; 3064 next = qp->r_head_ack_queue + 1; 3065 /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */ 3066 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) 3067 next = 0; 3068 spin_lock_irqsave(&qp->s_lock, flags); 3069 if (unlikely(next == qp->s_acked_ack_queue)) { 3070 if (!qp->s_ack_queue[next].sent) 3071 goto nack_inv_unlck; 3072 update_ack_queue(qp, next); 3073 } 3074 e = &qp->s_ack_queue[qp->r_head_ack_queue]; 3075 release_rdma_sge_mr(e); 3076 reth = &ohdr->u.rc.reth; 3077 len = be32_to_cpu(reth->length); 3078 if (len) { 3079 u32 rkey = be32_to_cpu(reth->rkey); 3080 u64 vaddr = get_ib_reth_vaddr(reth); 3081 int ok; 3082 3083 /* Check rkey & NAK */ 3084 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, 3085 rkey, IB_ACCESS_REMOTE_READ); 3086 if (unlikely(!ok)) 3087 goto nack_acc_unlck; 3088 /* 3089 * Update the next expected PSN. We add 1 later 3090 * below, so only add the remainder here. 3091 */ 3092 qp->r_psn += rvt_div_mtu(qp, len - 1); 3093 } else { 3094 e->rdma_sge.mr = NULL; 3095 e->rdma_sge.vaddr = NULL; 3096 e->rdma_sge.length = 0; 3097 e->rdma_sge.sge_length = 0; 3098 } 3099 e->opcode = opcode; 3100 e->sent = 0; 3101 e->psn = psn; 3102 e->lpsn = qp->r_psn; 3103 /* 3104 * We need to increment the MSN here instead of when we 3105 * finish sending the result since a duplicate request would 3106 * increment it more than once. 3107 */ 3108 qp->r_msn++; 3109 qp->r_psn++; 3110 qp->r_state = opcode; 3111 qp->r_nak_state = 0; 3112 qp->r_head_ack_queue = next; 3113 qpriv->r_tid_alloc = qp->r_head_ack_queue; 3114 3115 /* Schedule the send engine. 
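 * Illustrative note: in the usual rdmavt/hfi1 flow, RVT_S_RESP_PENDING
 * set below causes the send engine to build the read response from the
 * s_ack_queue entry just filled in (via make_rc_ack()), and RVT_S_ECN
 * records that a FECN was seen so the response path can signal the
 * congestion back to the requester.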
*/ 3116 qp->s_flags |= RVT_S_RESP_PENDING; 3117 if (fecn) 3118 qp->s_flags |= RVT_S_ECN; 3119 hfi1_schedule_send(qp); 3120 3121 spin_unlock_irqrestore(&qp->s_lock, flags); 3122 return; 3123 } 3124 3125 case OP(COMPARE_SWAP): 3126 case OP(FETCH_ADD): { 3127 struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth; 3128 u64 vaddr = get_ib_ateth_vaddr(ateth); 3129 bool opfn = opcode == OP(COMPARE_SWAP) && 3130 vaddr == HFI1_VERBS_E_ATOMIC_VADDR; 3131 struct rvt_ack_entry *e; 3132 atomic64_t *maddr; 3133 u64 sdata; 3134 u32 rkey; 3135 u8 next; 3136 3137 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) && 3138 !opfn)) 3139 goto nack_inv; 3140 next = qp->r_head_ack_queue + 1; 3141 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) 3142 next = 0; 3143 spin_lock_irqsave(&qp->s_lock, flags); 3144 if (unlikely(next == qp->s_acked_ack_queue)) { 3145 if (!qp->s_ack_queue[next].sent) 3146 goto nack_inv_unlck; 3147 update_ack_queue(qp, next); 3148 } 3149 e = &qp->s_ack_queue[qp->r_head_ack_queue]; 3150 release_rdma_sge_mr(e); 3151 /* Process OPFN special virtual address */ 3152 if (opfn) { 3153 opfn_conn_response(qp, e, ateth); 3154 goto ack; 3155 } 3156 if (unlikely(vaddr & (sizeof(u64) - 1))) 3157 goto nack_inv_unlck; 3158 rkey = be32_to_cpu(ateth->rkey); 3159 /* Check rkey & NAK */ 3160 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), 3161 vaddr, rkey, 3162 IB_ACCESS_REMOTE_ATOMIC))) 3163 goto nack_acc_unlck; 3164 /* Perform atomic OP and save result. */ 3165 maddr = (atomic64_t *)qp->r_sge.sge.vaddr; 3166 sdata = get_ib_ateth_swap(ateth); 3167 e->atomic_data = (opcode == OP(FETCH_ADD)) ? 3168 (u64)atomic64_add_return(sdata, maddr) - sdata : 3169 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, 3170 get_ib_ateth_compare(ateth), 3171 sdata); 3172 rvt_put_mr(qp->r_sge.sge.mr); 3173 qp->r_sge.num_sge = 0; 3174 ack: 3175 e->opcode = opcode; 3176 e->sent = 0; 3177 e->psn = psn; 3178 e->lpsn = psn; 3179 qp->r_msn++; 3180 qp->r_psn++; 3181 qp->r_state = opcode; 3182 qp->r_nak_state = 0; 3183 qp->r_head_ack_queue = next; 3184 qpriv->r_tid_alloc = qp->r_head_ack_queue; 3185 3186 /* Schedule the send engine. */ 3187 qp->s_flags |= RVT_S_RESP_PENDING; 3188 if (fecn) 3189 qp->s_flags |= RVT_S_ECN; 3190 hfi1_schedule_send(qp); 3191 3192 spin_unlock_irqrestore(&qp->s_lock, flags); 3193 return; 3194 } 3195 3196 default: 3197 /* NAK unknown opcodes. */ 3198 goto nack_inv; 3199 } 3200 qp->r_psn++; 3201 qp->r_state = opcode; 3202 qp->r_ack_psn = psn; 3203 qp->r_nak_state = 0; 3204 /* Send an ACK if requested or required. 
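 * Illustrative note: ACKs are coalesced; r_adefered counts responses
 * deferred via rc_defered_ack(), and an immediate ACK is forced when
 * numpkt == 0, when a FECN needs answering, or once HFI1_PSN_CREDIT
 * deferrals have accumulated.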
*/ 3205 if (psn & IB_BTH_REQ_ACK || fecn) { 3206 if (packet->numpkt == 0 || fecn || 3207 qp->r_adefered >= HFI1_PSN_CREDIT) { 3208 rc_cancel_ack(qp); 3209 goto send_ack; 3210 } 3211 qp->r_adefered++; 3212 rc_defered_ack(rcd, qp); 3213 } 3214 return; 3215 3216 rnr_nak: 3217 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK; 3218 qp->r_ack_psn = qp->r_psn; 3219 /* Queue RNR NAK for later */ 3220 rc_defered_ack(rcd, qp); 3221 return; 3222 3223 nack_op_err: 3224 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 3225 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; 3226 qp->r_ack_psn = qp->r_psn; 3227 /* Queue NAK for later */ 3228 rc_defered_ack(rcd, qp); 3229 return; 3230 3231 nack_inv_unlck: 3232 spin_unlock_irqrestore(&qp->s_lock, flags); 3233 nack_inv: 3234 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 3235 qp->r_nak_state = IB_NAK_INVALID_REQUEST; 3236 qp->r_ack_psn = qp->r_psn; 3237 /* Queue NAK for later */ 3238 rc_defered_ack(rcd, qp); 3239 return; 3240 3241 nack_acc_unlck: 3242 spin_unlock_irqrestore(&qp->s_lock, flags); 3243 nack_acc: 3244 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); 3245 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; 3246 qp->r_ack_psn = qp->r_psn; 3247 send_ack: 3248 hfi1_send_rc_ack(packet, fecn); 3249 } 3250 3251 void hfi1_rc_hdrerr( 3252 struct hfi1_ctxtdata *rcd, 3253 struct hfi1_packet *packet, 3254 struct rvt_qp *qp) 3255 { 3256 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 3257 int diff; 3258 u32 opcode; 3259 u32 psn; 3260 3261 if (hfi1_ruc_check_hdr(ibp, packet)) 3262 return; 3263 3264 psn = ib_bth_get_psn(packet->ohdr); 3265 opcode = ib_bth_get_opcode(packet->ohdr); 3266 3267 /* Only deal with RDMA Writes for now */ 3268 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { 3269 diff = delta_psn(psn, qp->r_psn); 3270 if (!qp->r_nak_state && diff >= 0) { 3271 ibp->rvp.n_rc_seqnak++; 3272 qp->r_nak_state = IB_NAK_PSN_ERROR; 3273 /* Use the expected PSN. */ 3274 qp->r_ack_psn = qp->r_psn; 3275 /* 3276 * Wait to send the sequence 3277 * NAK until all packets 3278 * in the receive queue have 3279 * been processed. 3280 * Otherwise, we end up 3281 * propagating congestion. 3282 */ 3283 rc_defered_ack(rcd, qp); 3284 } /* Out of sequence NAK */ 3285 } /* QP Request NAKs */ 3286 } 3287
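
/*
 * Illustrative sketch, not part of the original driver and not meant to
 * be compiled here: the RC receive and ACK logic above leans on 24-bit
 * PSN serial arithmetic. A minimal model of the rdmavt helpers it uses,
 * assuming their usual definitions, is:
 *
 *	static int model_delta_psn(u32 a, u32 b)
 *	{
 *		return (((int)a - (int)b) << 8) >> 8;
 *	}
 *
 *	static int model_cmp_psn(u32 a, u32 b)
 *	{
 *		return (((int)a) - ((int)b)) << 8;
 *	}
 *
 * For example, model_delta_psn(0x000001, 0xffffff) == 2 and
 * model_cmp_psn(0x000000, 0xffffff) > 0: a PSN just past the 24-bit
 * wrap still compares as newer, which is what keeps the duplicate/new
 * tests in hfi1_rc_rcv() and do_rc_ack() correct across wrap-around.
 */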