/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)

/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
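
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * comment above follows the IB convention that a QP's retry timeout is
 * 4.096 usec * (1 << qp->timeout); the converted value is what lives in
 * qp->timeout_jiffies.  A hypothetical helper computing the raw value in
 * microseconds from the timeout exponent might look like this:
 */
static inline u64 example_rc_timeout_usec(u8 timeout_exp)
{
	/* 4.096 usec * (1 << timeout_exp) == (4096 << timeout_exp) nsec / 1000 */
	return (4096ULL << timeout_exp) / 1000;
}
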
/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @to - timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}

/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp - the QP
 *
 * Modify a potentially already running retry
 * timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}

/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp - the QP
 *
 * stop a retry timer and return if the timer
 * had been pending.
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}

/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}

/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer and return if the timer
 * had been pending.
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}

/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}

static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}
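
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * restart_sge() above turns a PSN delta into a byte offset into the
 * original WQE; e.g. resuming 3 packets into a request with a 4096-byte
 * path MTU skips 3 * 4096 = 12288 bytes of payload.  A hypothetical
 * stand-alone version of that arithmetic, reusing the driver's
 * delta_psn() to handle 24-bit PSN wrap:
 */
static inline u32 example_restart_offset(u32 psn, u32 first_psn, u32 pmtu)
{
	return delta_psn(psn, first_psn) * pmtu;
}
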
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->ss = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}
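
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * responder state for RDMA reads and atomics lives in qp->s_ack_queue[],
 * a small ring indexed by r_head_ack_queue and s_tail_ack_queue;
 * make_rc_ack() above advances the tail with a wrap once it passes
 * HFI1_MAX_RDMA_ATOMIC, and update_ack_queue() later in this file does
 * the same.  A hypothetical helper expressing that wrap:
 */
static inline u32 example_next_ack_queue_index(u32 n)
{
	return (n + 1 > HFI1_MAX_RDMA_ATOMIC) ? 0 : n + 1;
}
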
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				   IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
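
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * hfi1_make_rc_req() above sets IB_BTH_REQ_ACK on request boundaries,
 * whenever RVT_S_SEND_ONE forces a one-packet-at-a-time resend, and
 * periodically every HFI1_PSN_CREDIT packets inside a long message so
 * the peer keeps acknowledging.  A hypothetical predicate for the
 * periodic case:
 */
static inline bool example_periodic_req_ack(u32 bth2_psn, u32 first_psn)
{
	u32 delta = delta_psn(bth2_psn, first_psn);

	return delta && delta % HFI1_PSN_CREDIT == 0;
}
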
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
					&qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					   (qp->r_nak_state <<
					    HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send engine. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
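
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * ACK built above is sized in 32-bit words: LRH (8 bytes) + BTH (12
 * bytes) + AETH (4 bytes) gives hwords = 6, a GRH adds more, and the
 * PIO length prepends 2 dwords of PBC, i.e. plen = 2 + hwords.  A
 * hypothetical restatement of that accounting:
 */
static inline u32 example_rc_ack_plen(bool has_grh, u32 grh_dwords)
{
	u32 hwords = 6;	/* LRH + BTH + AETH = (8 + 12 + 4) / 4 */

	if (has_grh)
		hwords += grh_dwords;	/* as returned by hfi1_make_grh() */
	return 2 /* PBC */ + hwords;
}
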
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
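
/*
 * Illustrative sketch (editor's example, not part of the driver): PSNs
 * are 24-bit values that wrap, which is why reset_psn() and restart_rc()
 * above compare them with cmp_psn() and delta_psn() rather than plain
 * integer operators.  A hypothetical sign-extended 24-bit comparison in
 * the same spirit (returns <0, 0 or >0 like cmp_psn()):
 */
static inline int example_cmp_psn24(u32 a, u32 b)
{
	/* sign-extend the 24-bit circular difference */
	return (int)((a - b) << 8) >> 8;
}
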
/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_put_swqe(wqe);
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		rvt_put_swqe(wqe);
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
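
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * do_rc_completion() above defers the user-visible completion while the
 * SWQE's last PSN still lies inside the window of packets the send
 * engine is pushing out (s_sending_psn .. s_sending_hpsn).  A
 * hypothetical restatement of the "safe to complete" test:
 */
static inline bool example_swqe_send_done(u32 lpsn, u32 sending_psn,
					  u32 sending_hpsn)
{
	return cmp_psn(lpsn, sending_psn) < 0 ||
	       cmp_psn(sending_psn, sending_hpsn) > 0;
}
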
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	unsigned long to;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		hfi1_stop_rc_timers(qp);
		to =
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
					  HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}
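
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * do_rc_ack() above dispatches on the top three bits of the AETH
 * (0 = ACK, 1 = RNR NAK, 2 = reserved, 3 = NAK) and pulls the RNR/NAK
 * syndrome out of the credit field using HFI1_AETH_CREDIT_SHIFT/MASK.
 * A hypothetical decoder:
 */
static inline void example_decode_aeth(u32 aeth, u32 *code, u32 *syndrome)
{
	*code = aeth >> 29;
	*syndrome = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
}
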
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct ib_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
1827 * 1828 * First, find where this duplicate PSN falls within the 1829 * ACKs previously sent. 1830 * old_req is true if there is an older response that is scheduled 1831 * to be sent before sending this one. 1832 */ 1833 e = NULL; 1834 old_req = 1; 1835 ibp->rvp.n_rc_dupreq++; 1836 1837 spin_lock_irqsave(&qp->s_lock, flags); 1838 1839 for (i = qp->r_head_ack_queue; ; i = prev) { 1840 if (i == qp->s_tail_ack_queue) 1841 old_req = 0; 1842 if (i) 1843 prev = i - 1; 1844 else 1845 prev = HFI1_MAX_RDMA_ATOMIC; 1846 if (prev == qp->r_head_ack_queue) { 1847 e = NULL; 1848 break; 1849 } 1850 e = &qp->s_ack_queue[prev]; 1851 if (!e->opcode) { 1852 e = NULL; 1853 break; 1854 } 1855 if (cmp_psn(psn, e->psn) >= 0) { 1856 if (prev == qp->s_tail_ack_queue && 1857 cmp_psn(psn, e->lpsn) <= 0) 1858 old_req = 0; 1859 break; 1860 } 1861 } 1862 switch (opcode) { 1863 case OP(RDMA_READ_REQUEST): { 1864 struct ib_reth *reth; 1865 u32 offset; 1866 u32 len; 1867 1868 /* 1869 * If we didn't find the RDMA read request in the ack queue, 1870 * we can ignore this request. 1871 */ 1872 if (!e || e->opcode != OP(RDMA_READ_REQUEST)) 1873 goto unlock_done; 1874 /* RETH comes after BTH */ 1875 reth = &ohdr->u.rc.reth; 1876 /* 1877 * Address range must be a subset of the original 1878 * request and start on pmtu boundaries. 1879 * We reuse the old ack_queue slot since the requester 1880 * should not back up and request an earlier PSN for the 1881 * same request. 1882 */ 1883 offset = delta_psn(psn, e->psn) * qp->pmtu; 1884 len = be32_to_cpu(reth->length); 1885 if (unlikely(offset + len != e->rdma_sge.sge_length)) 1886 goto unlock_done; 1887 if (e->rdma_sge.mr) { 1888 rvt_put_mr(e->rdma_sge.mr); 1889 e->rdma_sge.mr = NULL; 1890 } 1891 if (len != 0) { 1892 u32 rkey = be32_to_cpu(reth->rkey); 1893 u64 vaddr = get_ib_reth_vaddr(reth); 1894 int ok; 1895 1896 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, 1897 IB_ACCESS_REMOTE_READ); 1898 if (unlikely(!ok)) 1899 goto unlock_done; 1900 } else { 1901 e->rdma_sge.vaddr = NULL; 1902 e->rdma_sge.length = 0; 1903 e->rdma_sge.sge_length = 0; 1904 } 1905 e->psn = psn; 1906 if (old_req) 1907 goto unlock_done; 1908 qp->s_tail_ack_queue = prev; 1909 break; 1910 } 1911 1912 case OP(COMPARE_SWAP): 1913 case OP(FETCH_ADD): { 1914 /* 1915 * If we didn't find the atomic request in the ack queue 1916 * or the send engine is already backed up to send an 1917 * earlier entry, we can ignore this request. 1918 */ 1919 if (!e || e->opcode != (u8)opcode || old_req) 1920 goto unlock_done; 1921 qp->s_tail_ack_queue = prev; 1922 break; 1923 } 1924 1925 default: 1926 /* 1927 * Ignore this operation if it doesn't request an ACK 1928 * or an earlier RDMA read or atomic is going to be resent. 1929 */ 1930 if (!(psn & IB_BTH_REQ_ACK) || old_req) 1931 goto unlock_done; 1932 /* 1933 * Resend the most recent ACK if this request is 1934 * after all the previous RDMA reads and atomics. 1935 */ 1936 if (i == qp->r_head_ack_queue) { 1937 spin_unlock_irqrestore(&qp->s_lock, flags); 1938 qp->r_nak_state = 0; 1939 qp->r_ack_psn = qp->r_psn - 1; 1940 goto send_ack; 1941 } 1942 1943 /* 1944 * Resend the RDMA read or atomic op which 1945 * ACKs this duplicate request. 
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

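	/*
	 * A trigger_threshold of 0 disables congestion event logging for
	 * this SL; otherwise, once the (saturating) CCTI has reached the
	 * threshold, record the event in the per-port congestion log.
	 */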
	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: the packet, which carries the receive context, headers,
 *          payload, and length for this QP
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret, is_fecn = 0;
	int copy_last = 0;
	u32 rkey;

	lockdep_assert_held(&qp->r_lock);
	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
		return;

	is_fecn = process_ecn(qp, packet, false);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
			    hdrsize, pmtu, rcd);
		if (is_fecn)
			goto send_ack;
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
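	/*
	 * SEND* and RDMA WRITE* payloads are copied into the receive
	 * buffers through the send_middle/send_last paths below; RDMA
	 * READ and atomic requests are instead queued in s_ack_queue
	 * and answered asynchronously by the send engine.
	 */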
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (bth0 >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
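		/*
		 * The solicited flag passed to rvt_cq_enter() is taken from
		 * the request's BTH; a CQ armed with IB_CQ_SOLICITED is only
		 * notified when it is set.
		 */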
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = 1;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

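		/*
		 * The read data itself is returned asynchronously: the send
		 * engine walks this s_ack_queue entry and generates RDMA READ
		 * response packets covering PSNs e->psn through e->lpsn.
		 */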
		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
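	/*
	 * ACK coalescing: rather than acknowledging every request packet,
	 * up to HFI1_PSN_CREDIT ACKs may be deferred (r_adefered) and sent
	 * later from the deferred-ACK list; the ACK goes out immediately
	 * when coalescing is not possible or a FECN must be reflected.
	 */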
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct ib_other_headers *ohdr;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}