/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp: the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}

/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp: the QP
 * @to: timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	qp->s_flags |= RVT_S_WAIT_RNR;
	qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}

/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp: the QP
 *
 * Modify a potentially already running retry timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}

/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp: the QP
 *
 * stop a retry timer and return whether the timer
 * was pending.
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}

/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp: the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}
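
/*
 * Note on the timer/flag pairing used by these helpers: the retry timer
 * (qp->s_timer) is tracked by RVT_S_TIMER and the RNR timer
 * (priv->s_rnr_timer) by RVT_S_WAIT_RNR, so the stop routines only call
 * del_timer() when the corresponding flag says that timer is logically armed.
 */
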
/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp: the QP
 *
 * stop an rnr timer and return whether the timer
 * was pending.
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}

/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}

/* only opcode mask for adaptive pio */
const u32 rc_only_opcode =
	BIT(OP(SEND_ONLY) & 0x1f) |
	BIT(OP(SEND_ONLY_WITH_IMMEDIATE & 0x1f)) |
	BIT(OP(RDMA_WRITE_ONLY & 0x1f)) |
	BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE & 0x1f)) |
	BIT(OP(RDMA_READ_REQUEST & 0x1f)) |
	BIT(OP(ACKNOWLEDGE & 0x1f)) |
	BIT(OP(ATOMIC_ACKNOWLEDGE & 0x1f)) |
	BIT(OP(COMPARE_SWAP & 0x1f)) |
	BIT(OP(FETCH_ADD & 0x1f));

static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}

/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct hfi1_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}
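
/*
 * Responder-side context used above: qp->s_ack_queue is a small ring of
 * HFI1_MAX_RDMA_ATOMIC + 1 entries.  r_head_ack_queue is advanced by the
 * receive path as RDMA read/atomic requests arrive, while s_tail_ack_queue
 * names the entry whose response make_rc_ack() is currently generating.
 */
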
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ps: the xmit packet state
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
					    IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
			} else {
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			}
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->atomic_wr.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->atomic_wr.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
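
/*
 * In the request builder above, bth2 carries the 24-bit PSN in its low bits
 * and IB_BTH_REQ_ACK is OR'd in to set the BTH AckReq bit: it is set on
 * *_ONLY/*_LAST packets, once every HFI1_PSN_CREDIT middle packets, and
 * whenever RVT_S_SEND_ONE forces a one-packet-at-a-time resend.
 */
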
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @rcd: the receive context
 * @qp: a pointer to the QP
 * @is_fecn: non-zero to set the BECN bit in the returned ACK
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct hfi1_ib_header hdr;
	struct hfi1_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
					&qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					   (qp->r_nak_state <<
					    HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send tasklet
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	this_cpu_inc(*ibp->rvp.rc_qacks);
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send tasklet. */
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
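
/*
 * reset_psn() below rewinds the requester: it walks the send queue starting
 * at s_acked to find the WQE containing the given PSN and primes qp->s_state
 * with one of the RDMA_READ_RESPONSE_* pseudo-states that hfi1_make_rc_req()
 * interprets as "restart this request type from an earlier PSN".
 */
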
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one PSN per request.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
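
/*
 * When the retry timer fires, the packet at s_last_psn + 1 was sent but never
 * acknowledged; hfi1_rc_timeout() below backs the requester up to that PSN
 * via restart_rc() and reschedules the send engine to resend from there.
 */
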
/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
{
	struct hfi1_other_headers *ohdr;
	struct rvt_swqe *wqe;
	struct ib_wc wc;
	unsigned i;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

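	/*
	 * WQEs between s_last and s_acked have already been ACKed but are
	 * only completed here once their last PSN has also left the send
	 * engine, i.e. is no longer inside the [s_sending_psn,
	 * s_sending_hpsn] window.
	 */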
	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	struct ib_wc wc;
	unsigned i;

	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the ACK packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the atomic data returned in an atomic ACK
 * @rcd: the receive context
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	unsigned long to;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					atomic_inc(&qp->refcount);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

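	/*
	 * AETH layout: bits 31:29 select the class handled below (0 = ACK,
	 * 1 = RNR NAK, 3 = NAK, 2 = reserved); bits 28:24 carry the credit
	 * count, RNR timer index or NAK code; bits 23:0 carry the MSN.
	 */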
	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		hfi1_stop_rc_timers(qp);
		to =
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
					  HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the receive context
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct hfi1_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
			__be32 *p = ohdr->u.at.atomic_ack_eth;

			val = ((u64)be32_to_cpu(p[0]) << 32) |
				be32_to_cpu(p[1]);
		} else {
			val = 0;
		}
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

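/*
 * log_cca_event() records a congestion (CCA) event for later retrieval by
 * management software: it marks the SL in the per-port
 * threshold_cong_event_map, bumps the event counter, and writes the QP
 * pair, SL, service type, remote LID and a timestamp into the circular
 * cc_events log (OPA_CONG_LOG_ELEMS entries).
 */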
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: information about the packet (receive context, headers,
 *	    payload, length, and QP)
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret, is_fecn = 0;
	int copy_last = 0;
	u32 rkey;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
		return;

	is_fecn = process_ecn(qp, packet, false);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
			    hdrsize, pmtu, rcd);
		if (is_fecn)
			goto send_ack;
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

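	/*
	 * The packet is in sequence and its opcode is valid for the current
	 * receive state.  If this is the first request seen while the QP is
	 * still in RTR, flag the connection as established before handling
	 * the opcode below.
	 */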
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/*
		 * Check for invalid length: the payload must be exactly one
		 * PMTU and must not overrun the posted RWQE length.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (bth0 >> 20) & 3;
		/*
		 * Check for invalid length; the LAST payload must be at
		 * least 1 byte.
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;

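	/*
	 * RDMA WRITE handling: the FIRST/ONLY packets carry a RETH, so
	 * validate the rkey against IB_ACCESS_REMOTE_WRITE and set up
	 * r_sge for the payload.  Only the *_WITH_IMMEDIATE variants
	 * consume an RWQE and generate a receive completion; a plain
	 * RDMA write completes silently on the responder.
	 */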
	case OP(RDMA_WRITE_ONLY):
		copy_last = 1;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

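	/*
	 * Atomic requests are executed directly against the responder's
	 * memory: FETCH_ADD uses atomic64_add_return() and COMPARE_SWAP
	 * uses cmpxchg(), so the target address must be naturally aligned
	 * to 8 bytes.  The original value is stashed in the ack queue
	 * entry so a duplicate request can be answered without re-running
	 * the atomic.
	 */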
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = ((u64)be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     be64_to_cpu(ateth->compare_data),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

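	/*
	 * Exit paths: each label below records the NAK state and PSN to
	 * report back to the requester.  RNR and most NAKs are deferred
	 * via rc_defered_ack() so they go out after the receive queue has
	 * drained; access errors fall through to send_ack and are sent
	 * immediately.
	 */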
rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct hfi1_ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct hfi1_other_headers *ohdr;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}