// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
	COMPST_DONE, /* The completer finished successfully */
};

static char *comp_state_name[] = {
	[COMPST_GET_ACK]	= "GET ACK",
	[COMPST_GET_WQE]	= "GET WQE",
	[COMPST_COMP_WQE]	= "COMP WQE",
	[COMPST_COMP_ACK]	= "COMP ACK",
	[COMPST_CHECK_PSN]	= "CHECK PSN",
	[COMPST_CHECK_ACK]	= "CHECK ACK",
	[COMPST_READ]		= "READ",
	[COMPST_ATOMIC]		= "ATOMIC",
	[COMPST_WRITE_SEND]	= "WRITE/SEND",
	[COMPST_UPDATE_COMP]	= "UPDATE COMP",
	[COMPST_ERROR_RETRY]	= "ERROR RETRY",
	[COMPST_RNR_RETRY]	= "RNR RETRY",
	[COMPST_ERROR]		= "ERROR",
	[COMPST_EXIT]		= "EXIT",
	[COMPST_DONE]		= "DONE",
};

static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};

static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		     usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}
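/*
 * Worked example (illustrative only, assuming HZ == 250): an RNR NAK
 * carrying IB_RNR_TIMER_000_32 maps to rnrnak_usec[IB_RNR_TIMER_000_32]
 * == 320 us, and usecs_to_jiffies(320) rounds up to one 4 ms jiffy. The
 * max_t() clamp guarantees that the RNR NAK timer is never armed with a
 * zero delay, even for the shortest encodings on a low-HZ kernel.
 */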
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:			return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:		return IB_WC_RDMA_WRITE;
	case IB_WR_SEND:			return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM:		return IB_WC_SEND;
	case IB_WR_RDMA_READ:			return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:		return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:	return IB_WC_FETCH_ADD;
	case IB_WR_LSO:				return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV:		return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR:			return IB_WC_REG_MR;

	default:
		return 0xff;
	}
}

void retransmit_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
	}
}

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
	if (must_sched != 0)
		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);

	rxe_run_task(&qp->comp.task, must_sched);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}

static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if the response is past the oldest WQE; if it is,
	 * complete send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* response is most likely a retried packet: if it matches an
		 * uncompleted WQE complete it, otherwise ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}
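/*
 * Note (illustrative): PSNs are 24-bit circular sequence numbers (see the
 * uses of BTH_PSN_MASK below), so psn_compare() is a wrap-aware
 * comparison. For example, psn_compare(0x000002, 0xfffffe) is positive:
 * a PSN just past the wrap point still counts as "newer" than one just
 * before it.
 */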
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			/* read retries of partial data may restart from
			 * read response first or response only.
			 */
			if ((pkt->psn == wqe->first_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
			    (wqe->first_psn == wqe->last_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
				break;

			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		fallthrough;
		/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an
		 * AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			wqe->status = IB_WC_FATAL_ERR;
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with
				 * psns before it
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 0);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}

static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;
	else
		return COMPST_UPDATE_COMP;
}
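/*
 * Illustrative trace (assuming a 1024-byte MTU and a 3072-byte RDMA READ;
 * not driver code): do_read() is entered once per response packet, and
 * copy_data() advances wqe->dma.resid each time:
 *
 *	FIRST  (1024 bytes): resid 3072 -> 2048, returns COMPST_UPDATE_COMP
 *	MIDDLE (1024 bytes): resid 2048 -> 1024, returns COMPST_UPDATE_COMP
 *	LAST   (1024 bytes): resid 1024 -> 0,    returns COMPST_COMP_ACK
 *
 * Only a packet that both drains resid to zero and carries RXE_END_MASK
 * completes the WQE.
 */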
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	int ret;

	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mem_obj, NULL);
	if (ret)
		return COMPST_ERROR;
	else
		return COMPST_COMP_ACK;
}

static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		struct ib_wc *wc = &cqe->ibwc;

		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;
		wc->byte_len = wqe->dma.length;
		wc->qp = &qp->ibqp;
	} else {
		struct ib_uverbs_wc *uwc = &cqe->uibwc;

		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			uwc->wc_flags = IB_WC_WITH_IMM;
		uwc->byte_len = wqe->dma.length;
		uwc->qp_num = qp->ibqp.qp_num;
	}
}

/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_cqe cqe;

	if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    wqe->status != IB_WC_SUCCESS) {
		make_send_cqe(qp, wqe, &cqe);
		advance_consumer(qp->sq.queue);
		rxe_cq_post(qp->scq, &cqe, 0);
	} else {
		advance_consumer(qp->sq.queue);
	}

	if (wqe->wr.opcode == IB_WR_SEND ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_INV)
		rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 0);
	}
}

static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 0);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}

static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}
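/*
 * Summary note (descriptive, not normative): when the QP is reset or has
 * entered the error state, rxe_drain_resp_pkts() below discards any
 * response packets still queued on the QP and, if notify is true, flushes
 * every outstanding send WQE to the CQ with IB_WC_WR_FLUSH_ERR so the
 * consumer can reclaim its resources.
 */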
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	while ((wqe = queue_head(qp->sq.queue))) {
		if (notify) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			do_complete(qp, wqe);
		} else {
			advance_consumer(qp->sq.queue);
		}
	}
}

static void free_pkt(struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	struct rxe_qp *qp = pkt->qp;
	struct ib_device *dev = qp->ibqp.device;

	kfree_skb(skb);
	rxe_drop_ref(qp);
	ib_device_put(dev);
}
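/*
 * rxe_completer() is the body of the completer task: it runs the state
 * machine above for one pass, dequeuing a response packet (if any),
 * matching it against the oldest send WQE, and walking COMPST_* states
 * until it reaches COMPST_DONE (made progress; run again on the next
 * event) or COMPST_EXIT (nothing more to do until a packet or timeout
 * arrives). As consumed by the rxe task layer, a return of 0 asks to be
 * called again immediately, while -EAGAIN stops iteration until the next
 * event.
 */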
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;
	int ret = 0;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		ret = -EAGAIN;
		goto done;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry) {
		ret = -EAGAIN;
		goto done;
	}

	state = COMPST_GET_ACK;

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* re-arm the retransmit timer if
			 * (1) the QP is type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as
			 *     possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			ret = -EAGAIN;
			goto done;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the
			 * send queue if that makes sense and the limits have
			 * not been exceeded. remember that some timeouts are
			 * spurious, since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted)) {
				pr_warn("Retry attempted without a valid wqe\n");
				ret = -EAGAIN;
				goto done;
			}

			/* if we've started a retry, don't start another
			 * retry sequence, unless this is a timeout.
			 */
			if (qp->comp.started_retry &&
			    !qp->comp.timeout_retry)
				goto done;

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					qp->comp.started_retry = 1;
					rxe_run_task(&qp->req.task, 0);
				}
				goto done;

			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				ret = -EAGAIN;
				goto done;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);
			ret = -EAGAIN;
			goto done;
		}
	}

done:
	if (pkt)
		free_pkt(pkt);
	rxe_drop_ref(qp);

	return ret;
}