// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE] = "NONE",
	[RESPST_GET_REQ] = "GET_REQ",
	[RESPST_CHK_PSN] = "CHK_PSN",
	[RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
	[RESPST_CHK_LENGTH] = "CHK_LENGTH",
	[RESPST_CHK_RKEY] = "CHK_RKEY",
	[RESPST_EXECUTE] = "EXECUTE",
	[RESPST_READ_REPLY] = "READ_REPLY",
	[RESPST_COMPLETE] = "COMPLETE",
	[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
	[RESPST_CLEANUP] = "CLEANUP",
	[RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR] = "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH] = "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
	[RESPST_ERROR] = "ERROR",
	[RESPST_RESET] = "RESET",
	[RESPST_DONE] = "DONE",
	[RESPST_EXIT] = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
		     (skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
			ib_device_put(qp->ibqp.device);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}
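
/* Check the PSN of an incoming request against the expected PSN. For RC a
 * PSN ahead of the expected one gets a single out-of-sequence NAK, an old
 * PSN is handled as a duplicate, and an in-sequence PSN clears any pending
 * NAK state. For UC, packets are dropped until the start of the next
 * message.
 */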
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}
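
/* Verify that the opcode is permitted by the QP's remote access flags:
 * RC rejects disallowed reads/writes/atomics with a NAK, while UC silently
 * drops disallowed writes.
 */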
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job to not send
		 * too many read/atomic ops, we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}
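
/* Payload length checking is currently deferred: every QP type falls
 * through to the rkey check, where write lengths are validated against
 * the MTU and the remaining residual.
 */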
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mr *mr = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
	if (!mr) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mr_check_range(mr, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mr;
	return RESPST_EXECUTE;

err:
	if (mr)
		rxe_drop_ref(mr);
	return state;
}

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mr_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}
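
/* Copy the packet payload into the target MR at the current write offset
 * and advance the responder's virtual address and residual count.
 */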
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
			  to_mr_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mr *mr = qp->resp.mr;

	if (mr->state != RXE_MR_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check vaddr is 8 bytes aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}
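
/* Allocate and initialize an acknowledge/response packet. The BTH is cloned
 * from the request, the AETH syndrome/MSN and atomic ack payload are filled
 * in as required by the opcode, and the ICRC is either finalized here or
 * handed back through *crcp so the caller can extend it over a read payload.
 */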
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type = RXE_READ_MASK;
		res->replay = 0;

		res->read.va = qp->resp.va;
		res->read.va_org = qp->resp.va;

		res->first_psn = req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn = (req_pkt->psn +
					 (reth_len(req_pkt) + mtu - 1) /
					 mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn = res->first_psn;
		}
		res->cur_psn = req_pkt->psn;

		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr = qp->resp.mr;
		qp->resp.mr = NULL;

		qp->resp.res = res;
		res->state = rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			  payload, from_mr_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	if (bth_pad(&ack_pkt)) {
		struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
		icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
	}
	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}
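
/* Reconstruct the network header that UD consumers expect to find ahead of
 * the received payload, copied from the IPv4 or IPv6 header of the incoming
 * skb.
 */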
static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}
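
/* Post a completion for the current receive WQE. The CQE is built in either
 * the kernel or the user space ib_wc layout depending on who owns the
 * receive CQ, and, for kernel consumers, a Send with Invalidate marks the
 * referenced MR free.
 */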
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (!wqe)
		goto finish;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status = qp->resp.status;
		uwc->qp_num = qp->ibqp.qp_num;
		uwc->wr_id = wqe->wr_id;
	} else {
		wc->status = qp->resp.status;
		wc->qp = &qp->ibqp;
		wc->wr_id = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
			      pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length :
					wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num = qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_mr *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MR_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp = &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num = qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

finish:
	if (unlikely(qp->resp.state == QP_STATE_ERROR))
		return RESPST_CHK_RESOURCE;
	if (unlikely(!pkt))
		return RESPST_DONE;
	if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}
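
/* Send an atomic acknowledge and stash a reference to its skb in a responder
 * resource so that a duplicate atomic request can be answered by
 * retransmitting the same packet.
 */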
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	rxe_add_ref(qp);

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
	       sizeof(skb->cb) - sizeof(ack_pkt));

	skb_get(skb);
	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn = ack_pkt.psn;
	res->cur_psn = ack_pkt.psn;

	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
	}
out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}
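
/* Handle a request whose PSN is behind the expected PSN. Duplicate sends and
 * writes are simply acknowledged again, a duplicate read is replayed from the
 * saved responder resource if it still matches the original, and a duplicate
 * atomic re-sends the cached ack skb.
 */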
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		return RESPST_CLEANUP;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			skb_get(res->atomic.skb);
			/* Resend the result. */
			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C. Both are treated the same in this implementation. */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}
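
/* Drop all queued request packets. If the caller is not notifying the
 * consumer, also discard any posted receive WQEs left on a non-SRQ receive
 * queue.
 */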
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (notify)
		return;

	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
		advance_consumer(qp->rq.queue);
}
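
/* Responder state machine. Invoked from the QP's response task; runs the
 * states above until the packet queue is exhausted (RESPST_EXIT) or the
 * current request completes (RESPST_DONE), mapping error states onto the
 * appropriate NAK and completion classes along the way.
 */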
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					  qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}