Lines matching references to qp (from drivers/infiniband/sw/rxe/rxe_resp.c, the rxe responder)

50 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)  in rxe_resp_queue_pkt()  argument
55 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
58 (skb_queue_len(&qp->req_pkts) > 1); in rxe_resp_queue_pkt()
61 rxe_sched_task(&qp->resp.task); in rxe_resp_queue_pkt()
63 rxe_run_task(&qp->resp.task); in rxe_resp_queue_pkt()
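
The rxe_resp_queue_pkt() lines above append an incoming request skb to qp->req_pkts and then either schedule the responder task or run it inline, depending on whether there is already backlog. A standalone sketch of that decision follows; the threshold and the is_read_request flag are illustrative assumptions, not the driver's exact predicate.

/* Sketch: defer to a task when there is backlog (or the request is "heavy"),
 * otherwise handle the packet inline.  Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static void run_inline(void)    { puts("process in caller context"); }
static void schedule_task(void) { puts("defer to responder task"); }

static void queue_pkt(unsigned int queue_len_after_add, bool is_read_request)
{
	bool must_sched = is_read_request || queue_len_after_add > 1;

	if (must_sched)
		schedule_task();
	else
		run_inline();
}

int main(void)
{
	queue_pkt(1, false);	/* empty queue, simple request: inline */
	queue_pkt(3, false);	/* backlog present: schedule */
	queue_pkt(1, true);	/* read request: schedule */
	return 0;
}
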
66 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
71 skb = skb_peek(&qp->req_pkts); in get_req()
77 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
80 static enum resp_states check_psn(struct rxe_qp *qp, in check_psn() argument
83 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
84 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_psn()
86 switch (qp_type(qp)) { in check_psn()
89 if (qp->resp.sent_psn_nak) in check_psn()
92 qp->resp.sent_psn_nak = 1; in check_psn()
101 if (qp->resp.sent_psn_nak) in check_psn()
102 qp->resp.sent_psn_nak = 0; in check_psn()
107 if (qp->resp.drop_msg || diff != 0) { in check_psn()
109 qp->resp.drop_msg = 0; in check_psn()
113 qp->resp.drop_msg = 1; in check_psn()
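
check_psn() compares the packet's PSN against the expected qp->resp.psn with psn_compare(), and later states advance the expected PSN with (pkt->psn + 1) & BTH_PSN_MASK. PSNs are 24-bit values, so the comparison must tolerate wraparound; below is a self-contained model of that arithmetic, mirroring the usual shift-and-sign-extend trick rather than quoting the driver's header.

#include <stdint.h>
#include <stdio.h>

#define BTH_PSN_MASK 0xffffffu		/* PSNs occupy 24 bits */

/* Compare two 24-bit PSNs with wraparound: shifting the difference left by
 * 8 moves the sign bit of the 24-bit result into bit 31, so "newer" and
 * "older" stay correct across the 0xffffff -> 0x000000 wrap. */
static int32_t psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
	printf("%d\n", psn_compare(5, 3) > 0);			/* 1 */
	printf("%d\n", psn_compare(0x000002, 0xfffffd) > 0);	/* 1: newer across the wrap */
	printf("%d\n", psn_compare(0xfffffd, 0x000002) < 0);	/* 1: older across the wrap */

	/* Advancing the expected PSN keeps it inside 24 bits: */
	printf("0x%06x\n", (unsigned int)((0xffffffu + 1) & BTH_PSN_MASK));	/* 0x000000 */
	return 0;
}
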
124 static enum resp_states check_op_seq(struct rxe_qp *qp, in check_op_seq() argument
127 switch (qp_type(qp)) { in check_op_seq()
129 switch (qp->resp.opcode) { in check_op_seq()
170 switch (qp->resp.opcode) { in check_op_seq()
201 qp->resp.drop_msg = 1; in check_op_seq()
214 static bool check_qp_attr_access(struct rxe_qp *qp, in check_qp_attr_access() argument
218 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) || in check_qp_attr_access()
220 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) || in check_qp_attr_access()
222 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) in check_qp_attr_access()
229 !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) || in check_qp_attr_access()
231 !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT))) in check_qp_attr_access()
238 static enum resp_states check_op_valid(struct rxe_qp *qp, in check_op_valid() argument
241 switch (qp_type(qp)) { in check_op_valid()
243 if (!check_qp_attr_access(qp, pkt)) in check_op_valid()
250 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) { in check_op_valid()
251 qp->resp.drop_msg = 1; in check_op_valid()
269 static enum resp_states get_srq_wqe(struct rxe_qp *qp) in get_srq_wqe() argument
271 struct rxe_srq *srq = qp->srq; in get_srq_wqe()
293 rxe_dbg_qp(qp, "invalid num_sge in SRQ entry\n"); in get_srq_wqe()
297 memcpy(&qp->resp.srq_wqe, wqe, size); in get_srq_wqe()
299 qp->resp.wqe = &qp->resp.srq_wqe.wqe; in get_srq_wqe()
313 ev.device = qp->ibqp.device; in get_srq_wqe()
314 ev.element.srq = qp->ibqp.srq; in get_srq_wqe()
320 static enum resp_states check_resource(struct rxe_qp *qp, in check_resource() argument
323 struct rxe_srq *srq = qp->srq; in check_resource()
330 if (likely(qp->attr.max_dest_rd_atomic > 0)) in check_resource()
338 return get_srq_wqe(qp); in check_resource()
340 qp->resp.wqe = queue_head(qp->rq.queue, in check_resource()
342 return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR; in check_resource()
348 static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, in rxe_resp_check_length() argument
357 if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) { in rxe_resp_check_length()
362 for (i = 0; i < qp->resp.wqe->dma.num_sge; i++) in rxe_resp_check_length()
363 recv_buffer_len += qp->resp.wqe->dma.sge[i].length; in rxe_resp_check_length()
365 rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n"); in rxe_resp_check_length()
370 if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || in rxe_resp_check_length()
371 (qp_type(qp) == IB_QPT_UC))) { in rxe_resp_check_length()
372 unsigned int mtu = qp->mtu; in rxe_resp_check_length()
378 rxe_dbg_qp(qp, "only packet too long"); in rxe_resp_check_length()
384 rxe_dbg_qp(qp, "first or middle packet not mtu"); in rxe_resp_check_length()
389 rxe_dbg_qp(qp, "last packet zero or too long"); in rxe_resp_check_length()
398 rxe_dbg_qp(qp, "dma length too long"); in rxe_resp_check_length()
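
rxe_resp_check_length() enforces the per-segment payload rules for RC/UC traffic that the debug strings above hint at: a first or middle packet must carry exactly one MTU, a last packet between 1 and mtu bytes, and a single ("only") packet at most mtu bytes. A small standalone model of those rules; the enum names are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum seg { SEG_ONLY, SEG_FIRST, SEG_MIDDLE, SEG_LAST };

/* Payload rules for one segment of a message split at "mtu" bytes. */
static bool payload_ok(enum seg s, unsigned int payload, unsigned int mtu)
{
	switch (s) {
	case SEG_ONLY:   return payload <= mtu;
	case SEG_FIRST:
	case SEG_MIDDLE: return payload == mtu;
	case SEG_LAST:   return payload > 0 && payload <= mtu;
	}
	return false;
}

int main(void)
{
	printf("%d\n", payload_ok(SEG_MIDDLE, 1024, 1024));	/* 1 */
	printf("%d\n", payload_ok(SEG_MIDDLE,  512, 1024));	/* 0: not a full MTU */
	printf("%d\n", payload_ok(SEG_LAST,      0, 1024));	/* 0: last may not be empty */
	return 0;
}
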
414 static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in qp_resp_from_reth() argument
418 qp->resp.va = reth_va(pkt); in qp_resp_from_reth()
419 qp->resp.offset = 0; in qp_resp_from_reth()
420 qp->resp.resid = length; in qp_resp_from_reth()
421 qp->resp.length = length; in qp_resp_from_reth()
423 qp->resp.rkey = 0; in qp_resp_from_reth()
425 qp->resp.rkey = reth_rkey(pkt); in qp_resp_from_reth()
428 static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in qp_resp_from_atmeth() argument
430 qp->resp.va = atmeth_va(pkt); in qp_resp_from_atmeth()
431 qp->resp.offset = 0; in qp_resp_from_atmeth()
432 qp->resp.rkey = atmeth_rkey(pkt); in qp_resp_from_atmeth()
433 qp->resp.resid = sizeof(u64); in qp_resp_from_atmeth()
440 static enum resp_states check_rkey(struct rxe_qp *qp, in check_rkey() argument
449 int mtu = qp->mtu; in check_rkey()
459 qp_resp_from_reth(qp, pkt); in check_rkey()
467 qp_resp_from_reth(qp, pkt); in check_rkey()
474 qp_resp_from_atmeth(qp, pkt); in check_rkey()
486 qp->resp.mr = NULL; in check_rkey()
490 va = qp->resp.va; in check_rkey()
491 rkey = qp->resp.rkey; in check_rkey()
492 resid = qp->resp.resid; in check_rkey()
496 mw = rxe_lookup_mw(qp, access, rkey); in check_rkey()
498 rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey); in check_rkey()
505 rxe_dbg_qp(qp, "MW doesn't have an MR\n"); in check_rkey()
511 qp->resp.offset = mw->addr; in check_rkey()
517 mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); in check_rkey()
519 rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey); in check_rkey()
533 if (mr_check_range(mr, va + qp->resp.offset, resid)) { in check_rkey()
560 WARN_ON_ONCE(qp->resp.mr); in check_rkey()
562 qp->resp.mr = mr; in check_rkey()
566 qp->resp.mr = NULL; in check_rkey()
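
check_rkey() resolves the rkey to an MR (possibly through an MW), then uses mr_check_range() to verify that the requested window fits inside the registered region before caching it in qp->resp.mr. A generic sketch of such a bounds check, with assumed field names and an explicit overflow guard:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The access [va, va + length) must lie inside the region registered at
 * [mr_iova, mr_iova + mr_length).  Illustrative only. */
static bool range_ok(uint64_t mr_iova, uint64_t mr_length,
		     uint64_t va, uint64_t length)
{
	if (length == 0)
		return true;			/* zero-byte access always fits */
	if (va < mr_iova)
		return false;
	if (va + length < va)
		return false;			/* wrapped around 64 bits */
	return va + length <= mr_iova + mr_length;
}

int main(void)
{
	printf("%d\n", range_ok(0x1000, 0x2000, 0x1800, 0x100));	/* 1 */
	printf("%d\n", range_ok(0x1000, 0x2000, 0x2f80, 0x100));	/* 0: runs past the end */
	return 0;
}
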
575 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr, in send_data_in() argument
580 err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma, in send_data_in()
589 static enum resp_states write_data_in(struct rxe_qp *qp, in write_data_in() argument
596 err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset, in write_data_in()
603 qp->resp.va += data_len; in write_data_in()
604 qp->resp.resid -= data_len; in write_data_in()
610 static struct resp_res *rxe_prepare_res(struct rxe_qp *qp, in rxe_prepare_res() argument
617 res = &qp->resp.resources[qp->resp.res_head]; in rxe_prepare_res()
618 rxe_advance_resp_resource(qp); in rxe_prepare_res()
626 res->read.va = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
627 res->read.va_org = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
628 res->read.resid = qp->resp.resid; in rxe_prepare_res()
629 res->read.length = qp->resp.resid; in rxe_prepare_res()
630 res->read.rkey = qp->resp.rkey; in rxe_prepare_res()
632 pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1); in rxe_prepare_res()
646 res->flush.va = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
647 res->flush.length = qp->resp.length; in rxe_prepare_res()
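
rxe_prepare_res() hands out a slot from qp->resp.resources[], a fixed ring sized by max_dest_rd_atomic whose head index wraps so the oldest read/atomic response state gets recycled. A toy model of that ring; the structure fields and fixed depth are assumptions for illustration.

#include <stdio.h>

#define RING_DEPTH 4	/* stands in for qp->attr.max_dest_rd_atomic */

struct res {
	unsigned int first_psn;
	unsigned int last_psn;
	int in_use;
};

static struct res resources[RING_DEPTH];
static unsigned int res_head;

static struct res *prepare_res(unsigned int first_psn, unsigned int last_psn)
{
	struct res *res = &resources[res_head];

	/* The oldest slot is simply recycled once the head wraps. */
	res_head = (res_head + 1) % RING_DEPTH;

	res->first_psn = first_psn;
	res->last_psn = last_psn;
	res->in_use = 1;
	return res;
}

int main(void)
{
	for (unsigned int psn = 0; psn < 6; psn++)
		prepare_res(psn, psn);
	printf("head is now %u\n", res_head);	/* 2: wrapped once past depth 4 */
	return 0;
}
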
655 static enum resp_states process_flush(struct rxe_qp *qp, in process_flush() argument
659 struct rxe_mr *mr = qp->resp.mr; in process_flush()
660 struct resp_res *res = qp->resp.res; in process_flush()
666 res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK); in process_flush()
667 qp->resp.res = res; in process_flush()
688 qp->resp.msn++; in process_flush()
691 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in process_flush()
692 qp->resp.ack_psn = qp->resp.psn; in process_flush()
694 qp->resp.opcode = pkt->opcode; in process_flush()
695 qp->resp.status = IB_WC_SUCCESS; in process_flush()
700 static enum resp_states atomic_reply(struct rxe_qp *qp, in atomic_reply() argument
703 struct rxe_mr *mr = qp->resp.mr; in atomic_reply()
704 struct resp_res *res = qp->resp.res; in atomic_reply()
708 res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK); in atomic_reply()
709 qp->resp.res = res; in atomic_reply()
713 u64 iova = qp->resp.va + qp->resp.offset; in atomic_reply()
722 qp->resp.msn++; in atomic_reply()
725 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in atomic_reply()
726 qp->resp.ack_psn = qp->resp.psn; in atomic_reply()
728 qp->resp.opcode = pkt->opcode; in atomic_reply()
729 qp->resp.status = IB_WC_SUCCESS; in atomic_reply()
735 static enum resp_states atomic_write_reply(struct rxe_qp *qp, in atomic_write_reply() argument
738 struct resp_res *res = qp->resp.res; in atomic_write_reply()
745 res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK); in atomic_write_reply()
746 qp->resp.res = res; in atomic_write_reply()
752 mr = qp->resp.mr; in atomic_write_reply()
754 iova = qp->resp.va + qp->resp.offset; in atomic_write_reply()
760 qp->resp.resid = 0; in atomic_write_reply()
761 qp->resp.msn++; in atomic_write_reply()
764 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in atomic_write_reply()
765 qp->resp.ack_psn = qp->resp.psn; in atomic_write_reply()
767 qp->resp.opcode = pkt->opcode; in atomic_write_reply()
768 qp->resp.status = IB_WC_SUCCESS; in atomic_write_reply()
773 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, in prepare_ack_packet() argument
780 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in prepare_ack_packet()
792 skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack); in prepare_ack_packet()
796 ack->qp = qp; in prepare_ack_packet()
803 qp->attr.dest_qp_num, 0, psn); in prepare_ack_packet()
807 aeth_set_msn(ack, qp->resp.msn); in prepare_ack_packet()
811 atmack_set_orig(ack, qp->resp.res->atomic.orig_val); in prepare_ack_packet()
813 err = rxe_prepare(&qp->pri_av, ack, skb); in prepare_ack_packet()
839 static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey) in rxe_recheck_mr() argument
841 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_recheck_mr()
878 static enum resp_states read_reply(struct rxe_qp *qp, in read_reply() argument
883 int mtu = qp->mtu; in read_reply()
888 struct resp_res *res = qp->resp.res; in read_reply()
892 res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK); in read_reply()
893 qp->resp.res = res; in read_reply()
897 if (!res->replay || qp->resp.length == 0) { in read_reply()
902 mr = qp->resp.mr; in read_reply()
903 qp->resp.mr = NULL; in read_reply()
905 mr = rxe_recheck_mr(qp, res->read.rkey); in read_reply()
919 mr = rxe_recheck_mr(qp, res->read.rkey); in read_reply()
933 skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload, in read_reply()
955 err = rxe_xmit_packet(qp, &ack_pkt, skb); in read_reply()
968 qp->resp.res = NULL; in read_reply()
970 qp->resp.opcode = -1; in read_reply()
971 if (psn_compare(res->cur_psn, qp->resp.psn) >= 0) in read_reply()
972 qp->resp.psn = res->cur_psn; in read_reply()
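
read_reply() streams a read response back in segments of at most one MTU, using the pkts = max((reth_len + mtu - 1)/mtu, 1) count prepared in rxe_prepare_res() and choosing FIRST/MIDDLE/LAST (or a single ONLY) reply opcodes as the residual length drains. A standalone model of that segmentation, not the driver code itself:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1024, resid = 2500;
	unsigned int pkts = (resid + mtu - 1) / mtu;

	if (pkts == 0)
		pkts = 1;	/* a zero-length read still produces one ONLY reply */

	for (unsigned int i = 0; i < pkts; i++) {
		unsigned int payload = resid > mtu ? mtu : resid;
		const char *kind = (pkts == 1) ? "ONLY" :
				   (i == 0) ? "FIRST" :
				   (i == pkts - 1) ? "LAST" : "MIDDLE";

		printf("pkt %u: %-6s payload %u\n", i, kind, payload);
		resid -= payload;
	}
	return 0;
}
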
982 static int invalidate_rkey(struct rxe_qp *qp, u32 rkey) in invalidate_rkey() argument
985 return rxe_invalidate_mw(qp, rkey); in invalidate_rkey()
987 return rxe_invalidate_mr(qp, rkey); in invalidate_rkey()
993 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in execute() argument
1000 if (qp_type(qp) == IB_QPT_UD || in execute()
1001 qp_type(qp) == IB_QPT_GSI) { in execute()
1007 err = send_data_in(qp, &hdr, sizeof(hdr)); in execute()
1009 err = send_data_in(qp, ipv6_hdr(skb), in execute()
1015 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt)); in execute()
1019 err = write_data_in(qp, pkt); in execute()
1024 qp->resp.msn++; in execute()
1040 err = invalidate_rkey(qp, rkey); in execute()
1047 qp->resp.msn++; in execute()
1050 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in execute()
1051 qp->resp.ack_psn = qp->resp.psn; in execute()
1053 qp->resp.opcode = pkt->opcode; in execute()
1054 qp->resp.status = IB_WC_SUCCESS; in execute()
1058 else if (qp_type(qp) == IB_QPT_RC) in execute()
1064 static enum resp_states do_complete(struct rxe_qp *qp, in do_complete() argument
1070 struct rxe_recv_wqe *wqe = qp->resp.wqe; in do_complete()
1071 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
1079 if (qp->rcq->is_user) { in do_complete()
1080 uwc->status = qp->resp.status; in do_complete()
1081 uwc->qp_num = qp->ibqp.qp_num; in do_complete()
1084 wc->status = qp->resp.status; in do_complete()
1085 wc->qp = &qp->ibqp; in do_complete()
1096 qp->resp.length : wqe->dma.length - wqe->dma.resid; in do_complete()
1101 if (qp->rcq->is_user) { in do_complete()
1117 uwc->port_num = qp->attr.port_num; in do_complete()
1145 wc->port_num = qp->attr.port_num; in do_complete()
1149 rxe_err_qp(qp, "non-flush error status = %d", in do_complete()
1154 if (!qp->srq) in do_complete()
1155 queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT); in do_complete()
1157 qp->resp.wqe = NULL; in do_complete()
1159 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1)) in do_complete()
1163 spin_lock_irqsave(&qp->state_lock, flags); in do_complete()
1164 if (unlikely(qp_state(qp) == IB_QPS_ERR)) { in do_complete()
1165 spin_unlock_irqrestore(&qp->state_lock, flags); in do_complete()
1168 spin_unlock_irqrestore(&qp->state_lock, flags); in do_complete()
1172 if (qp_type(qp) == IB_QPT_RC) in do_complete()
1179 static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn, in send_common_ack() argument
1186 skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome); in send_common_ack()
1190 err = rxe_xmit_packet(qp, &ack_pkt, skb); in send_common_ack()
1192 rxe_dbg_qp(qp, "Failed sending %s\n", msg); in send_common_ack()
1197 static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_ack() argument
1199 return send_common_ack(qp, syndrome, psn, in send_ack()
1203 static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_atomic_ack() argument
1205 int ret = send_common_ack(qp, syndrome, psn, in send_atomic_ack()
1211 qp->resp.res = NULL; in send_atomic_ack()
1215 static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_read_response_ack() argument
1217 int ret = send_common_ack(qp, syndrome, psn, in send_read_response_ack()
1224 qp->resp.res = NULL; in send_read_response_ack()
1228 static enum resp_states acknowledge(struct rxe_qp *qp, in acknowledge() argument
1231 if (qp_type(qp) != IB_QPT_RC) in acknowledge()
1234 if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED) in acknowledge()
1235 send_ack(qp, qp->resp.aeth_syndrome, pkt->psn); in acknowledge()
1237 send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1239 send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1241 send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1246 static enum resp_states cleanup(struct rxe_qp *qp, in cleanup() argument
1252 skb = skb_dequeue(&qp->req_pkts); in cleanup()
1253 rxe_put(qp); in cleanup()
1255 ib_device_put(qp->ibqp.device); in cleanup()
1258 if (qp->resp.mr) { in cleanup()
1259 rxe_put(qp->resp.mr); in cleanup()
1260 qp->resp.mr = NULL; in cleanup()
1266 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn) in find_resource() argument
1270 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in find_resource()
1271 struct resp_res *res = &qp->resp.resources[i]; in find_resource()
1285 static enum resp_states duplicate_request(struct rxe_qp *qp, in duplicate_request() argument
1289 u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK; in duplicate_request()
1294 send_ack(qp, AETH_ACK_UNLIMITED, prev_psn); in duplicate_request()
1300 res = find_resource(qp, pkt->psn); in duplicate_request()
1304 qp->resp.res = res; in duplicate_request()
1315 res = find_resource(qp, pkt->psn); in duplicate_request()
1354 qp->resp.res = res; in duplicate_request()
1362 res = find_resource(qp, pkt->psn); in duplicate_request()
1366 qp->resp.res = res; in duplicate_request()
1382 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome, in do_class_ac_error() argument
1385 qp->resp.aeth_syndrome = syndrome; in do_class_ac_error()
1386 qp->resp.status = status; in do_class_ac_error()
1389 qp->resp.goto_error = 1; in do_class_ac_error()
1392 static enum resp_states do_class_d1e_error(struct rxe_qp *qp) in do_class_d1e_error() argument
1395 if (qp->srq) { in do_class_d1e_error()
1397 qp->resp.drop_msg = 1; in do_class_d1e_error()
1398 if (qp->resp.wqe) { in do_class_d1e_error()
1399 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in do_class_d1e_error()
1410 if (qp->resp.wqe) { in do_class_d1e_error()
1411 qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length; in do_class_d1e_error()
1412 qp->resp.wqe->dma.cur_sge = 0; in do_class_d1e_error()
1413 qp->resp.wqe->dma.sge_offset = 0; in do_class_d1e_error()
1414 qp->resp.opcode = -1; in do_class_d1e_error()
1417 if (qp->resp.mr) { in do_class_d1e_error()
1418 rxe_put(qp->resp.mr); in do_class_d1e_error()
1419 qp->resp.mr = NULL; in do_class_d1e_error()
1427 static void drain_req_pkts(struct rxe_qp *qp) in drain_req_pkts() argument
1431 while ((skb = skb_dequeue(&qp->req_pkts))) { in drain_req_pkts()
1432 rxe_put(qp); in drain_req_pkts()
1434 ib_device_put(qp->ibqp.device); in drain_req_pkts()
1439 static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) in flush_recv_wqe() argument
1446 if (qp->rcq->is_user) { in flush_recv_wqe()
1449 uwc->qp_num = qp_num(qp); in flush_recv_wqe()
1453 wc->qp = &qp->ibqp; in flush_recv_wqe()
1456 err = rxe_cq_post(qp->rcq, &cqe, 0); in flush_recv_wqe()
1458 rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err); in flush_recv_wqe()
1467 static void flush_recv_queue(struct rxe_qp *qp, bool notify) in flush_recv_queue() argument
1469 struct rxe_queue *q = qp->rq.queue; in flush_recv_queue()
1473 if (qp->srq) { in flush_recv_queue()
1474 if (notify && qp->ibqp.event_handler) { in flush_recv_queue()
1477 ev.device = qp->ibqp.device; in flush_recv_queue()
1478 ev.element.qp = &qp->ibqp; in flush_recv_queue()
1480 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in flush_recv_queue()
1486 if (!qp->rq.queue) in flush_recv_queue()
1491 err = flush_recv_wqe(qp, wqe); in flush_recv_queue()
1498 qp->resp.wqe = NULL; in flush_recv_queue()
1501 int rxe_responder(struct rxe_qp *qp) in rxe_responder() argument
1503 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_responder()
1509 spin_lock_irqsave(&qp->state_lock, flags); in rxe_responder()
1510 if (!qp->valid || qp_state(qp) == IB_QPS_ERR || in rxe_responder()
1511 qp_state(qp) == IB_QPS_RESET) { in rxe_responder()
1512 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); in rxe_responder()
1514 drain_req_pkts(qp); in rxe_responder()
1515 flush_recv_queue(qp, notify); in rxe_responder()
1516 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_responder()
1519 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_responder()
1521 qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; in rxe_responder()
1526 rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]); in rxe_responder()
1529 state = get_req(qp, &pkt); in rxe_responder()
1532 state = check_psn(qp, pkt); in rxe_responder()
1535 state = check_op_seq(qp, pkt); in rxe_responder()
1538 state = check_op_valid(qp, pkt); in rxe_responder()
1541 state = check_resource(qp, pkt); in rxe_responder()
1544 state = rxe_resp_check_length(qp, pkt); in rxe_responder()
1547 state = check_rkey(qp, pkt); in rxe_responder()
1550 state = execute(qp, pkt); in rxe_responder()
1553 state = do_complete(qp, pkt); in rxe_responder()
1556 state = read_reply(qp, pkt); in rxe_responder()
1559 state = atomic_reply(qp, pkt); in rxe_responder()
1562 state = atomic_write_reply(qp, pkt); in rxe_responder()
1565 state = process_flush(qp, pkt); in rxe_responder()
1568 state = acknowledge(qp, pkt); in rxe_responder()
1571 state = cleanup(qp, pkt); in rxe_responder()
1574 state = duplicate_request(qp, pkt); in rxe_responder()
1578 send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); in rxe_responder()
1588 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_responder()
1594 state = do_class_d1e_error(qp); in rxe_responder()
1597 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1600 send_ack(qp, AETH_RNR_NAK | in rxe_responder()
1602 qp->attr.min_rnr_timer), in rxe_responder()
1606 qp->resp.drop_msg = 1; in rxe_responder()
1612 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1614 do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR, in rxe_responder()
1618 qp->resp.drop_msg = 1; in rxe_responder()
1619 if (qp->srq) { in rxe_responder()
1621 qp->resp.status = IB_WC_REM_ACCESS_ERR; in rxe_responder()
1632 qp->resp.goto_error = 1; in rxe_responder()
1633 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in rxe_responder()
1638 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1640 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_responder()
1643 } else if (qp->srq) { in rxe_responder()
1645 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in rxe_responder()
1649 qp->resp.drop_msg = 1; in rxe_responder()
1656 do_class_ac_error(qp, AETH_NAK_REM_OP_ERR, in rxe_responder()
1667 if (qp->resp.goto_error) { in rxe_responder()
1675 if (qp->resp.goto_error) { in rxe_responder()
1683 qp->resp.goto_error = 0; in rxe_responder()
1684 rxe_dbg_qp(qp, "moved to error state\n"); in rxe_responder()
1685 rxe_qp_error(qp); in rxe_responder()
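
rxe_responder() itself is driven as a state machine: it pulls the next request from the queue and loops over RESPST_* states, each handler returning the next state, until the request is acknowledged and cleaned up or an error path moves the QP to the error state. A minimal toy of that driving loop; the states and transitions here are a reduced subset for illustration, not the real RESPST_* set.

#include <stdio.h>

enum state { ST_GET_REQ, ST_CHK_PSN, ST_EXECUTE, ST_ACK, ST_CLEANUP, ST_DONE };

/* Each handler would do real work and pick the next state; here the
 * transition table is collapsed into one function. */
static enum state step(enum state s)
{
	switch (s) {
	case ST_GET_REQ: return ST_CHK_PSN;
	case ST_CHK_PSN: return ST_EXECUTE;
	case ST_EXECUTE: return ST_ACK;
	case ST_ACK:     return ST_CLEANUP;
	case ST_CLEANUP: return ST_DONE;
	default:         return ST_DONE;
	}
}

int main(void)
{
	enum state s = ST_GET_REQ;

	while (s != ST_DONE) {
		printf("state = %d\n", s);
		s = step(s);
	}
	return 0;
}
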