Lines matching refs:qp in drivers/infiniband/sw/rxe/rxe_comp.c (Linux soft-RoCE completer)

117 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); in retransmit_timer() local
120 rxe_dbg_qp(qp, "retransmit timer fired\n"); in retransmit_timer()
122 spin_lock_irqsave(&qp->state_lock, flags); in retransmit_timer()
123 if (qp->valid) { in retransmit_timer()
124 qp->comp.timeout = 1; in retransmit_timer()
125 rxe_sched_task(&qp->comp.task); in retransmit_timer()
127 spin_unlock_irqrestore(&qp->state_lock, flags); in retransmit_timer()
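
The matches above (lines 117-127) cover nearly all of the retransmit timer callback. Reassembled as a sketch, with the elided declarations filled in as assumptions:

```c
static void retransmit_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
	unsigned long flags;

	rxe_dbg_qp(qp, "retransmit timer fired\n");

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		/* flag the timeout and let the completer task decide
		 * whether a retry is actually needed */
		qp->comp.timeout = 1;
		rxe_sched_task(&qp->comp.task);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}
```
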
130 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_comp_queue_pkt() argument
134 must_sched = skb_queue_len(&qp->resp_pkts) > 0; in rxe_comp_queue_pkt()
138 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
141 rxe_sched_task(&qp->comp.task); in rxe_comp_queue_pkt()
143 rxe_run_task(&qp->comp.task); in rxe_comp_queue_pkt()
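
Lines 130-143 show the entry point that hands a response packet to the completer. A plausible reconstruction (whatever sits in the gaps around lines 135-142, e.g. counter updates, is omitted):

```c
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;

	/* if responses are already backed up, defer to task context
	 * instead of running the completer inline */
	must_sched = skb_queue_len(&qp->resp_pkts) > 0;

	skb_queue_tail(&qp->resp_pkts, skb);

	if (must_sched)
		rxe_sched_task(&qp->comp.task);
	else
		rxe_run_task(&qp->comp.task);
}
```
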
146 static inline enum comp_state get_wqe(struct rxe_qp *qp, in get_wqe() argument
155 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in get_wqe()
174 static inline void reset_retry_counters(struct rxe_qp *qp) in reset_retry_counters() argument
176 qp->comp.retry_cnt = qp->attr.retry_cnt; in reset_retry_counters()
177 qp->comp.rnr_retry = qp->attr.rnr_retry; in reset_retry_counters()
178 qp->comp.started_retry = 0; in reset_retry_counters()
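
get_wqe() (lines 146-155) peeks at the oldest send WQE, and reset_retry_counters() (lines 174-178) is fully visible in the matches. The sketch below fills in the missing pieces; the COMPST_* state names and the simplified return logic in get_wqe() are assumptions, not the driver's exact checks:

```c
static enum comp_state get_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			       struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* the consumer end of the send queue holds the oldest
	 * not-yet-completed WQE, if there is one */
	wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
	*wqe_p = wqe;

	/* simplified: the real helper also looks at wqe->state; here we
	 * only continue when there is both a WQE and a response packet */
	return (wqe && pkt) ? COMPST_CHECK_PSN : COMPST_EXIT;
}

/* an ACK that makes forward progress restores the retry budgets
 * from the QP attributes */
static void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}
```
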
181 static inline enum comp_state check_psn(struct rxe_qp *qp, in check_psn() argument
196 reset_retry_counters(qp); in check_psn()
204 diff = psn_compare(pkt->psn, qp->comp.psn); in check_psn()
212 (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST || in check_psn()
213 qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE)) in check_psn()
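
check_psn() (lines 181-213) classifies the response PSN relative to qp->comp.psn. A simplified sketch of that comparison follows; the branch around line 196, where a response that completes an older send/write also calls reset_retry_counters(), is elided, and the COMPST_* names are assumptions:

```c
static enum comp_state check_psn(struct rxe_qp *qp,
				 struct rxe_pkt_info *pkt,
				 struct rxe_send_wqe *wqe)
{
	int diff = psn_compare(pkt->psn, qp->comp.psn);

	if (diff < 0) {
		/* an old (likely retried) response is normally ignored,
		 * unless it lands in the middle of a multi-packet RDMA
		 * READ response, which must still be examined */
		if (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST ||
		    qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE)
			return COMPST_CHECK_ACK;
		return COMPST_DONE;
	}

	if (diff > 0)
		return COMPST_DONE;	/* ahead of what we expect; drop it */

	return COMPST_CHECK_ACK;	/* exactly the expected PSN */
}
```
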
224 static inline enum comp_state check_ack(struct rxe_qp *qp, in check_ack() argument
230 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_ack()
233 switch (qp->comp.opcode) { in check_ack()
290 reset_retry_counters(qp); in check_ack()
302 reset_retry_counters(qp); in check_ack()
309 reset_retry_counters(qp); in check_ack()
322 if (psn_compare(pkt->psn, qp->comp.psn) > 0) { in check_ack()
325 qp->comp.psn = pkt->psn; in check_ack()
326 if (qp->req.wait_psn) { in check_ack()
327 qp->req.wait_psn = 0; in check_ack()
328 rxe_sched_task(&qp->req.task); in check_ack()
346 rxe_dbg_qp(qp, "unexpected nak %x\n", syn); in check_ack()
357 rxe_dbg_qp(qp, "unexpected opcode\n"); in check_ack()
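
check_ack() (lines 224-357) is the largest helper: it first validates that a multi-packet READ response sequence is not interrupted (the switch on qp->comp.opcode at line 233), then classifies the AETH syndrome of an ACK packet. The condensed sketch below keeps only the syndrome handling; the AETH_* and COMPST_* constants are recalled from the driver's headers and should be treated as assumptions:

```c
static enum comp_state check_ack(struct rxe_qp *qp,
				 struct rxe_pkt_info *pkt,
				 struct rxe_send_wqe *wqe)
{
	unsigned int syn;

	/* elided: reject a packet that breaks an in-progress
	 * READ FIRST/MIDDLE/LAST sequence (switch on qp->comp.opcode) */

	switch (pkt->opcode) {
	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);	/* forward progress */
			return COMPST_WRITE_SEND;
		case AETH_RNR_NAK:
			return COMPST_RNR_RETRY;
		case AETH_NAK:
			if (syn == AETH_NAK_PSN_SEQ_ERROR) {
				/* the responder is ahead of us: adopt its
				 * PSN and wake a requester stalled on it
				 * (lines 322-328), then go retry */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_sched_task(&qp->req.task);
					}
				}
				return COMPST_ERROR_RETRY;
			}
			rxe_dbg_qp(qp, "unexpected nak %x\n", syn);
			return COMPST_ERROR;
		}
		break;
	default:
		rxe_dbg_qp(qp, "unexpected opcode\n");
	}

	return COMPST_ERROR;
}
```
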
363 static inline enum comp_state do_read(struct rxe_qp *qp, in do_read() argument
369 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_read()
383 static inline enum comp_state do_atomic(struct rxe_qp *qp, in do_atomic() argument
391 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_atomic()
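
do_read() and do_atomic() (lines 363-391) both land the responder's payload in the requester's local buffers via copy_data(). A sketch, assuming the RXE_TO_MR_OBJ copy direction and the payload/atomic accessors from rxe_hdr.h; the real do_read() additionally decides between completing the WQE and merely updating comp.psn depending on whether this was the last response packet:

```c
/* copy a READ response payload into the WQE's scatter list */
static enum comp_state do_read(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			       struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	return COMPST_UPDATE_COMP;
}

/* copy the 8-byte "original value" carried by an atomic ACK */
static enum comp_state do_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
				 struct rxe_send_wqe *wqe)
{
	u64 atomic_orig = atmack_orig(pkt);
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	return COMPST_COMP_ACK;
}
```
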
402 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in make_send_cqe() argument
410 if (!qp->is_user) { in make_send_cqe()
413 wc->qp = &qp->ibqp; in make_send_cqe()
417 uwc->qp_num = qp->ibqp.qp_num; in make_send_cqe()
421 if (!qp->is_user) { in make_send_cqe()
436 rxe_err_qp(qp, "non-flush error status = %d", in make_send_cqe()
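
make_send_cqe() (lines 402-436) fills a struct rxe_cqe, using the kernel-facing struct ib_wc for kernel QPs and the uverbs layout for user QPs. Sketch below; the rxe_cqe union members and wqe fields are recalled and should be treated as assumptions, and the success-path opcode/byte-count translation is elided:

```c
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	struct ib_wc *wc = &cqe->ibwc;
	struct ib_uverbs_wc *uwc = &cqe->uibwc;

	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->qp = &qp->ibqp;
	} else {
		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->qp_num = qp->ibqp.qp_num;
	}

	if (wqe->status == IB_WC_SUCCESS) {
		/* elided: fill in wc->opcode / byte_len (kernel) or the
		 * uverbs equivalents (user); this is what the second
		 * !qp->is_user test at line 421 is for */
	} else if (wqe->status != IB_WC_WR_FLUSH_ERR) {
		/* anything other than a flush is worth complaining about */
		rxe_err_qp(qp, "non-flush error status = %d", wqe->status);
	}
}
```
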
449 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in do_complete() argument
451 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
456 post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) || in do_complete()
461 make_send_cqe(qp, wqe, &cqe); in do_complete()
463 queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in do_complete()
466 rxe_cq_post(qp->scq, &cqe, 0); in do_complete()
477 if (qp->req.wait_fence) { in do_complete()
478 qp->req.wait_fence = 0; in do_complete()
479 rxe_sched_task(&qp->req.task); in do_complete()
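
do_complete() (lines 449-479) retires the WQE: it posts a CQE only for signaled (or failed) work requests, always advances the SQ consumer index, and unblocks a requester that is waiting on a fence. Sketch, with the send-counter update suggested by the to_rdev() reference at line 451 reduced to a comment:

```c
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_cqe cqe;
	bool post;

	/* unsignaled successful work requests do not generate a CQE */
	post = (qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	       (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	       wqe->status != IB_WC_SUCCESS;

	if (post)
		make_send_cqe(qp, wqe, &cqe);

	/* the SQ slot is reclaimed whether or not a CQE is posted */
	queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);

	if (post)
		rxe_cq_post(qp->scq, &cqe, 0);

	/* elided: bump a per-device send counter, which is what
	 * to_rdev(qp->ibqp.device) at line 451 is used for */

	/* completing anything may satisfy a fenced WQE in the requester */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_sched_task(&qp->req.task);
	}
}
```
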
483 static void comp_check_sq_drain_done(struct rxe_qp *qp) in comp_check_sq_drain_done() argument
487 spin_lock_irqsave(&qp->state_lock, flags); in comp_check_sq_drain_done()
488 if (unlikely(qp_state(qp) == IB_QPS_SQD)) { in comp_check_sq_drain_done()
489 if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) { in comp_check_sq_drain_done()
490 qp->attr.sq_draining = 0; in comp_check_sq_drain_done()
491 spin_unlock_irqrestore(&qp->state_lock, flags); in comp_check_sq_drain_done()
493 if (qp->ibqp.event_handler) { in comp_check_sq_drain_done()
496 ev.device = qp->ibqp.device; in comp_check_sq_drain_done()
497 ev.element.qp = &qp->ibqp; in comp_check_sq_drain_done()
499 qp->ibqp.event_handler(&ev, in comp_check_sq_drain_done()
500 qp->ibqp.qp_context); in comp_check_sq_drain_done()
505 spin_unlock_irqrestore(&qp->state_lock, flags); in comp_check_sq_drain_done()
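
comp_check_sq_drain_done() (lines 483-505) is almost entirely visible in the matches. Reassembled, with the event type filled in as IB_EVENT_SQ_DRAINED (an assumption consistent with the drain logic):

```c
static void comp_check_sq_drain_done(struct rxe_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
		/* the SQ is drained once every requested PSN is acked */
		if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
			qp->attr.sq_draining = 0;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
						       qp->ibqp.qp_context);
			}
			return;
		}
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}
```
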
508 static inline enum comp_state complete_ack(struct rxe_qp *qp, in complete_ack() argument
514 atomic_inc(&qp->req.rd_atomic); in complete_ack()
515 if (qp->req.need_rd_atomic) { in complete_ack()
516 qp->comp.timeout_retry = 0; in complete_ack()
517 qp->req.need_rd_atomic = 0; in complete_ack()
518 rxe_sched_task(&qp->req.task); in complete_ack()
522 comp_check_sq_drain_done(qp); in complete_ack()
524 do_complete(qp, wqe); in complete_ack()
526 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in complete_ack()
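
complete_ack() (lines 508-526) returns a read/atomic credit to the requester, checks for SQ-drain completion, and then retires the WQE. Sketch; the wqe->has_rd_atomic guard and the COMPST_* return values are assumptions:

```c
static enum comp_state complete_ack(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt,
				    struct rxe_send_wqe *wqe)
{
	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);	/* give the credit back */
		if (qp->req.need_rd_atomic) {
			/* the requester was stalled waiting for a credit */
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_sched_task(&qp->req.task);
		}
	}

	comp_check_sq_drain_done(qp);

	do_complete(qp, wqe);

	/* only move on to updating comp.psn if this packet is current */
	return (psn_compare(pkt->psn, qp->comp.psn) >= 0) ?
		COMPST_UPDATE_COMP : COMPST_DONE;
}
```
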
532 static inline enum comp_state complete_wqe(struct rxe_qp *qp, in complete_wqe() argument
537 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) { in complete_wqe()
538 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK; in complete_wqe()
539 qp->comp.opcode = -1; in complete_wqe()
542 if (qp->req.wait_psn) { in complete_wqe()
543 qp->req.wait_psn = 0; in complete_wqe()
544 rxe_sched_task(&qp->req.task); in complete_wqe()
548 do_complete(qp, wqe); in complete_wqe()
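
complete_wqe() (lines 532-548) is used when a WQE can be retired ahead of an explicit response for its last packet: comp.psn jumps past the WQE's last PSN and the requester is woken if it was waiting on that PSN. Sketch; the wqe_state_pending check and the return state are assumptions:

```c
static enum comp_state complete_wqe(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt,
				    struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;	/* no response stream in progress */
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_sched_task(&qp->req.task);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}
```
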
554 static void drain_resp_pkts(struct rxe_qp *qp) in drain_resp_pkts() argument
558 while ((skb = skb_dequeue(&qp->resp_pkts))) { in drain_resp_pkts()
559 rxe_put(qp); in drain_resp_pkts()
561 ib_device_put(qp->ibqp.device); in drain_resp_pkts()
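
drain_resp_pkts() (lines 554-561) throws away queued response packets when the QP is errored or being torn down, dropping the references taken when each packet was queued. The kfree_skb() in the loop body is an assumption implied by the dequeue:

```c
static void drain_resp_pkts(struct rxe_qp *qp)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_put(qp);			/* ref taken at enqueue time */
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);	/* matching device ref */
	}
}
```
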
566 static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in flush_send_wqe() argument
573 if (qp->is_user) { in flush_send_wqe()
576 uwc->qp_num = qp->ibqp.qp_num; in flush_send_wqe()
580 wc->qp = &qp->ibqp; in flush_send_wqe()
583 err = rxe_cq_post(qp->scq, &cqe, 0); in flush_send_wqe()
585 rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err); in flush_send_wqe()
594 static void flush_send_queue(struct rxe_qp *qp, bool notify) in flush_send_queue() argument
597 struct rxe_queue *q = qp->sq.queue; in flush_send_queue()
601 if (!qp->sq.queue) in flush_send_queue()
606 err = flush_send_wqe(qp, wqe); in flush_send_queue()
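
flush_send_wqe() and flush_send_queue() (lines 566-606) implement the error-state behaviour: every remaining send WQE is completed with IB_WC_WR_FLUSH_ERR (when notify is set) and then consumed. Sketch; the loop shape in flush_send_queue() and the wr_id/status field names are assumptions:

```c
static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_cqe cqe = {};
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	int err;

	if (qp->is_user) {
		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = IB_WC_WR_FLUSH_ERR;
		uwc->qp_num = qp->ibqp.qp_num;
	} else {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->qp = &qp->ibqp;
	}

	err = rxe_cq_post(qp->scq, &cqe, 0);
	if (err)
		rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);

	return err;
}

static void flush_send_queue(struct rxe_qp *qp, bool notify)
{
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_send_wqe *wqe;
	int err;

	/* send queue never got created: nothing to flush */
	if (!qp->sq.queue)
		return;

	while ((wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT))) {
		if (notify) {
			err = flush_send_wqe(qp, wqe);
			if (err)
				notify = false;	/* stop posting after a CQ error */
		}
		queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	}
}
```
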
617 struct rxe_qp *qp = pkt->qp; in free_pkt() local
618 struct ib_device *dev = qp->ibqp.device; in free_pkt()
621 rxe_put(qp); in free_pkt()
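
free_pkt() (lines 617-621) releases a response packet together with the QP and device references pinned while it sat on the queue; the kfree_skb() and PKT_TO_SKB() are assumptions implied by the surrounding matches:

```c
static void free_pkt(struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	struct rxe_qp *qp = pkt->qp;
	struct ib_device *dev = qp->ibqp.device;

	kfree_skb(skb);
	rxe_put(qp);
	ib_device_put(dev);
}
```
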
633 static void reset_retry_timer(struct rxe_qp *qp) in reset_retry_timer() argument
637 if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) { in reset_retry_timer()
638 spin_lock_irqsave(&qp->state_lock, flags); in reset_retry_timer()
639 if (qp_state(qp) >= IB_QPS_RTS && in reset_retry_timer()
640 psn_compare(qp->req.psn, qp->comp.psn) > 0) in reset_retry_timer()
641 mod_timer(&qp->retrans_timer, in reset_retry_timer()
642 jiffies + qp->qp_timeout_jiffies); in reset_retry_timer()
643 spin_unlock_irqrestore(&qp->state_lock, flags); in reset_retry_timer()
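
reset_retry_timer() (lines 633-643) re-arms the retransmit timer, but only for RC QPs with a configured timeout, only while the QP is at least RTS, and only while unacked PSNs are still outstanding. Reassembled:

```c
static void reset_retry_timer(struct rxe_qp *qp)
{
	unsigned long flags;

	if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
		spin_lock_irqsave(&qp->state_lock, flags);
		/* only arm the timer while something is actually in flight */
		if (qp_state(qp) >= IB_QPS_RTS &&
		    psn_compare(qp->req.psn, qp->comp.psn) > 0)
			mod_timer(&qp->retrans_timer,
				  jiffies + qp->qp_timeout_jiffies);
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}
}
```
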
647 int rxe_completer(struct rxe_qp *qp) in rxe_completer() argument
649 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_completer()
657 spin_lock_irqsave(&qp->state_lock, flags); in rxe_completer()
658 if (!qp->valid || qp_state(qp) == IB_QPS_ERR || in rxe_completer()
659 qp_state(qp) == IB_QPS_RESET) { in rxe_completer()
660 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); in rxe_completer()
662 drain_resp_pkts(qp); in rxe_completer()
663 flush_send_queue(qp, notify); in rxe_completer()
664 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_completer()
667 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_completer()
669 if (qp->comp.timeout) { in rxe_completer()
670 qp->comp.timeout_retry = 1; in rxe_completer()
671 qp->comp.timeout = 0; in rxe_completer()
673 qp->comp.timeout_retry = 0; in rxe_completer()
676 if (qp->req.need_retry) in rxe_completer()
682 rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]); in rxe_completer()
685 skb = skb_dequeue(&qp->resp_pkts); in rxe_completer()
688 qp->comp.timeout_retry = 0; in rxe_completer()
694 state = get_wqe(qp, pkt, &wqe); in rxe_completer()
698 state = check_psn(qp, pkt, wqe); in rxe_completer()
702 state = check_ack(qp, pkt, wqe); in rxe_completer()
706 state = do_read(qp, pkt, wqe); in rxe_completer()
710 state = do_atomic(qp, pkt, wqe); in rxe_completer()
722 state = complete_ack(qp, pkt, wqe); in rxe_completer()
726 state = complete_wqe(qp, pkt, wqe); in rxe_completer()
731 qp->comp.opcode = -1; in rxe_completer()
733 qp->comp.opcode = pkt->opcode; in rxe_completer()
735 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in rxe_completer()
736 qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in rxe_completer()
738 if (qp->req.wait_psn) { in rxe_completer()
739 qp->req.wait_psn = 0; in rxe_completer()
740 rxe_sched_task(&qp->req.task); in rxe_completer()
750 if (qp->comp.timeout_retry && wqe) { in rxe_completer()
755 reset_retry_timer(qp); in rxe_completer()
774 if (qp->comp.started_retry && in rxe_completer()
775 !qp->comp.timeout_retry) in rxe_completer()
778 if (qp->comp.retry_cnt > 0) { in rxe_completer()
779 if (qp->comp.retry_cnt != 7) in rxe_completer()
780 qp->comp.retry_cnt--; in rxe_completer()
786 if (psn_compare(qp->req.psn, in rxe_completer()
787 qp->comp.psn) > 0) { in rxe_completer()
793 qp->req.need_retry = 1; in rxe_completer()
794 qp->comp.started_retry = 1; in rxe_completer()
795 rxe_sched_task(&qp->req.task); in rxe_completer()
808 if (qp->comp.rnr_retry > 0) { in rxe_completer()
809 if (qp->comp.rnr_retry != 7) in rxe_completer()
810 qp->comp.rnr_retry--; in rxe_completer()
815 qp->req.wait_for_rnr_timer = 1; in rxe_completer()
816 rxe_dbg_qp(qp, "set rnr nak timer\n"); in rxe_completer()
818 mod_timer(&qp->rnr_nak_timer, in rxe_completer()
832 do_complete(qp, wqe); in rxe_completer()
833 rxe_qp_error(qp); in rxe_completer()
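
rxe_completer() (lines 647-833) ties all of the above together as a state machine driven by the packets on qp->resp_pkts. The condensed sketch below keeps the overall control flow; the COMPST_* states, the retry/RNR/error arms (lines 750-833) and the return convention are simplified and should be treated as assumptions, not as the driver's exact code:

```c
int rxe_completer(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe = NULL;
	struct rxe_pkt_info *pkt = NULL;
	struct sk_buff *skb = NULL;
	enum comp_state state;
	unsigned long flags;

	/* an invalid, errored or reset QP only needs draining/flushing */
	spin_lock_irqsave(&qp->state_lock, flags);
	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
	    qp_state(qp) == IB_QPS_RESET) {
		bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);

		drain_resp_pkts(qp);
		flush_send_queue(qp, notify);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	/* remember whether we were woken by the retransmit timer */
	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	state = COMPST_GET_ACK;

	while (1) {
		rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;
		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;
		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;
		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;
		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;
		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;
		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;
		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;
		case COMPST_UPDATE_COMP:
			/* record the response opcode and advance the
			 * expected PSN (lines 731-740), waking the
			 * requester if it was waiting on this PSN */
			qp->comp.opcode = pkt->opcode;
			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_sched_task(&qp->req.task);
			}
			state = COMPST_DONE;
			break;
		default:
			/* elided arms: COMPST_WRITE_SEND picks between
			 * COMPST_COMP_ACK and COMPST_UPDATE_COMP;
			 * COMPST_EXIT re-arms the retransmit timer via
			 * reset_retry_timer(); COMPST_ERROR_RETRY decrements
			 * qp->comp.retry_cnt (7 means unlimited) and
			 * reschedules the requester; COMPST_RNR_RETRY
			 * decrements qp->comp.rnr_retry and arms
			 * qp->rnr_nak_timer; COMPST_ERROR calls do_complete()
			 * and rxe_qp_error().  All of them, like COMPST_DONE,
			 * eventually leave the loop. */
			if (pkt)
				free_pkt(pkt);
			return -EAGAIN;
		}
	}
}
```
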