Lines matching references to qp in the hfi1 TID RDMA source. Each entry gives the source line number, the matching code, and the enclosing function; lines that declare qp as a function parameter are marked "argument" and lines that declare it as a local variable are marked "local".

114 static void hfi1_init_trdma_req(struct rvt_qp *qp,
116 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
118 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
119 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
120 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
121 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
123 static int make_tid_rdma_ack(struct rvt_qp *qp,
126 static void hfi1_do_tid_send(struct rvt_qp *qp);
130 struct rvt_qp *qp, u32 psn, int diff, bool fecn);
143 static void tid_rdma_schedule_ack(struct rvt_qp *qp) in tid_rdma_schedule_ack() argument
145 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_schedule_ack()
148 hfi1_schedule_tid_send(qp); in tid_rdma_schedule_ack()
151 static void tid_rdma_trigger_ack(struct rvt_qp *qp) in tid_rdma_trigger_ack() argument
153 validate_r_tid_ack(qp->priv); in tid_rdma_trigger_ack()
154 tid_rdma_schedule_ack(qp); in tid_rdma_trigger_ack()
160 (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) << in tid_rdma_opfn_encode()
162 ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) << in tid_rdma_opfn_encode()
185 p->qp = in tid_rdma_opfn_decode()
193 void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p) in tid_rdma_opfn_init() argument
195 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_opfn_init()
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt; in tid_rdma_opfn_init()
202 p->timeout = qp->timeout; in tid_rdma_opfn_init()
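
The qp field that tid_rdma_opfn_init() builds at line 197 packs a KDETH QP prefix into the upper 16 bits and the receive context number into the lower 16 bits, and tid_rdma_opfn_encode()/tid_rdma_opfn_decode() (lines 160-185) move both halves into and out of a 64-bit OPFN payload. A minimal standalone sketch of that bit-packing shape follows; the prefix value and the OPFN_* masks and shifts are illustrative assumptions, since the real definitions are not part of this listing.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: the real RVT_KDETH_QP_PREFIX and the
 * TID_OPFN_* masks/shifts are defined elsewhere in the driver and are
 * not shown in this listing. */
#define KDETH_QP_PREFIX     0x80u      /* assumed prefix (upper 16 bits)   */
#define OPFN_QP_CTXT_MASK   0xffffULL  /* assumed: low 16 bits of the field */
#define OPFN_QP_CTXT_SHIFT  32         /* assumed position in the payload  */
#define OPFN_QP_KDETH_MASK  0xffULL    /* assumed: bits above the context  */
#define OPFN_QP_KDETH_SHIFT 48         /* assumed position in the payload  */

int main(void)
{
	uint16_t ctxt = 0x0003;                              /* receive context */
	uint32_t qp_field = (KDETH_QP_PREFIX << 16) | ctxt;  /* as at line 197  */

	/* Pack both halves of the qp field into a 64-bit OPFN word,
	 * mirroring the shape of the encode at lines 160-162. */
	uint64_t opfn = ((uint64_t)qp_field & OPFN_QP_CTXT_MASK) << OPFN_QP_CTXT_SHIFT;

	opfn |= (((uint64_t)qp_field >> 16) & OPFN_QP_KDETH_MASK) << OPFN_QP_KDETH_SHIFT;

	printf("qp_field=0x%08x opfn=0x%016llx\n", qp_field,
	       (unsigned long long)opfn);
	return 0;
}
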
206 bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data) in tid_rdma_conn_req() argument
208 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_conn_req()
214 bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data) in tid_rdma_conn_reply() argument
216 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_conn_reply()
246 trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local); in tid_rdma_conn_reply()
247 trace_hfi1_opfn_param(qp, 1, remote); in tid_rdma_conn_reply()
257 priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len); in tid_rdma_conn_reply()
269 bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data) in tid_rdma_conn_resp() argument
273 ret = tid_rdma_conn_reply(qp, *data); in tid_rdma_conn_resp()
281 (void)tid_rdma_conn_req(qp, data); in tid_rdma_conn_resp()
285 void tid_rdma_conn_error(struct rvt_qp *qp) in tid_rdma_conn_error() argument
287 struct hfi1_qp_priv *priv = qp->priv; in tid_rdma_conn_error()
321 struct rvt_qp *qp) in qp_to_rcd() argument
331 if (qp->ibqp.qp_num == 0) in qp_to_rcd()
334 ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift); in qp_to_rcd()
338 int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp, in hfi1_qp_priv_init() argument
341 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_qp_priv_init()
344 qpriv->rcd = qp_to_rcd(rdi, qp); in hfi1_qp_priv_init()
376 for (i = 0; i < qp->s_size; i++) { in hfi1_qp_priv_init()
378 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i); in hfi1_qp_priv_init()
385 hfi1_init_trdma_req(qp, &priv->tid_req); in hfi1_qp_priv_init()
397 hfi1_init_trdma_req(qp, &priv->tid_req); in hfi1_qp_priv_init()
398 priv->tid_req.e.ack = &qp->s_ack_queue[i]; in hfi1_qp_priv_init()
406 qp->s_ack_queue[i].priv = priv; in hfi1_qp_priv_init()
413 void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) in hfi1_qp_priv_tid_free() argument
415 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_qp_priv_tid_free()
419 if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) { in hfi1_qp_priv_tid_free()
420 for (i = 0; i < qp->s_size; i++) { in hfi1_qp_priv_tid_free()
421 wqe = rvt_get_swqe_ptr(qp, i); in hfi1_qp_priv_tid_free()
426 struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv; in hfi1_qp_priv_tid_free()
431 qp->s_ack_queue[i].priv = NULL; in hfi1_qp_priv_tid_free()
505 struct tid_queue *queue, struct rvt_qp *qp) in kernel_tid_waiters() argument
506 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in kernel_tid_waiters()
511 lockdep_assert_held(&qp->s_lock); in kernel_tid_waiters()
514 if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE))) in kernel_tid_waiters()
538 struct tid_queue *queue, struct rvt_qp *qp) in dequeue_tid_waiter() argument
539 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in dequeue_tid_waiter()
541 struct hfi1_qp_priv *priv = qp->priv; in dequeue_tid_waiter()
543 lockdep_assert_held(&qp->s_lock); in dequeue_tid_waiter()
548 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; in dequeue_tid_waiter()
550 rvt_put_qp(qp); in dequeue_tid_waiter()
565 struct tid_queue *queue, struct rvt_qp *qp) in queue_qp_for_tid_wait() argument
566 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in queue_qp_for_tid_wait()
568 struct hfi1_qp_priv *priv = qp->priv; in queue_qp_for_tid_wait()
570 lockdep_assert_held(&qp->s_lock); in queue_qp_for_tid_wait()
573 qp->s_flags |= HFI1_S_WAIT_TID_SPACE; in queue_qp_for_tid_wait()
577 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE); in queue_qp_for_tid_wait()
578 rvt_get_qp(qp); in queue_qp_for_tid_wait()
589 static void __trigger_tid_waiter(struct rvt_qp *qp) in __trigger_tid_waiter() argument
590 __must_hold(&qp->s_lock) in __trigger_tid_waiter()
592 lockdep_assert_held(&qp->s_lock); in __trigger_tid_waiter()
593 if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE)) in __trigger_tid_waiter()
595 trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE); in __trigger_tid_waiter()
596 hfi1_schedule_send(qp); in __trigger_tid_waiter()
611 static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp) in tid_rdma_schedule_tid_wakeup() argument
619 if (!qp) in tid_rdma_schedule_tid_wakeup()
622 priv = qp->priv; in tid_rdma_schedule_tid_wakeup()
623 ibp = to_iport(qp->ibqp.device, qp->port_num); in tid_rdma_schedule_tid_wakeup()
625 dd = dd_from_ibdev(qp->ibqp.device); in tid_rdma_schedule_tid_wakeup()
633 rvt_put_qp(qp); in tid_rdma_schedule_tid_wakeup()
647 struct rvt_qp *qp; in tid_rdma_trigger_resume() local
651 qp = priv->owner; in tid_rdma_trigger_resume()
652 spin_lock_irq(&qp->s_lock); in tid_rdma_trigger_resume()
653 if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) { in tid_rdma_trigger_resume()
654 spin_unlock_irq(&qp->s_lock); in tid_rdma_trigger_resume()
657 spin_unlock_irq(&qp->s_lock); in tid_rdma_trigger_resume()
659 rvt_put_qp(qp); in tid_rdma_trigger_resume()
669 static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue) in _tid_rdma_flush_wait() argument
670 __must_hold(&qp->s_lock) in _tid_rdma_flush_wait()
674 if (!qp) in _tid_rdma_flush_wait()
676 lockdep_assert_held(&qp->s_lock); in _tid_rdma_flush_wait()
677 priv = qp->priv; in _tid_rdma_flush_wait()
678 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; in _tid_rdma_flush_wait()
682 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE; in _tid_rdma_flush_wait()
684 rvt_put_qp(qp); in _tid_rdma_flush_wait()
689 void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp) in hfi1_tid_rdma_flush_wait() argument
690 __must_hold(&qp->s_lock) in hfi1_tid_rdma_flush_wait()
692 struct hfi1_qp_priv *priv = qp->priv; in hfi1_tid_rdma_flush_wait()
694 _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue); in hfi1_tid_rdma_flush_wait()
695 _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue); in hfi1_tid_rdma_flush_wait()
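
kernel_tid_waiters(), dequeue_tid_waiter(), queue_qp_for_tid_wait() and __trigger_tid_waiter() (lines 505-596) share one discipline: a QP that cannot get TID resources marks itself with HFI1_S_WAIT_TID_SPACE, takes a reference so it cannot disappear while parked, and waits on the receive context's queue; when resources free up, the waiter's flag is cleared, the reference dropped, and the sender rescheduled. A minimal sketch of that flag/reference/queue pattern with stand-in types (the driver keeps a FIFO on a list head inside the context; the singly linked stack below is only for brevity):

#include <stdbool.h>
#include <stdio.h>

#define WAIT_TID_SPACE 0x1u         /* plays the role of HFI1_S_WAIT_TID_SPACE */

struct fake_qp {
	unsigned int s_flags;
	int refcount;               /* stands in for rvt_get_qp()/rvt_put_qp() */
	struct fake_qp *next;
};

struct tid_wait_queue {
	struct fake_qp *head;
};

static void queue_for_tid_wait(struct tid_wait_queue *q, struct fake_qp *qp)
{
	if (qp->s_flags & WAIT_TID_SPACE)
		return;                     /* already queued */
	qp->s_flags |= WAIT_TID_SPACE;      /* record why the QP is sleeping */
	qp->refcount++;                     /* hold a reference while queued */
	qp->next = q->head;                 /* simplified: LIFO, not the driver's FIFO */
	q->head = qp;
}

static struct fake_qp *dequeue_tid_waiter(struct tid_wait_queue *q)
{
	struct fake_qp *qp = q->head;

	if (!qp)
		return NULL;
	q->head = qp->next;
	qp->s_flags &= ~WAIT_TID_SPACE;     /* no longer waiting for TID space */
	qp->refcount--;                     /* drop the queue's reference */
	return qp;                          /* caller would reschedule the send */
}

int main(void)
{
	struct tid_wait_queue q = { 0 };
	struct fake_qp qp = { 0 };

	queue_for_tid_wait(&q, &qp);
	printf("queued: flags=%#x ref=%d\n", qp.s_flags, qp.refcount);
	dequeue_tid_waiter(&q);
	printf("woken:  flags=%#x ref=%d\n", qp.s_flags, qp.refcount);
	return 0;
}
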
780 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) in hfi1_kern_setup_hw_flow() argument
782 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; in hfi1_kern_setup_hw_flow()
793 if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp)) in hfi1_kern_setup_hw_flow()
807 dequeue_tid_waiter(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
815 queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
820 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) in hfi1_kern_clear_hw_flow() argument
822 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; in hfi1_kern_clear_hw_flow()
840 if (fqp == qp) { in hfi1_kern_clear_hw_flow()
898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr); in tid_rdma_find_phys_blocks_4k()
901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0, in tid_rdma_find_phys_blocks_4k()
935 trace_hfi1_tid_pageset(flow->req->qp, setcount, in tid_rdma_find_phys_blocks_4k()
1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0); in tid_rdma_find_phys_blocks_8k()
1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1); in tid_rdma_find_phys_blocks_8k()
1183 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, in kern_get_phys_blocks()
1192 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096)) in kern_get_phys_blocks()
1220 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1, in kern_add_tid_node()
1293 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ", in kern_alloc_tids()
1308 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT; in kern_program_rcv_group()
1347 flow->req->qp, flow->tidcnt - 1, in kern_program_rcv_group()
1416 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow); in kern_program_rcvarray()
1463 __must_hold(&req->qp->s_lock) in hfi1_kern_exp_rcv_setup()
1467 struct hfi1_qp_priv *qpriv = req->qp->priv; in hfi1_kern_exp_rcv_setup()
1472 lockdep_assert_held(&req->qp->s_lock); in hfi1_kern_exp_rcv_setup()
1490 hfi1_wait_kmem(flow->req->qp); in hfi1_kern_exp_rcv_setup()
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) in hfi1_kern_exp_rcv_setup()
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1554 __must_hold(&req->qp->s_lock) in hfi1_kern_exp_rcv_clear()
1562 lockdep_assert_held(&req->qp->s_lock); in hfi1_kern_exp_rcv_clear()
1582 if (fqp == req->qp) { in hfi1_kern_exp_rcv_clear()
1597 __must_hold(&req->qp->s_lock) in hfi1_kern_exp_rcv_clear_all()
1621 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) in __trdma_clean_swqe() argument
1654 static void hfi1_init_trdma_req(struct rvt_qp *qp, in hfi1_init_trdma_req() argument
1657 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_init_trdma_req()
1669 req->qp = qp; in hfi1_init_trdma_req()
1709 struct rvt_qp *qp = req->qp; in hfi1_build_tid_rdma_read_packet() local
1710 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_read_packet()
1719 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_read_packet()
1758 cpu_to_be32(qpriv->tid_rdma.local.qp | in hfi1_build_tid_rdma_read_packet()
1762 rreq->verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_read_packet()
1764 *bth1 |= remote->qp; in hfi1_build_tid_rdma_read_packet()
1771 qp->s_state = TID_OP(READ_REQ); in hfi1_build_tid_rdma_read_packet()
1775 qp->s_num_rd_atomic++; in hfi1_build_tid_rdma_read_packet()
1787 u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe, in hfi1_build_tid_rdma_read_req() argument
1790 __must_hold(&qp->s_lock) in hfi1_build_tid_rdma_read_req()
1792 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_read_req()
1798 u32 npkts = rvt_div_round_up_mtu(qp, *len); in hfi1_build_tid_rdma_read_req()
1800 trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_build_tid_rdma_read_req()
1811 hfi1_kern_clear_hw_flow(req->rcd, qp); in hfi1_build_tid_rdma_read_req()
1829 restart_sge(&qp->s_sge, wqe, req->s_next_psn, in hfi1_build_tid_rdma_read_req()
1830 qp->pmtu); in hfi1_build_tid_rdma_read_req()
1845 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp)) in hfi1_build_tid_rdma_read_req()
1852 if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) { in hfi1_build_tid_rdma_read_req()
1889 static int tid_rdma_rcv_read_request(struct rvt_qp *qp, in tid_rdma_rcv_read_request() argument
1895 struct hfi1_qp_priv *qpriv = qp->priv; in tid_rdma_rcv_read_request()
1916 flow->npkts = rvt_div_round_up_mtu(qp, len); in tid_rdma_rcv_read_request()
1918 trace_hfi1_tid_entry_rcv_read_req(qp, i, in tid_rdma_rcv_read_request()
1954 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow); in tid_rdma_rcv_read_request()
1980 trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn, in tid_rdma_rcv_read_request()
1987 struct rvt_qp *qp, u32 psn, int diff) in tid_rdma_rcv_error() argument
1989 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in tid_rdma_rcv_error()
1990 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd; in tid_rdma_rcv_error()
1991 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in tid_rdma_rcv_error()
1992 struct hfi1_qp_priv *qpriv = qp->priv; in tid_rdma_rcv_error()
1999 trace_hfi1_rsp_tid_rcv_error(qp, psn); in tid_rdma_rcv_error()
2000 trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff); in tid_rdma_rcv_error()
2003 if (!qp->r_nak_state) { in tid_rdma_rcv_error()
2005 qp->r_nak_state = IB_NAK_PSN_ERROR; in tid_rdma_rcv_error()
2006 qp->r_ack_psn = qp->r_psn; in tid_rdma_rcv_error()
2007 rc_defered_ack(rcd, qp); in tid_rdma_rcv_error()
2014 spin_lock_irqsave(&qp->s_lock, flags); in tid_rdma_rcv_error()
2015 e = find_prev_entry(qp, psn, &prev, NULL, &old_req); in tid_rdma_rcv_error()
2022 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req); in tid_rdma_rcv_error()
2045 qp->r_len = len; in tid_rdma_rcv_error()
2046 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, in tid_rdma_rcv_error()
2062 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, in tid_rdma_rcv_error()
2096 if (i == qp->r_head_ack_queue) in tid_rdma_rcv_error()
2098 e = &qp->s_ack_queue[i]; in tid_rdma_rcv_error()
2158 if (i == qp->r_head_ack_queue) in tid_rdma_rcv_error()
2160 e = &qp->s_ack_queue[i]; in tid_rdma_rcv_error()
2162 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, in tid_rdma_rcv_error()
2183 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue) in tid_rdma_rcv_error()
2184 qp->s_acked_ack_queue = prev; in tid_rdma_rcv_error()
2185 qp->s_tail_ack_queue = prev; in tid_rdma_rcv_error()
2192 qp->s_ack_state = OP(ACKNOWLEDGE); in tid_rdma_rcv_error()
2199 qp->s_nak_state = 0; in tid_rdma_rcv_error()
2201 qp->r_psn = e->lpsn + 1; in tid_rdma_rcv_error()
2202 hfi1_tid_write_alloc_resources(qp, true); in tid_rdma_rcv_error()
2205 qp->r_state = e->opcode; in tid_rdma_rcv_error()
2206 qp->r_nak_state = 0; in tid_rdma_rcv_error()
2207 qp->s_flags |= RVT_S_RESP_PENDING; in tid_rdma_rcv_error()
2208 hfi1_schedule_send(qp); in tid_rdma_rcv_error()
2210 spin_unlock_irqrestore(&qp->s_lock, flags); in tid_rdma_rcv_error()
2231 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_read_req() local
2232 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_rc_rcv_tid_rdma_read_req()
2237 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_read_req()
2249 fecn = process_ecn(qp, packet); in hfi1_rc_rcv_tid_rdma_read_req()
2251 trace_hfi1_rsp_rcv_tid_read_req(qp, psn); in hfi1_rc_rcv_tid_rdma_read_req()
2253 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) in hfi1_rc_rcv_tid_rdma_read_req()
2254 rvt_comm_est(qp); in hfi1_rc_rcv_tid_rdma_read_req()
2256 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) in hfi1_rc_rcv_tid_rdma_read_req()
2266 diff = delta_psn(psn, qp->r_psn); in hfi1_rc_rcv_tid_rdma_read_req()
2268 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn); in hfi1_rc_rcv_tid_rdma_read_req()
2273 next = qp->r_head_ack_queue + 1; in hfi1_rc_rcv_tid_rdma_read_req()
2274 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_rc_rcv_tid_rdma_read_req()
2276 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2277 if (unlikely(next == qp->s_tail_ack_queue)) { in hfi1_rc_rcv_tid_rdma_read_req()
2278 if (!qp->s_ack_queue[next].sent) { in hfi1_rc_rcv_tid_rdma_read_req()
2282 update_ack_queue(qp, next); in hfi1_rc_rcv_tid_rdma_read_req()
2284 e = &qp->s_ack_queue[qp->r_head_ack_queue]; in hfi1_rc_rcv_tid_rdma_read_req()
2288 qp->r_len = len; in hfi1_rc_rcv_tid_rdma_read_req()
2290 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, in hfi1_rc_rcv_tid_rdma_read_req()
2295 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr, in hfi1_rc_rcv_tid_rdma_read_req()
2299 qp->r_state = e->opcode; in hfi1_rc_rcv_tid_rdma_read_req()
2300 qp->r_nak_state = 0; in hfi1_rc_rcv_tid_rdma_read_req()
2306 qp->r_msn++; in hfi1_rc_rcv_tid_rdma_read_req()
2307 qp->r_psn += e->lpsn - e->psn + 1; in hfi1_rc_rcv_tid_rdma_read_req()
2309 qp->r_head_ack_queue = next; in hfi1_rc_rcv_tid_rdma_read_req()
2317 qpriv->r_tid_alloc = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_read_req()
2320 qp->s_flags |= RVT_S_RESP_PENDING; in hfi1_rc_rcv_tid_rdma_read_req()
2322 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_read_req()
2323 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_read_req()
2325 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2329 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2331 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); in hfi1_rc_rcv_tid_rdma_read_req()
2332 qp->r_nak_state = nack_state; in hfi1_rc_rcv_tid_rdma_read_req()
2333 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_read_req()
2335 rc_defered_ack(rcd, qp); in hfi1_rc_rcv_tid_rdma_read_req()
2338 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_req()
2339 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); in hfi1_rc_rcv_tid_rdma_read_req()
2340 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; in hfi1_rc_rcv_tid_rdma_read_req()
2341 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_read_req()
2344 u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, in hfi1_build_tid_rdma_read_resp() argument
2350 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_read_resp()
2360 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_read_resp()
2365 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_read_resp()
2366 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_read_resp()
2382 resp->verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_read_resp()
2385 resp->aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_read_resp()
2415 find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode) in find_tid_request() argument
2416 __must_hold(&qp->s_lock) in find_tid_request()
2422 end = qp->s_cur + 1; in find_tid_request()
2423 if (end == qp->s_size) in find_tid_request()
2425 for (i = qp->s_acked; i != end;) { in find_tid_request()
2426 wqe = rvt_get_swqe_ptr(qp, i); in find_tid_request()
2433 if (++i == qp->s_size) in find_tid_request()
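
find_tid_request() (lines 2415-2433) walks the send queue as a ring: indices start at s_acked, run to just past s_cur, and wrap at s_size, the same wrap idiom used by hfi1_kern_read_tid_flow_free() and hfi1_qp_kern_exp_rcv_clear_all(). A small standalone sketch of that wrap-around walk, with arbitrary index values:

#include <stdio.h>

int main(void)
{
	unsigned int s_size = 8, s_acked = 6, s_cur = 2;    /* arbitrary values */
	unsigned int end = s_cur + 1;
	unsigned int i;

	if (end == s_size)
		end = 0;
	for (i = s_acked; i != end;) {
		printf("visit wqe[%u]\n", i);   /* rvt_get_swqe_ptr(qp, i) in the driver */
		if (++i == s_size)
			i = 0;                  /* wrap around the ring */
	}
	return 0;
}
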
2452 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_read_resp() local
2453 struct hfi1_qp_priv *priv = qp->priv; in hfi1_rc_rcv_tid_rdma_read_resp()
2462 trace_hfi1_sender_rcv_tid_read_resp(qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2463 fecn = process_ecn(qp, packet); in hfi1_rc_rcv_tid_rdma_read_resp()
2468 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_resp()
2470 req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ); in hfi1_rc_rcv_tid_rdma_read_resp()
2497 u32 pmtu = qp->pmtu; in hfi1_rc_rcv_tid_rdma_read_resp()
2504 rvt_copy_sge(qp, &ss, packet->payload, pmtu, false, in hfi1_rc_rcv_tid_rdma_read_resp()
2515 qp->s_num_rd_atomic--; in hfi1_rc_rcv_tid_rdma_read_resp()
2516 if ((qp->s_flags & RVT_S_WAIT_FENCE) && in hfi1_rc_rcv_tid_rdma_read_resp()
2517 !qp->s_num_rd_atomic) { in hfi1_rc_rcv_tid_rdma_read_resp()
2518 qp->s_flags &= ~(RVT_S_WAIT_FENCE | in hfi1_rc_rcv_tid_rdma_read_resp()
2520 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2522 if (qp->s_flags & RVT_S_WAIT_RDMAR) { in hfi1_rc_rcv_tid_rdma_read_resp()
2523 qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK); in hfi1_rc_rcv_tid_rdma_read_resp()
2524 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2527 trace_hfi1_ack(qp, ipsn); in hfi1_rc_rcv_tid_rdma_read_resp()
2528 trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode, in hfi1_rc_rcv_tid_rdma_read_resp()
2531 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow); in hfi1_rc_rcv_tid_rdma_read_resp()
2536 if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd)) in hfi1_rc_rcv_tid_rdma_read_resp()
2553 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2559 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2571 if (qp->s_last == qp->s_acked) in hfi1_rc_rcv_tid_rdma_read_resp()
2572 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); in hfi1_rc_rcv_tid_rdma_read_resp()
2575 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_read_resp()
2578 void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp) in hfi1_kern_read_tid_flow_free() argument
2579 __must_hold(&qp->s_lock) in hfi1_kern_read_tid_flow_free()
2581 u32 n = qp->s_acked; in hfi1_kern_read_tid_flow_free()
2584 struct hfi1_qp_priv *priv = qp->priv; in hfi1_kern_read_tid_flow_free()
2586 lockdep_assert_held(&qp->s_lock); in hfi1_kern_read_tid_flow_free()
2588 while (n != qp->s_tail) { in hfi1_kern_read_tid_flow_free()
2589 wqe = rvt_get_swqe_ptr(qp, n); in hfi1_kern_read_tid_flow_free()
2595 if (++n == qp->s_size) in hfi1_kern_read_tid_flow_free()
2599 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_kern_read_tid_flow_free()
2604 struct rvt_qp *qp = packet->qp; in tid_rdma_tid_err() local
2609 spin_lock(&qp->s_lock); in tid_rdma_tid_err()
2619 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); in tid_rdma_tid_err()
2620 hfi1_schedule_send(qp); in tid_rdma_tid_err()
2624 spin_unlock(&qp->s_lock); in tid_rdma_tid_err()
2630 struct rvt_qp *qp, struct rvt_swqe *wqe) in restart_tid_rdma_read_req() argument
2636 qp->r_flags |= RVT_R_RDMAR_SEQ; in restart_tid_rdma_read_req()
2639 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0); in restart_tid_rdma_read_req()
2640 if (list_empty(&qp->rspwait)) { in restart_tid_rdma_read_req()
2641 qp->r_flags |= RVT_R_RSP_SEND; in restart_tid_rdma_read_req()
2642 rvt_get_qp(qp); in restart_tid_rdma_read_req()
2643 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); in restart_tid_rdma_read_req()
2658 __must_hold(&packet->qp->r_lock) __must_hold(RCU) in handle_read_kdeth_eflags()
2667 struct rvt_qp *qp = packet->qp; in handle_read_kdeth_eflags() local
2668 struct hfi1_qp_priv *priv = qp->priv; in handle_read_kdeth_eflags()
2673 lockdep_assert_held(&qp->r_lock); in handle_read_kdeth_eflags()
2674 trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn); in handle_read_kdeth_eflags()
2675 trace_hfi1_sender_read_kdeth_eflags(qp); in handle_read_kdeth_eflags()
2676 trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0); in handle_read_kdeth_eflags()
2677 spin_lock(&qp->s_lock); in handle_read_kdeth_eflags()
2679 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || in handle_read_kdeth_eflags()
2680 cmp_psn(ibpsn, qp->s_psn) > 0) in handle_read_kdeth_eflags()
2689 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in handle_read_kdeth_eflags()
2690 ibp = to_iport(qp->ibqp.device, qp->port_num); in handle_read_kdeth_eflags()
2704 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { in handle_read_kdeth_eflags()
2705 qp->r_flags |= RVT_R_RDMAR_SEQ; in handle_read_kdeth_eflags()
2707 restart_tid_rdma_read_req(rcd, qp, in handle_read_kdeth_eflags()
2710 hfi1_restart_rc(qp, qp->s_last_psn + 1, in handle_read_kdeth_eflags()
2712 if (list_empty(&qp->rspwait)) { in handle_read_kdeth_eflags()
2713 qp->r_flags |= RVT_R_RSP_SEND; in handle_read_kdeth_eflags()
2714 rvt_get_qp(qp); in handle_read_kdeth_eflags()
2716 &qp->rspwait, in handle_read_kdeth_eflags()
2728 wqe = do_rc_completion(qp, wqe, ibp); in handle_read_kdeth_eflags()
2729 if (qp->s_acked == qp->s_tail) in handle_read_kdeth_eflags()
2733 if (qp->s_acked == qp->s_tail) in handle_read_kdeth_eflags()
2741 trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn, in handle_read_kdeth_eflags()
2757 trace_hfi1_tid_flow_read_kdeth_eflags(qp, in handle_read_kdeth_eflags()
2772 if (qp->r_flags & RVT_R_RDMAR_SEQ) in handle_read_kdeth_eflags()
2773 qp->r_flags &= in handle_read_kdeth_eflags()
2789 if (qp->r_flags & RVT_R_RDMAR_SEQ) in handle_read_kdeth_eflags()
2790 qp->r_flags &= in handle_read_kdeth_eflags()
2806 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) in handle_read_kdeth_eflags()
2807 restart_tid_rdma_read_req(rcd, qp, in handle_read_kdeth_eflags()
2841 spin_unlock(&qp->s_lock); in handle_read_kdeth_eflags()
2860 struct rvt_qp *qp; in hfi1_handle_kdeth_eflags() local
2888 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); in hfi1_handle_kdeth_eflags()
2889 if (!qp) in hfi1_handle_kdeth_eflags()
2892 packet->qp = qp; in hfi1_handle_kdeth_eflags()
2895 spin_lock_irqsave(&qp->r_lock, flags); in hfi1_handle_kdeth_eflags()
2896 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { in hfi1_handle_kdeth_eflags()
2934 spin_lock(&qp->s_lock); in hfi1_handle_kdeth_eflags()
2935 qpriv = qp->priv; in hfi1_handle_kdeth_eflags()
2939 e = &qp->s_ack_queue[qpriv->r_tid_tail]; in hfi1_handle_kdeth_eflags()
2946 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); in hfi1_handle_kdeth_eflags()
2947 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); in hfi1_handle_kdeth_eflags()
2948 trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp); in hfi1_handle_kdeth_eflags()
2949 trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn, in hfi1_handle_kdeth_eflags()
2951 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow); in hfi1_handle_kdeth_eflags()
3022 spin_unlock(&qp->s_lock); in hfi1_handle_kdeth_eflags()
3024 spin_unlock_irqrestore(&qp->r_lock, flags); in hfi1_handle_kdeth_eflags()
3035 tid_rdma_trigger_ack(qp); in hfi1_handle_kdeth_eflags()
3046 void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, in hfi1_tid_rdma_restart_req() argument
3051 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_tid_rdma_restart_req()
3057 *bth2 = mask_psn(qp->s_psn); in hfi1_tid_rdma_restart_req()
3061 qp, "!!!!!! Could not find flow to restart: bth2 ", in hfi1_tid_rdma_restart_req()
3063 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, in hfi1_tid_rdma_restart_req()
3081 trace_hfi1_tid_flow_restart_req(qp, fidx, flow); in hfi1_tid_rdma_restart_req()
3095 tidnpkts = rvt_div_round_up_mtu(qp, tidlen); in hfi1_tid_rdma_restart_req()
3099 npkts * qp->pmtu); in hfi1_tid_rdma_restart_req()
3100 flow->tid_offset += npkts * qp->pmtu; in hfi1_tid_rdma_restart_req()
3131 trace_hfi1_tid_flow_restart_req(qp, fidx, flow); in hfi1_tid_rdma_restart_req()
3132 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_tid_rdma_restart_req()
3151 i = (++i == qp->s_size ? 0 : i); in hfi1_tid_rdma_restart_req()
3152 wqe = rvt_get_swqe_ptr(qp, i); in hfi1_tid_rdma_restart_req()
3163 void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp) in hfi1_qp_kern_exp_rcv_clear_all() argument
3166 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_qp_kern_exp_rcv_clear_all()
3169 if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA)) in hfi1_qp_kern_exp_rcv_clear_all()
3178 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_qp_kern_exp_rcv_clear_all()
3180 for (i = qp->s_acked; i != qp->s_head;) { in hfi1_qp_kern_exp_rcv_clear_all()
3181 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i); in hfi1_qp_kern_exp_rcv_clear_all()
3183 if (++i == qp->s_size) in hfi1_qp_kern_exp_rcv_clear_all()
3194 for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) { in hfi1_qp_kern_exp_rcv_clear_all()
3195 struct rvt_ack_entry *e = &qp->s_ack_queue[i]; in hfi1_qp_kern_exp_rcv_clear_all()
3197 if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_qp_kern_exp_rcv_clear_all()
3210 bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) in hfi1_tid_rdma_wqe_interlock() argument
3213 struct hfi1_qp_priv *priv = qp->priv; in hfi1_tid_rdma_wqe_interlock()
3217 s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1; in hfi1_tid_rdma_wqe_interlock()
3218 prev = rvt_get_swqe_ptr(qp, s_prev); in hfi1_tid_rdma_wqe_interlock()
3245 if (qp->s_acked != qp->s_cur) in hfi1_tid_rdma_wqe_interlock()
3268 static inline bool hfi1_check_sge_align(struct rvt_qp *qp, in hfi1_check_sge_align() argument
3274 trace_hfi1_sge_check_align(qp, i, sge); in hfi1_check_sge_align()
3282 void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) in setup_tid_rdma_wqe() argument
3284 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv; in setup_tid_rdma_wqe()
3291 if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) == in setup_tid_rdma_wqe()
3307 if (hfi1_check_sge_align(qp, &wqe->sg_list[0], in setup_tid_rdma_wqe()
3339 wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1; in setup_tid_rdma_wqe()
3356 trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode, in setup_tid_rdma_wqe()
3366 u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe, in hfi1_build_tid_rdma_write_req() argument
3370 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_write_req()
3390 ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_write_req()
3392 *bth1 |= remote->qp; in hfi1_build_tid_rdma_write_req()
3393 qp->s_state = TID_OP(WRITE_REQ); in hfi1_build_tid_rdma_write_req()
3394 qp->s_flags |= HFI1_S_WAIT_TID_RESP; in hfi1_build_tid_rdma_write_req()
3402 static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp) in hfi1_compute_tid_rdma_flow_wt() argument
3412 return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT; in hfi1_compute_tid_rdma_flow_wt()
3426 static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg) in hfi1_compute_tid_rnr_timeout() argument
3428 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_compute_tid_rnr_timeout()
3464 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx) in hfi1_tid_write_alloc_resources() argument
3467 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_tid_write_alloc_resources()
3475 lockdep_assert_held(&qp->s_lock); in hfi1_tid_write_alloc_resources()
3478 trace_hfi1_rsp_tid_write_alloc_res(qp, 0); in hfi1_tid_write_alloc_resources()
3479 trace_hfi1_tid_write_rsp_alloc_res(qp); in hfi1_tid_write_alloc_resources()
3500 hfi1_kern_clear_hw_flow(rcd, qp); in hfi1_tid_write_alloc_resources()
3506 e = &qp->s_ack_queue[qpriv->r_tid_alloc]; in hfi1_tid_write_alloc_resources()
3510 trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn, in hfi1_tid_write_alloc_resources()
3526 hfi1_kern_clear_hw_flow(rcd, qp); in hfi1_tid_write_alloc_resources()
3533 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp); in hfi1_tid_write_alloc_resources()
3535 to_seg = hfi1_compute_tid_rdma_flow_wt(qp) * in hfi1_tid_write_alloc_resources()
3542 npkts = rvt_div_round_up_mtu(qp, req->seg_len); in hfi1_tid_write_alloc_resources()
3564 tid_rdma_trigger_ack(qp); in hfi1_tid_write_alloc_resources()
3581 rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_tid_write_alloc_resources()
3590 if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state) in hfi1_tid_write_alloc_resources()
3596 lockdep_assert_held(&qp->r_lock); in hfi1_tid_write_alloc_resources()
3599 qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK; in hfi1_tid_write_alloc_resources()
3602 qp->r_psn = e->psn + req->alloc_seg; in hfi1_tid_write_alloc_resources()
3603 qp->r_ack_psn = qp->r_psn; in hfi1_tid_write_alloc_resources()
3609 qp->r_head_ack_queue = qpriv->r_tid_alloc + 1; in hfi1_tid_write_alloc_resources()
3610 if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_tid_write_alloc_resources()
3611 qp->r_head_ack_queue = 0; in hfi1_tid_write_alloc_resources()
3612 qpriv->r_tid_head = qp->r_head_ack_queue; in hfi1_tid_write_alloc_resources()
3618 qp->s_nak_state = qp->r_nak_state; in hfi1_tid_write_alloc_resources()
3619 qp->s_ack_psn = qp->r_ack_psn; in hfi1_tid_write_alloc_resources()
3624 qp->s_flags &= ~(RVT_S_ACK_PENDING); in hfi1_tid_write_alloc_resources()
3626 trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn); in hfi1_tid_write_alloc_resources()
3641 rc_defered_ack(rcd, qp); in hfi1_tid_write_alloc_resources()
3660 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_write_req() local
3661 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_rc_rcv_tid_rdma_write_req()
3666 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_write_req()
3678 fecn = process_ecn(qp, packet); in hfi1_rc_rcv_tid_rdma_write_req()
3680 trace_hfi1_rsp_rcv_tid_write_req(qp, psn); in hfi1_rc_rcv_tid_rdma_write_req()
3682 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) in hfi1_rc_rcv_tid_rdma_write_req()
3683 rvt_comm_est(qp); in hfi1_rc_rcv_tid_rdma_write_req()
3685 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) in hfi1_rc_rcv_tid_rdma_write_req()
3693 diff = delta_psn(psn, qp->r_psn); in hfi1_rc_rcv_tid_rdma_write_req()
3695 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn); in hfi1_rc_rcv_tid_rdma_write_req()
3705 qp->r_head_ack_queue = qp->r_head_ack_queue ? in hfi1_rc_rcv_tid_rdma_write_req()
3706 qp->r_head_ack_queue - 1 : in hfi1_rc_rcv_tid_rdma_write_req()
3707 rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); in hfi1_rc_rcv_tid_rdma_write_req()
3710 next = qp->r_head_ack_queue + 1; in hfi1_rc_rcv_tid_rdma_write_req()
3711 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device))) in hfi1_rc_rcv_tid_rdma_write_req()
3713 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3714 if (unlikely(next == qp->s_acked_ack_queue)) { in hfi1_rc_rcv_tid_rdma_write_req()
3715 if (!qp->s_ack_queue[next].sent) in hfi1_rc_rcv_tid_rdma_write_req()
3717 update_ack_queue(qp, next); in hfi1_rc_rcv_tid_rdma_write_req()
3719 e = &qp->s_ack_queue[qp->r_head_ack_queue]; in hfi1_rc_rcv_tid_rdma_write_req()
3724 qp->r_nak_state = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3725 qp->s_nak_state = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3727 qp->r_psn = e->lpsn + 1; in hfi1_rc_rcv_tid_rdma_write_req()
3739 qp->r_len = len; in hfi1_rc_rcv_tid_rdma_write_req()
3746 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr, in hfi1_rc_rcv_tid_rdma_write_req()
3750 qp->r_psn += num_segs - 1; in hfi1_rc_rcv_tid_rdma_write_req()
3754 e->lpsn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_write_req()
3775 qp->r_state = e->opcode; in hfi1_rc_rcv_tid_rdma_write_req()
3776 qp->r_nak_state = 0; in hfi1_rc_rcv_tid_rdma_write_req()
3782 qp->r_msn++; in hfi1_rc_rcv_tid_rdma_write_req()
3783 qp->r_psn++; in hfi1_rc_rcv_tid_rdma_write_req()
3785 trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn, in hfi1_rc_rcv_tid_rdma_write_req()
3789 qpriv->r_tid_tail = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3793 e = &qp->s_ack_queue[qpriv->r_tid_tail]; in hfi1_rc_rcv_tid_rdma_write_req()
3799 qpriv->r_tid_ack = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3800 qpriv->r_tid_tail = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3804 qp->r_head_ack_queue = next; in hfi1_rc_rcv_tid_rdma_write_req()
3805 qpriv->r_tid_head = qp->r_head_ack_queue; in hfi1_rc_rcv_tid_rdma_write_req()
3807 hfi1_tid_write_alloc_resources(qp, true); in hfi1_rc_rcv_tid_rdma_write_req()
3808 trace_hfi1_tid_write_rsp_rcv_req(qp); in hfi1_rc_rcv_tid_rdma_write_req()
3811 qp->s_flags |= RVT_S_RESP_PENDING; in hfi1_rc_rcv_tid_rdma_write_req()
3813 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_write_req()
3814 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_write_req()
3816 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3820 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3822 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); in hfi1_rc_rcv_tid_rdma_write_req()
3823 qp->r_nak_state = IB_NAK_INVALID_REQUEST; in hfi1_rc_rcv_tid_rdma_write_req()
3824 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_write_req()
3826 rc_defered_ack(rcd, qp); in hfi1_rc_rcv_tid_rdma_write_req()
3829 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_req()
3830 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR); in hfi1_rc_rcv_tid_rdma_write_req()
3831 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; in hfi1_rc_rcv_tid_rdma_write_req()
3832 qp->r_ack_psn = qp->r_psn; in hfi1_rc_rcv_tid_rdma_write_req()
3835 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e, in hfi1_build_tid_rdma_write_resp() argument
3842 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_write_resp()
3848 trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn, in hfi1_build_tid_rdma_write_resp()
3850 trace_hfi1_tid_write_rsp_build_resp(qp); in hfi1_build_tid_rdma_write_resp()
3851 trace_hfi1_rsp_build_tid_write_resp(qp, bth2); in hfi1_build_tid_rdma_write_resp()
3859 hfi1_tid_write_alloc_resources(qp, false); in hfi1_build_tid_rdma_write_resp()
3873 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3875 hfi1_add_tid_reap_timer(qp); in hfi1_build_tid_rdma_write_resp()
3880 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow); in hfi1_build_tid_rdma_write_resp()
3885 hfi1_mod_tid_reap_timer(qp); in hfi1_build_tid_rdma_write_resp()
3918 ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_write_resp()
3925 cpu_to_be32(qpriv->tid_rdma.local.qp | in hfi1_build_tid_rdma_write_resp()
3929 ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_write_resp()
3930 *bth1 = remote->qp; in hfi1_build_tid_rdma_write_resp()
3938 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp) in hfi1_add_tid_reap_timer() argument
3940 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_add_tid_reap_timer()
3942 lockdep_assert_held(&qp->s_lock); in hfi1_add_tid_reap_timer()
3951 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp) in hfi1_mod_tid_reap_timer() argument
3953 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_mod_tid_reap_timer()
3955 lockdep_assert_held(&qp->s_lock); in hfi1_mod_tid_reap_timer()
3961 static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp) in hfi1_stop_tid_reap_timer() argument
3963 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_stop_tid_reap_timer()
3966 lockdep_assert_held(&qp->s_lock); in hfi1_stop_tid_reap_timer()
3974 void hfi1_del_tid_reap_timer(struct rvt_qp *qp) in hfi1_del_tid_reap_timer() argument
3976 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_del_tid_reap_timer()
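
hfi1_add_tid_reap_timer(), hfi1_mod_tid_reap_timer(), hfi1_stop_tid_reap_timer() and hfi1_del_tid_reap_timer() (lines 3938-3976) follow the usual add/mod/stop split for a resource-reap timer. The sketch below only illustrates that split (arm if idle, unconditionally re-arm, disarm and report whether it was pending); it uses a plain flag and deadline instead of the kernel timer API, and the driver's actual flag handling is not visible in this listing.

#include <stdbool.h>
#include <stdio.h>

struct reap_timer {
	bool armed;
	unsigned long expires;
};

/* add: arm only if not already armed */
static void timer_add(struct reap_timer *t, unsigned long now, unsigned long to)
{
	if (!t->armed) {
		t->armed = true;
		t->expires = now + to;
	}
}

/* mod: unconditionally push the deadline out */
static void timer_mod(struct reap_timer *t, unsigned long now, unsigned long to)
{
	t->armed = true;
	t->expires = now + to;
}

/* stop: disarm and report whether it was pending */
static bool timer_stop(struct reap_timer *t)
{
	bool was_armed = t->armed;

	t->armed = false;
	return was_armed;
}

int main(void)
{
	struct reap_timer t = { 0 };

	timer_add(&t, 100, 50);
	timer_mod(&t, 120, 50);
	printf("expires=%lu stopped_pending=%d\n", t.expires, timer_stop(&t));
	return 0;
}
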
3985 struct rvt_qp *qp = qpriv->owner; in hfi1_tid_timeout() local
3986 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); in hfi1_tid_timeout()
3990 spin_lock_irqsave(&qp->r_lock, flags); in hfi1_tid_timeout()
3991 spin_lock(&qp->s_lock); in hfi1_tid_timeout()
3993 dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n", in hfi1_tid_timeout()
3994 qp->ibqp.qp_num, __func__, __LINE__); in hfi1_tid_timeout()
3996 qp, "resource timeout = ", in hfi1_tid_timeout()
3998 hfi1_stop_tid_reap_timer(qp); in hfi1_tid_timeout()
4003 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_tid_timeout()
4006 ack_to_tid_req(&qp->s_ack_queue[i]); in hfi1_tid_timeout()
4010 spin_unlock(&qp->s_lock); in hfi1_tid_timeout()
4011 if (qp->ibqp.event_handler) { in hfi1_tid_timeout()
4014 ev.device = qp->ibqp.device; in hfi1_tid_timeout()
4015 ev.element.qp = &qp->ibqp; in hfi1_tid_timeout()
4017 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in hfi1_tid_timeout()
4019 rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR); in hfi1_tid_timeout()
4022 spin_unlock(&qp->s_lock); in hfi1_tid_timeout()
4024 spin_unlock_irqrestore(&qp->r_lock, flags); in hfi1_tid_timeout()
4041 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_write_resp() local
4042 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_write_resp()
4052 fecn = process_ecn(qp, packet); in hfi1_rc_rcv_tid_rdma_write_resp()
4057 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_resp()
4060 if (cmp_psn(psn, qp->s_next_psn) >= 0) in hfi1_rc_rcv_tid_rdma_write_resp()
4064 if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0)) in hfi1_rc_rcv_tid_rdma_write_resp()
4067 if (unlikely(qp->s_acked == qp->s_tail)) in hfi1_rc_rcv_tid_rdma_write_resp()
4075 if (qp->r_flags & RVT_R_RDMAR_SEQ) { in hfi1_rc_rcv_tid_rdma_write_resp()
4076 if (cmp_psn(psn, qp->s_last_psn + 1) != 0) in hfi1_rc_rcv_tid_rdma_write_resp()
4078 qp->r_flags &= ~RVT_R_RDMAR_SEQ; in hfi1_rc_rcv_tid_rdma_write_resp()
4081 wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur); in hfi1_rc_rcv_tid_rdma_write_resp()
4101 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) in hfi1_rc_rcv_tid_rdma_write_resp()
4104 trace_hfi1_ack(qp, psn); in hfi1_rc_rcv_tid_rdma_write_resp()
4122 flow->npkts = rvt_div_round_up_mtu(qp, flow->length); in hfi1_rc_rcv_tid_rdma_write_resp()
4133 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow); in hfi1_rc_rcv_tid_rdma_write_resp()
4136 trace_hfi1_tid_write_sender_rcv_resp(qp, 0); in hfi1_rc_rcv_tid_rdma_write_resp()
4143 qp, i, flow->tid_entry[i]); in hfi1_rc_rcv_tid_rdma_write_resp()
4155 trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_write_resp()
4181 if (i == qp->s_size) in hfi1_rc_rcv_tid_rdma_write_resp()
4183 wqe = rvt_get_swqe_ptr(qp, i); in hfi1_rc_rcv_tid_rdma_write_resp()
4191 qp->s_flags &= ~HFI1_S_WAIT_TID_RESP; in hfi1_rc_rcv_tid_rdma_write_resp()
4192 hfi1_schedule_tid_send(qp); in hfi1_rc_rcv_tid_rdma_write_resp()
4198 rvt_error_qp(qp, status); in hfi1_rc_rcv_tid_rdma_write_resp()
4201 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_write_resp()
4202 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_resp()
4212 struct rvt_qp *qp = req->qp; in hfi1_build_tid_rdma_packet() local
4213 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_packet()
4221 hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR); in hfi1_build_tid_rdma_packet()
4222 rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR); in hfi1_build_tid_rdma_packet()
4225 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset); in hfi1_build_tid_rdma_packet()
4230 trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry); in hfi1_build_tid_rdma_packet()
4231 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow); in hfi1_build_tid_rdma_packet()
4243 wd->verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_packet()
4254 rvt_div_round_up_mtu(qp, req->seg_len) > in hfi1_build_tid_rdma_packet()
4271 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_write_data() local
4272 struct hfi1_qp_priv *priv = qp->priv; in hfi1_rc_rcv_tid_rdma_write_data()
4278 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in hfi1_rc_rcv_tid_rdma_write_data()
4284 fecn = process_ecn(qp, packet); in hfi1_rc_rcv_tid_rdma_write_data()
4292 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_data()
4293 e = &qp->s_ack_queue[priv->r_tid_tail]; in hfi1_rc_rcv_tid_rdma_write_data()
4318 u32 pmtu = qp->pmtu; in hfi1_rc_rcv_tid_rdma_write_data()
4338 rvt_copy_sge(qp, &ss, packet->payload, pmtu, false, in hfi1_rc_rcv_tid_rdma_write_data()
4360 trace_hfi1_rsp_rcv_tid_write_data(qp, psn); in hfi1_rc_rcv_tid_rdma_write_data()
4361 trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn, in hfi1_rc_rcv_tid_rdma_write_data()
4363 trace_hfi1_tid_write_rsp_rcv_data(qp); in hfi1_rc_rcv_tid_rdma_write_data()
4373 e = &qp->s_ack_queue[next]; in hfi1_rc_rcv_tid_rdma_write_data()
4378 if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi)) in hfi1_rc_rcv_tid_rdma_write_data()
4379 qp->s_acked_ack_queue = 0; in hfi1_rc_rcv_tid_rdma_write_data()
4382 hfi1_tid_write_alloc_resources(qp, true); in hfi1_rc_rcv_tid_rdma_write_data()
4389 qp->s_tail_ack_queue != qp->r_head_ack_queue) { in hfi1_rc_rcv_tid_rdma_write_data()
4390 qp->s_flags |= RVT_S_RESP_PENDING; in hfi1_rc_rcv_tid_rdma_write_data()
4391 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_write_data()
4397 hfi1_mod_tid_reap_timer(req->qp); in hfi1_rc_rcv_tid_rdma_write_data()
4399 hfi1_stop_tid_reap_timer(req->qp); in hfi1_rc_rcv_tid_rdma_write_data()
4403 tid_rdma_schedule_ack(qp); in hfi1_rc_rcv_tid_rdma_write_data()
4407 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_write_data()
4408 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_write_data()
4415 tid_rdma_trigger_ack(qp); in hfi1_rc_rcv_tid_rdma_write_data()
4426 u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e, in hfi1_build_tid_rdma_write_ack() argument
4430 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_write_ack()
4439 ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_write_ack()
4440 *bth1 = remote->qp; in hfi1_build_tid_rdma_write_ack()
4446 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_write_ack()
4450 cpu_to_be32((qp->r_msn & IB_MSN_MASK) | in hfi1_build_tid_rdma_write_ack()
4455 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp); in hfi1_build_tid_rdma_write_ack()
4459 cpu_to_be32(qpriv->tid_rdma.local.qp | in hfi1_build_tid_rdma_write_ack()
4500 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_ack() local
4501 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_ack()
4509 trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0); in hfi1_rc_rcv_tid_rdma_ack()
4510 process_ecn(qp, packet); in hfi1_rc_rcv_tid_rdma_ack()
4516 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_ack()
4517 trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn); in hfi1_rc_rcv_tid_rdma_ack()
4520 if ((qp->s_flags & HFI1_S_WAIT_HALT) && in hfi1_rc_rcv_tid_rdma_ack()
4534 if (unlikely(qp->s_acked == qp->s_tail)) in hfi1_rc_rcv_tid_rdma_ack()
4537 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_rc_rcv_tid_rdma_ack()
4543 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_ack()
4546 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4560 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_ack()
4564 wqe = do_rc_completion(qp, wqe, in hfi1_rc_rcv_tid_rdma_ack()
4565 to_iport(qp->ibqp.device, in hfi1_rc_rcv_tid_rdma_ack()
4566 qp->port_num)); in hfi1_rc_rcv_tid_rdma_ack()
4567 trace_hfi1_sender_rcv_tid_ack(qp); in hfi1_rc_rcv_tid_rdma_ack()
4569 if (qp->s_acked == qp->s_tail) in hfi1_rc_rcv_tid_rdma_ack()
4576 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); in hfi1_rc_rcv_tid_rdma_ack()
4579 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_rc_rcv_tid_rdma_ack()
4589 hfi1_mod_tid_retry_timer(qp); in hfi1_rc_rcv_tid_rdma_ack()
4591 hfi1_stop_tid_retry_timer(qp); in hfi1_rc_rcv_tid_rdma_ack()
4592 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_ack()
4598 hfi1_stop_tid_retry_timer(qp); in hfi1_rc_rcv_tid_rdma_ack()
4600 qp->s_flags &= ~HFI1_S_WAIT_HALT; in hfi1_rc_rcv_tid_rdma_ack()
4608 hfi1_schedule_send(qp); in hfi1_rc_rcv_tid_rdma_ack()
4610 if ((qp->s_acked == qpriv->s_tid_tail && in hfi1_rc_rcv_tid_rdma_ack()
4612 qp->s_acked == qp->s_tail) { in hfi1_rc_rcv_tid_rdma_ack()
4635 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_rc_rcv_tid_rdma_ack()
4664 last_acked = qp->s_acked; in hfi1_rc_rcv_tid_rdma_ack()
4696 trace_hfi1_tid_flow_rcv_tid_ack(qp, in hfi1_rc_rcv_tid_rdma_ack()
4702 if (last_acked == qp->s_size) in hfi1_rc_rcv_tid_rdma_ack()
4704 wqe = rvt_get_swqe_ptr(qp, last_acked); in hfi1_rc_rcv_tid_rdma_ack()
4708 qpriv->s_tid_tail = qp->s_acked; in hfi1_rc_rcv_tid_rdma_ack()
4710 hfi1_schedule_tid_send(qp); in hfi1_rc_rcv_tid_rdma_ack()
4713 qpriv->s_retry = qp->s_retry_cnt; in hfi1_rc_rcv_tid_rdma_ack()
4717 hfi1_stop_tid_retry_timer(qp); in hfi1_rc_rcv_tid_rdma_ack()
4727 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, in hfi1_rc_rcv_tid_rdma_ack()
4731 qpriv->s_tid_tail = qp->s_acked; in hfi1_rc_rcv_tid_rdma_ack()
4733 qpriv->s_retry = qp->s_retry_cnt; in hfi1_rc_rcv_tid_rdma_ack()
4734 hfi1_schedule_tid_send(qp); in hfi1_rc_rcv_tid_rdma_ack()
4747 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_ack()
4750 void hfi1_add_tid_retry_timer(struct rvt_qp *qp) in hfi1_add_tid_retry_timer() argument
4752 struct hfi1_qp_priv *priv = qp->priv; in hfi1_add_tid_retry_timer()
4753 struct ib_qp *ibqp = &qp->ibqp; in hfi1_add_tid_retry_timer()
4756 lockdep_assert_held(&qp->s_lock); in hfi1_add_tid_retry_timer()
4765 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp) in hfi1_mod_tid_retry_timer() argument
4767 struct hfi1_qp_priv *priv = qp->priv; in hfi1_mod_tid_retry_timer()
4768 struct ib_qp *ibqp = &qp->ibqp; in hfi1_mod_tid_retry_timer()
4771 lockdep_assert_held(&qp->s_lock); in hfi1_mod_tid_retry_timer()
4777 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp) in hfi1_stop_tid_retry_timer() argument
4779 struct hfi1_qp_priv *priv = qp->priv; in hfi1_stop_tid_retry_timer()
4782 lockdep_assert_held(&qp->s_lock); in hfi1_stop_tid_retry_timer()
4790 void hfi1_del_tid_retry_timer(struct rvt_qp *qp) in hfi1_del_tid_retry_timer() argument
4792 struct hfi1_qp_priv *priv = qp->priv; in hfi1_del_tid_retry_timer()
4801 struct rvt_qp *qp = priv->owner; in hfi1_tid_retry_timeout() local
4806 spin_lock_irqsave(&qp->r_lock, flags); in hfi1_tid_retry_timeout()
4807 spin_lock(&qp->s_lock); in hfi1_tid_retry_timeout()
4808 trace_hfi1_tid_write_sender_retry_timeout(qp, 0); in hfi1_tid_retry_timeout()
4810 hfi1_stop_tid_retry_timer(qp); in hfi1_tid_retry_timeout()
4813 qp, in hfi1_tid_retry_timeout()
4817 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_tid_retry_timeout()
4818 hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); in hfi1_tid_retry_timeout()
4819 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); in hfi1_tid_retry_timeout()
4821 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_tid_retry_timeout()
4824 qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req); in hfi1_tid_retry_timeout()
4833 qp->s_flags |= HFI1_S_WAIT_HALT; in hfi1_tid_retry_timeout()
4836 hfi1_schedule_tid_send(qp); in hfi1_tid_retry_timeout()
4839 spin_unlock(&qp->s_lock); in hfi1_tid_retry_timeout()
4840 spin_unlock_irqrestore(&qp->r_lock, flags); in hfi1_tid_retry_timeout()
4843 u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe, in hfi1_build_tid_rdma_resync() argument
4847 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_build_tid_rdma_resync()
4856 ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn); in hfi1_build_tid_rdma_resync()
4857 *bth1 = remote->qp; in hfi1_build_tid_rdma_resync()
4872 struct rvt_qp *qp = packet->qp; in hfi1_rc_rcv_tid_rdma_resync() local
4873 struct hfi1_qp_priv *qpriv = qp->priv; in hfi1_rc_rcv_tid_rdma_resync()
4875 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in hfi1_rc_rcv_tid_rdma_resync()
4884 fecn = process_ecn(qp, packet); in hfi1_rc_rcv_tid_rdma_resync()
4888 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_resync()
4921 trace_hfi1_tid_write_rsp_rcv_resync(qp); in hfi1_rc_rcv_tid_rdma_resync()
4933 e = &qp->s_ack_queue[idx]; in hfi1_rc_rcv_tid_rdma_resync()
4936 trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn, in hfi1_rc_rcv_tid_rdma_resync()
4960 trace_hfi1_tid_flow_rcv_resync(qp, flow_idx, in hfi1_rc_rcv_tid_rdma_resync()
4964 if (idx == qp->s_tail_ack_queue) in hfi1_rc_rcv_tid_rdma_resync()
4972 tid_rdma_trigger_ack(qp); in hfi1_rc_rcv_tid_rdma_resync()
4975 qp->s_flags |= RVT_S_ECN; in hfi1_rc_rcv_tid_rdma_resync()
4976 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_rc_rcv_tid_rdma_resync()
4983 static void update_tid_tail(struct rvt_qp *qp) in update_tid_tail() argument
4984 __must_hold(&qp->s_lock) in update_tid_tail()
4986 struct hfi1_qp_priv *priv = qp->priv; in update_tid_tail()
4990 lockdep_assert_held(&qp->s_lock); in update_tid_tail()
4995 if (i == qp->s_size) in update_tid_tail()
5000 wqe = rvt_get_swqe_ptr(qp, i); in update_tid_tail()
5008 int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) in hfi1_make_tid_rdma_pkt() argument
5009 __must_hold(&qp->s_lock) in hfi1_make_tid_rdma_pkt()
5011 struct hfi1_qp_priv *priv = qp->priv; in hfi1_make_tid_rdma_pkt()
5015 struct rvt_sge_state *ss = &qp->s_sge; in hfi1_make_tid_rdma_pkt()
5016 struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in hfi1_make_tid_rdma_pkt()
5021 lockdep_assert_held(&qp->s_lock); in hfi1_make_tid_rdma_pkt()
5022 trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0); in hfi1_make_tid_rdma_pkt()
5029 !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK | in hfi1_make_tid_rdma_pkt()
5032 !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) { in hfi1_make_tid_rdma_pkt()
5037 if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) { in hfi1_make_tid_rdma_pkt()
5043 ps->s_txreq = get_txreq(ps->dev, qp); in hfi1_make_tid_rdma_pkt()
5050 make_tid_rdma_ack(qp, ohdr, ps)) in hfi1_make_tid_rdma_pkt()
5059 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) in hfi1_make_tid_rdma_pkt()
5068 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail); in hfi1_make_tid_rdma_pkt()
5070 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn, in hfi1_make_tid_rdma_pkt()
5081 hfi1_tid_rdma_restart_req(qp, wqe, &bth2); in hfi1_make_tid_rdma_pkt()
5099 trace_hfi1_sender_make_tid_pkt(qp); in hfi1_make_tid_rdma_pkt()
5100 trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0); in hfi1_make_tid_rdma_pkt()
5101 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail); in hfi1_make_tid_rdma_pkt()
5108 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, in hfi1_make_tid_rdma_pkt()
5120 qp->s_flags |= HFI1_S_WAIT_TID_RESP; in hfi1_make_tid_rdma_pkt()
5126 update_tid_tail(qp); in hfi1_make_tid_rdma_pkt()
5134 trace_hfi1_sender_make_tid_pkt(qp); in hfi1_make_tid_rdma_pkt()
5136 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur); in hfi1_make_tid_rdma_pkt()
5140 wqe = rvt_get_swqe_ptr(qp, in hfi1_make_tid_rdma_pkt()
5141 (!priv->s_tid_cur ? qp->s_size : in hfi1_make_tid_rdma_pkt()
5145 hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1, in hfi1_make_tid_rdma_pkt()
5162 qp->s_len -= len; in hfi1_make_tid_rdma_pkt()
5167 hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2, in hfi1_make_tid_rdma_pkt()
5186 static int make_tid_rdma_ack(struct rvt_qp *qp, in make_tid_rdma_ack() argument
5191 struct hfi1_qp_priv *qpriv = qp->priv; in make_tid_rdma_ack()
5192 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in make_tid_rdma_ack()
5200 trace_hfi1_tid_write_rsp_make_tid_ack(qp); in make_tid_rdma_ack()
5202 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) in make_tid_rdma_ack()
5208 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5227 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5231 trace_hfi1_rsp_make_tid_ack(qp, e->psn); in make_tid_rdma_ack()
5232 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn, in make_tid_rdma_ack()
5270 if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ)) in make_tid_rdma_ack()
5272 nreq = ack_to_tid_req(&qp->s_ack_queue[next]); in make_tid_rdma_ack()
5277 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5298 e = &qp->s_ack_queue[qpriv->r_tid_ack]; in make_tid_rdma_ack()
5305 trace_hfi1_tid_write_rsp_make_tid_ack(qp); in make_tid_rdma_ack()
5306 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn, in make_tid_rdma_ack()
5308 hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1, in make_tid_rdma_ack()
5316 hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle, in make_tid_rdma_ack()
5330 static int hfi1_send_tid_ok(struct rvt_qp *qp) in hfi1_send_tid_ok() argument
5332 struct hfi1_qp_priv *priv = qp->priv; in hfi1_send_tid_ok()
5335 qp->s_flags & HFI1_S_ANY_WAIT_IO) && in hfi1_send_tid_ok()
5338 !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND)); in hfi1_send_tid_ok()
5344 struct rvt_qp *qp = iowait_to_qp(w->iow); in _hfi1_do_tid_send() local
5346 hfi1_do_tid_send(qp); in _hfi1_do_tid_send()
5349 static void hfi1_do_tid_send(struct rvt_qp *qp) in hfi1_do_tid_send() argument
5352 struct hfi1_qp_priv *priv = qp->priv; in hfi1_do_tid_send()
5354 ps.dev = to_idev(qp->ibqp.device); in hfi1_do_tid_send()
5355 ps.ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_do_tid_send()
5359 ps.timeout_int = qp->timeout_jiffies / 8; in hfi1_do_tid_send()
5361 trace_hfi1_rc_do_tid_send(qp, false); in hfi1_do_tid_send()
5362 spin_lock_irqsave(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5365 if (!hfi1_send_tid_ok(qp)) { in hfi1_do_tid_send()
5366 if (qp->s_flags & HFI1_S_ANY_WAIT_IO) in hfi1_do_tid_send()
5368 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5385 qp->s_flags |= RVT_S_BUSY; in hfi1_do_tid_send()
5388 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5394 if (hfi1_verbs_send(qp, &ps)) in hfi1_do_tid_send()
5398 if (hfi1_schedule_send_yield(qp, &ps, true)) in hfi1_do_tid_send()
5401 spin_lock_irqsave(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
5403 qp->s_flags &= ~RVT_S_BUSY; in hfi1_do_tid_send()
5408 hfi1_schedule_send(qp); in hfi1_do_tid_send()
5411 } while (hfi1_make_tid_rdma_pkt(qp, &ps)); in hfi1_do_tid_send()
5413 spin_unlock_irqrestore(&qp->s_lock, ps.flags); in hfi1_do_tid_send()
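
hfi1_do_tid_send() (lines 5349-5413) builds TID RDMA packets with the QP's s_lock held and RVT_S_BUSY set, drops the lock around the actual send, then loops while hfi1_make_tid_rdma_pkt() can produce another packet. The standalone sketch below mocks out the locking and packet building to show only that loop shape; it is a simplification, not the driver's control flow.

#include <stdbool.h>
#include <stdio.h>

#define S_BUSY 0x1u                      /* stands in for RVT_S_BUSY */

static unsigned int s_flags;
static int packets_left = 3;             /* pretend three packets are pending */

static void lock(void)   { /* spin_lock_irqsave(&qp->s_lock, ...) in the driver */ }
static void unlock(void) { /* spin_unlock_irqrestore(&qp->s_lock, ...) */ }

static bool make_pkt(void) { return packets_left-- > 0; }  /* build next packet */
static void send_pkt(void) { printf("send one packet\n"); }

int main(void)
{
	lock();
	if (!make_pkt()) {               /* nothing to send: bail out under the lock */
		unlock();
		return 0;
	}
	do {
		s_flags |= S_BUSY;       /* claim the send engine */
		unlock();                /* never hold the lock across the send */
		send_pkt();
		lock();
		s_flags &= ~S_BUSY;
	} while (make_pkt());            /* hfi1_make_tid_rdma_pkt() in the driver */
	unlock();
	return 0;
}
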
5416 static bool _hfi1_schedule_tid_send(struct rvt_qp *qp) in _hfi1_schedule_tid_send() argument
5418 struct hfi1_qp_priv *priv = qp->priv; in _hfi1_schedule_tid_send()
5420 to_iport(qp->ibqp.device, qp->port_num); in _hfi1_schedule_tid_send()
5446 bool hfi1_schedule_tid_send(struct rvt_qp *qp) in hfi1_schedule_tid_send() argument
5448 lockdep_assert_held(&qp->s_lock); in hfi1_schedule_tid_send()
5449 if (hfi1_send_tid_ok(qp)) { in hfi1_schedule_tid_send()
5456 _hfi1_schedule_tid_send(qp); in hfi1_schedule_tid_send()
5459 if (qp->s_flags & HFI1_S_ANY_WAIT_IO) in hfi1_schedule_tid_send()
5460 iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait, in hfi1_schedule_tid_send()
5465 bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e) in hfi1_tid_rdma_ack_interlock() argument
5469 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); in hfi1_tid_rdma_ack_interlock()
5470 struct hfi1_qp_priv *priv = qp->priv; in hfi1_tid_rdma_ack_interlock()
5473 s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) : in hfi1_tid_rdma_ack_interlock()
5474 (qp->s_tail_ack_queue - 1); in hfi1_tid_rdma_ack_interlock()
5475 prev = &qp->s_ack_queue[s_prev]; in hfi1_tid_rdma_ack_interlock()
5503 struct rvt_qp *qp, u32 psn, int diff, bool fecn) in tid_rdma_rcv_err() argument
5507 tid_rdma_rcv_error(packet, ohdr, qp, psn, diff); in tid_rdma_rcv_err()
5509 spin_lock_irqsave(&qp->s_lock, flags); in tid_rdma_rcv_err()
5510 qp->s_flags |= RVT_S_ECN; in tid_rdma_rcv_err()
5511 spin_unlock_irqrestore(&qp->s_lock, flags); in tid_rdma_rcv_err()