Lines matching refs: qp (grouped by function; numbers are source line numbers in erdma_qp.c; whether qp is an argument or a local is noted per function)

erdma_qp_llp_close()  (qp: argument)
   12  void erdma_qp_llp_close(struct erdma_qp *qp)
   16          down_write(&qp->state_lock);
   18          switch (qp->attrs.state) {
   24                  erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
   27                  qp->attrs.state = ERDMA_QP_STATE_IDLE;
   33          if (qp->cep) {
   34                  erdma_cep_put(qp->cep);
   35                  qp->cep = NULL;
   38          up_write(&qp->state_lock);
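These matched lines trace the LLP (TCP) close path: the state change is serialized behind qp->state_lock, and the connection endpoint reference is dropped exactly once. A hedged reconstruction of the elided switch body, using state names that appear elsewhere in this listing; the case arms are an assumption, not the driver's verbatim code:

    /* Sketch only: case arms are illustrative, not erdma's exact set. */
    down_write(&qp->state_lock);
    switch (qp->attrs.state) {
    case ERDMA_QP_STATE_RTS:        /* live connection: start teardown */
            qp_attrs.state = ERDMA_QP_STATE_CLOSING;
            erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
            break;
    default:                        /* connection never came up */
            qp->attrs.state = ERDMA_QP_STATE_IDLE;
            break;
    }
    if (qp->cep) {                  /* drop the CM endpoint reference */
            erdma_cep_put(qp->cep);
            qp->cep = NULL;
    }
    up_write(&qp->state_lock);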
erdma_get_ibqp()  (qp: local)
   43          struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);
   45          if (qp)
   46                  return &qp->ibqp;
erdma_modify_qp_state_to_rts()  (qp: argument)
   51  static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
   56          struct erdma_dev *dev = qp->dev;
   59          struct erdma_cep *cep = qp->cep;
   76          qp->attrs.state = ERDMA_QP_STATE_RTS;
   78          tp = tcp_sk(qp->cep->sock->sk);
   83          req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
   84                    FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
   85                    FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
   87          req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
   95          if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
   96                  req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
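Two details stand out in the RTS transition: req.cfg packs the QP state, the congestion-control algorithm, and the QPN into one configuration word via FIELD_PREP(), and for a passive-side (responder) QP the TCP send_nxt handed to hardware is advanced past the MPA reply, i.e. the fixed MPA header plus pd_len bytes of private data that go out on the same socket. A self-contained illustration of the FIELD_PREP() packing; the DEMO_* masks and demo_pack_cfg() are invented for this sketch, not erdma's real bit layout:

    #include <linux/bits.h>
    #include <linux/bitfield.h>
    #include <linux/types.h>

    /* Invented layout, for illustration only. */
    #define DEMO_STATE_MASK GENMASK(3, 0)
    #define DEMO_CC_MASK    GENMASK(7, 4)
    #define DEMO_QPN_MASK   GENMASK(31, 8)

    static u32 demo_pack_cfg(u8 state, u8 cc, u32 qpn)
    {
            /* FIELD_PREP() shifts each value into position under its mask. */
            return FIELD_PREP(DEMO_STATE_MASK, state) |
                   FIELD_PREP(DEMO_CC_MASK, cc) |
                   FIELD_PREP(DEMO_QPN_MASK, qpn);
    }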
erdma_modify_qp_state_to_stop()  (qp: argument)
  102  static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
  106          struct erdma_dev *dev = qp->dev;
  109          qp->attrs.state = attrs->state;
  115                    FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
erdma_modify_qp_internal()  (qp: argument)
  120  int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
  132          switch (qp->attrs.state) {
  136                  ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
  138                  qp->attrs.state = ERDMA_QP_STATE_ERROR;
  140                  if (qp->cep) {
  141                          erdma_cep_put(qp->cep);
  142                          qp->cep = NULL;
  144                  ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
  153                  ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
  159                  erdma_qp_cm_drop(qp);
  164                  qp->attrs.state = ERDMA_QP_STATE_ERROR;
  168                  qp->attrs.state = ERDMA_QP_STATE_IDLE;
  170                  ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
  171                  qp->attrs.state = ERDMA_QP_STATE_ERROR;
  180          if (need_reflush && !ret && rdma_is_kernel_res(&qp->ibqp.res)) {
  181                  qp->flags |= ERDMA_QP_IN_FLUSHING;
  182                  mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,
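When a kernel-owned QP is stopped with work possibly still queued (need_reflush), the driver flags it ERDMA_QP_IN_FLUSHING and arms a delayed work item on reflush_wq; the post paths shown further down re-arm the same work while the flag is set, so the flush effectively fires once posting activity quiesces. A minimal sketch of that mod_delayed_work() idiom; everything prefixed demo_ is a placeholder:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void demo_reflush(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);

            /* container_of(dwork, ...) would recover the owning object;
             * the real handler flushes stale WQEs here. */
            (void)dwork;
    }

    /* mod_delayed_work() pushes the timeout back if the work is already
     * queued, so frequent re-arming keeps deferring the flush. */
    static void demo_arm_reflush(struct workqueue_struct *wq,
                                 struct delayed_work *dwork)
    {
            mod_delayed_work(wq, dwork, msecs_to_jiffies(100));
    }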
erdma_qp_safe_free()  (qp: local)
  191          struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
  193          complete(&qp->safe_free);
erdma_qp_put()  (qp: argument)
  196  void erdma_qp_put(struct erdma_qp *qp)
  198          WARN_ON(kref_read(&qp->ref) < 1);
  199          kref_put(&qp->ref, erdma_qp_safe_free);
erdma_qp_get()  (qp: argument)
  202  void erdma_qp_get(struct erdma_qp *qp)
  204          kref_get(&qp->ref);
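erdma_qp_get()/erdma_qp_put() are the standard kref lifetime scheme, with the release callback completing qp->safe_free so a destroy path can sleep until the last reference is gone. The same pattern in generic form; struct demo_obj is hypothetical, and kref_init()/init_completion() are assumed to have run at creation:

    #include <linux/kref.h>
    #include <linux/completion.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct kref ref;
            struct completion safe_free;
    };

    static void demo_release(struct kref *ref)
    {
            struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

            complete(&obj->safe_free);      /* wake the destroy path */
    }

    /* Drop our own reference, then wait until every concurrent user
     * has dropped theirs before freeing the object. */
    static void demo_destroy(struct demo_obj *obj)
    {
            kref_put(&obj->ref, demo_release);
            wait_for_completion(&obj->safe_free);
            kfree(obj);
    }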
fill_inline_data()  (qp: argument)
  207  static int fill_inline_data(struct erdma_qp *qp,
  217          data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
  241                  data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
  242                                         qp->attrs.sq_size, SQEBB_SHIFT);
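fill_inline_data() copies payload across consecutive SQE basic blocks (SQEBBs), re-deriving the destination with get_queue_entry() each time it crosses a block boundary so the copy wraps correctly at the end of the ring. Assuming get_queue_entry() is the usual power-of-two ring lookup, it reduces to something like:

    #include <linux/types.h>

    /* Assumed shape of the helper: depth is a power of two and shift is
     * log2 of the entry size (SQEBB_SHIFT for SQ basic blocks). */
    static inline void *demo_queue_entry(void *base, u32 idx, u32 depth,
                                         u32 shift)
    {
            return base + ((u64)(idx & (depth - 1)) << shift);
    }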
fill_sgl()  (qp: argument)
  254  static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
  261          if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
  270                  sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
  271                                        qp->attrs.sq_size, SQEBB_SHIFT);
erdma_push_one_sqe()  (qp: argument)
  285  static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
  289          u32 idx = *pi & (qp->attrs.sq_size - 1);
  304          entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
  310          qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
  314                     ((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
  321          wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));
  365                  sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
  366                                        qp->attrs.sq_size, SQEBB_SHIFT);
  411                  memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
  412                                         qp->attrs.sq_size, SQEBB_SHIFT),
  447                  sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
  448                                        qp->attrs.sq_size, SQEBB_SHIFT);
  464                  ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
  471                  ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
kick_sq_db()  (qp: argument)
  490  static void kick_sq_db(struct erdma_qp *qp, u16 pi)
  492          u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
  495          *(u64 *)qp->kern_qp.sq_db_info = db_data;
  496          writeq(db_data, qp->kern_qp.hw_sq_db);
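kick_sq_db() publishes the doorbell value twice: once to a shadow db_info area in DMA-visible host memory, and once to the MMIO doorbell register via writeq(). A hedged sketch of the two-store idiom; the function and parameter names are placeholders:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Sketch: db_info is host memory the device can read back, hw_db is
     * an ioremapped register. The caller's spinlock orders the stores. */
    static void demo_kick_db(u64 db_data, u64 *db_info, void __iomem *hw_db)
    {
            *db_info = db_data;       /* shadow copy of the latest doorbell */
            writeq(db_data, hw_db);   /* ring the hardware doorbell */
    }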
erdma_post_send()  (qp: local)
  502          struct erdma_qp *qp = to_eqp(ibqp);
  511          spin_lock_irqsave(&qp->lock, flags);
  512          sq_pi = qp->kern_qp.sq_pi;
  515          if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
  521                  ret = erdma_push_one_sqe(qp, &sq_pi, wr);
  526                  qp->kern_qp.sq_pi = sq_pi;
  527                  kick_sq_db(qp, sq_pi);
  531          spin_unlock_irqrestore(&qp->lock, flags);
  533          if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING))
  534                  mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,
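The full-queue test on line 515 relies on unmasked 16-bit indices: sq_pi and sq_ci increment without wrapping to the queue depth, and their unsigned 16-bit difference is the exact ring occupancy even after either counter overflows. A runnable userspace demonstration of the wraparound arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t ci = 65530;           /* consumer index, near wrap */
            uint16_t pi = 65530 + 64;      /* producer index, wrapped to 58 */
            uint16_t depth = 128;

            /* Occupancy survives the wrap: 64 entries in flight. */
            assert((uint16_t)(pi - ci) == 64);
            assert((uint16_t)(pi - ci) < depth);   /* still room to post */
            return 0;
    }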
erdma_post_recv_one()  (qp: argument)
  540  static int erdma_post_recv_one(struct erdma_qp *qp,
  544                  get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
  545                                  qp->attrs.rq_size, RQE_SHIFT);
  547          rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
  548          rqe->qpn = cpu_to_le32(QP_ID(qp));
  560          *(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
  561          writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
  563          qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
  565          qp->kern_qp.rq_pi++;
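Both queues stash the caller's wr_id in a shadow table (swr_tbl on line 310, rwr_tbl on line 563) indexed by the masked producer index, so completion processing can map a CQE's ring slot back to the original work request. The shadow-table idiom in isolation; this assumed layout is a sketch, not the driver's code:

    #include <linux/types.h>

    /* Assumed: the table depth equals the power-of-two queue depth. */
    static void demo_stash_wr_id(u64 *wr_tbl, u16 *pi, u16 depth, u64 wr_id)
    {
            wr_tbl[*pi & (depth - 1)] = wr_id;  /* keyed by ring slot */
            (*pi)++;                            /* slot handed to hardware */
    }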
erdma_post_recv()  (qp: local)
  574          struct erdma_qp *qp = to_eqp(ibqp);
  578          spin_lock_irqsave(&qp->lock, flags);
  581                  ret = erdma_post_recv_one(qp, wr);
  589          spin_unlock_irqrestore(&qp->lock, flags);
  591          if (unlikely(qp->flags & ERDMA_QP_IN_FLUSHING))
  592                  mod_delayed_work(qp->dev->reflush_wq, &qp->reflush_dwork,