xref: /openbmc/linux/drivers/infiniband/hw/hfi1/qp.c (revision 145eba1a)
1*145eba1aSCai Huoqing // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
2f48ad614SDennis Dalessandro /*
36d72344cSKaike Wan  * Copyright(c) 2015 - 2020 Intel Corporation.
4f48ad614SDennis Dalessandro  */
5f48ad614SDennis Dalessandro 
6f48ad614SDennis Dalessandro #include <linux/err.h>
7f48ad614SDennis Dalessandro #include <linux/vmalloc.h>
8f48ad614SDennis Dalessandro #include <linux/hash.h>
9f48ad614SDennis Dalessandro #include <linux/module.h>
10f48ad614SDennis Dalessandro #include <linux/seq_file.h>
11f48ad614SDennis Dalessandro #include <rdma/rdma_vt.h>
12f48ad614SDennis Dalessandro #include <rdma/rdmavt_qp.h>
131ac57c50SMike Marciniszyn #include <rdma/ib_verbs.h>
14f48ad614SDennis Dalessandro 
15f48ad614SDennis Dalessandro #include "hfi.h"
16f48ad614SDennis Dalessandro #include "qp.h"
17f48ad614SDennis Dalessandro #include "trace.h"
18f48ad614SDennis Dalessandro #include "verbs_txreq.h"
19f48ad614SDennis Dalessandro 
20f48ad614SDennis Dalessandro unsigned int hfi1_qp_table_size = 256;
21f48ad614SDennis Dalessandro module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
22f48ad614SDennis Dalessandro MODULE_PARM_DESC(qp_table_size, "QP table size");
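/*
 * Editor's note (illustrative, not part of the driver): qp_table_size is a
 * read-only (S_IRUGO) module parameter, so it can only be chosen at load
 * time, e.g. "modprobe hfi1 qp_table_size=512", and then inspected through
 * /sys/module/hfi1/parameters/qp_table_size.
 */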
23f48ad614SDennis Dalessandro 
24f48ad614SDennis Dalessandro static void flush_tx_list(struct rvt_qp *qp);
25f48ad614SDennis Dalessandro static int iowait_sleep(
26f48ad614SDennis Dalessandro 	struct sdma_engine *sde,
275da0fc9dSDennis Dalessandro 	struct iowait_work *wait,
28f48ad614SDennis Dalessandro 	struct sdma_txreq *stx,
29bcad2913SKaike Wan 	unsigned int seq,
30bcad2913SKaike Wan 	bool pkts_sent);
31f48ad614SDennis Dalessandro static void iowait_wakeup(struct iowait *wait, int reason);
32f48ad614SDennis Dalessandro static void iowait_sdma_drained(struct iowait *wait);
33f48ad614SDennis Dalessandro static void qp_pio_drain(struct rvt_qp *qp);
34f48ad614SDennis Dalessandro 
351ac57c50SMike Marciniszyn const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
361ac57c50SMike Marciniszyn [IB_WR_RDMA_WRITE] = {
371ac57c50SMike Marciniszyn 	.length = sizeof(struct ib_rdma_wr),
381ac57c50SMike Marciniszyn 	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
391ac57c50SMike Marciniszyn },
401ac57c50SMike Marciniszyn 
411ac57c50SMike Marciniszyn [IB_WR_RDMA_READ] = {
421ac57c50SMike Marciniszyn 	.length = sizeof(struct ib_rdma_wr),
431ac57c50SMike Marciniszyn 	.qpt_support = BIT(IB_QPT_RC),
441ac57c50SMike Marciniszyn 	.flags = RVT_OPERATION_ATOMIC,
451ac57c50SMike Marciniszyn },
461ac57c50SMike Marciniszyn 
471ac57c50SMike Marciniszyn [IB_WR_ATOMIC_CMP_AND_SWP] = {
481ac57c50SMike Marciniszyn 	.length = sizeof(struct ib_atomic_wr),
491ac57c50SMike Marciniszyn 	.qpt_support = BIT(IB_QPT_RC),
501ac57c50SMike Marciniszyn 	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
511ac57c50SMike Marciniszyn },
521ac57c50SMike Marciniszyn 
531ac57c50SMike Marciniszyn [IB_WR_ATOMIC_FETCH_AND_ADD] = {
541ac57c50SMike Marciniszyn 	.length = sizeof(struct ib_atomic_wr),
551ac57c50SMike Marciniszyn 	.qpt_support = BIT(IB_QPT_RC),
561ac57c50SMike Marciniszyn 	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
571ac57c50SMike Marciniszyn },
581ac57c50SMike Marciniszyn 
591ac57c50SMike Marciniszyn [IB_WR_RDMA_WRITE_WITH_IMM] = {
601ac57c50SMike Marciniszyn 	.length = sizeof(struct ib_rdma_wr),
611ac57c50SMike Marciniszyn 	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
621ac57c50SMike Marciniszyn },
631ac57c50SMike Marciniszyn 
641ac57c50SMike Marciniszyn [IB_WR_SEND] = {
651ac57c50SMike Marciniszyn 	.length = sizeof(struct ib_send_wr),
661ac57c50SMike Marciniszyn 	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
671ac57c50SMike Marciniszyn 		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
681ac57c50SMike Marciniszyn },
691ac57c50SMike Marciniszyn 
701ac57c50SMike Marciniszyn [IB_WR_SEND_WITH_IMM] = {
711ac57c50SMike Marciniszyn 	.length = sizeof(struct ib_send_wr),
721ac57c50SMike Marciniszyn 	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
731ac57c50SMike Marciniszyn 		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
741ac57c50SMike Marciniszyn },
751ac57c50SMike Marciniszyn 
76c72cfe3eSJianxin Xiong [IB_WR_REG_MR] = {
77c72cfe3eSJianxin Xiong 	.length = sizeof(struct ib_reg_wr),
78c72cfe3eSJianxin Xiong 	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
79c72cfe3eSJianxin Xiong 	.flags = RVT_OPERATION_LOCAL,
80c72cfe3eSJianxin Xiong },
81c72cfe3eSJianxin Xiong 
82c72cfe3eSJianxin Xiong [IB_WR_LOCAL_INV] = {
83c72cfe3eSJianxin Xiong 	.length = sizeof(struct ib_send_wr),
84c72cfe3eSJianxin Xiong 	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
85c72cfe3eSJianxin Xiong 	.flags = RVT_OPERATION_LOCAL,
86c72cfe3eSJianxin Xiong },
87c72cfe3eSJianxin Xiong 
88c72cfe3eSJianxin Xiong [IB_WR_SEND_WITH_INV] = {
89c72cfe3eSJianxin Xiong 	.length = sizeof(struct ib_send_wr),
90c72cfe3eSJianxin Xiong 	.qpt_support = BIT(IB_QPT_RC),
91c72cfe3eSJianxin Xiong },
92c72cfe3eSJianxin Xiong 
9348a615dcSKaike Wan [IB_WR_OPFN] = {
9448a615dcSKaike Wan 	.length = sizeof(struct ib_atomic_wr),
9548a615dcSKaike Wan 	.qpt_support = BIT(IB_QPT_RC),
9648a615dcSKaike Wan 	.flags = RVT_OPERATION_USE_RESERVE,
9748a615dcSKaike Wan },
9848a615dcSKaike Wan 
993c6cb20aSKaike Wan [IB_WR_TID_RDMA_WRITE] = {
1003c6cb20aSKaike Wan 	.length = sizeof(struct ib_rdma_wr),
1013c6cb20aSKaike Wan 	.qpt_support = BIT(IB_QPT_RC),
1023c6cb20aSKaike Wan 	.flags = RVT_OPERATION_IGN_RNR_CNT,
1033c6cb20aSKaike Wan },
1043c6cb20aSKaike Wan 
1051ac57c50SMike Marciniszyn };
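/*
 * Editor's note (illustrative sketch, not part of the driver): each entry
 * above describes one work-request opcode for rdmavt, which is expected to
 * validate posted WRs against it.  A check along these lines would reject
 * an RDMA READ posted to a UC QP:
 *
 *	const struct rvt_operation_params *op =
 *		&hfi1_post_parms[IB_WR_RDMA_READ];
 *
 *	if (!(op->qpt_support & BIT(IB_QPT_UC)))
 *		return -EINVAL;		(RDMA READ is RC-only in this table)
 */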
1061ac57c50SMike Marciniszyn 
1075da0fc9dSDennis Dalessandro static void flush_list_head(struct list_head *l)
108f48ad614SDennis Dalessandro {
1095da0fc9dSDennis Dalessandro 	while (!list_empty(l)) {
110f48ad614SDennis Dalessandro 		struct sdma_txreq *tx;
111f48ad614SDennis Dalessandro 
112f48ad614SDennis Dalessandro 		tx = list_first_entry(
1135da0fc9dSDennis Dalessandro 			l,
114f48ad614SDennis Dalessandro 			struct sdma_txreq,
115f48ad614SDennis Dalessandro 			list);
116f48ad614SDennis Dalessandro 		list_del_init(&tx->list);
117f48ad614SDennis Dalessandro 		hfi1_put_txreq(
118f48ad614SDennis Dalessandro 			container_of(tx, struct verbs_txreq, txreq));
119f48ad614SDennis Dalessandro 	}
120f48ad614SDennis Dalessandro }
121f48ad614SDennis Dalessandro 
1225da0fc9dSDennis Dalessandro static void flush_tx_list(struct rvt_qp *qp)
1235da0fc9dSDennis Dalessandro {
1245da0fc9dSDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
1255da0fc9dSDennis Dalessandro 
1265da0fc9dSDennis Dalessandro 	flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
1275da0fc9dSDennis Dalessandro 	flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
1285da0fc9dSDennis Dalessandro }
1295da0fc9dSDennis Dalessandro 
130f48ad614SDennis Dalessandro static void flush_iowait(struct rvt_qp *qp)
131f48ad614SDennis Dalessandro {
132f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
133f48ad614SDennis Dalessandro 	unsigned long flags;
1344e045572SMike Marciniszyn 	seqlock_t *lock = priv->s_iowait.lock;
135f48ad614SDennis Dalessandro 
1364e045572SMike Marciniszyn 	if (!lock)
1374e045572SMike Marciniszyn 		return;
1384e045572SMike Marciniszyn 	write_seqlock_irqsave(lock, flags);
139f48ad614SDennis Dalessandro 	if (!list_empty(&priv->s_iowait.list)) {
140f48ad614SDennis Dalessandro 		list_del_init(&priv->s_iowait.list);
1414e045572SMike Marciniszyn 		priv->s_iowait.lock = NULL;
1424d6f85c3SMike Marciniszyn 		rvt_put_qp(qp);
143f48ad614SDennis Dalessandro 	}
1444e045572SMike Marciniszyn 	write_sequnlock_irqrestore(lock, flags);
145f48ad614SDennis Dalessandro }
146f48ad614SDennis Dalessandro 
14711edbb19SLee Jones /*
148f48ad614SDennis Dalessandro  * This function is what we would push to the core layer if we wanted to be a
149f48ad614SDennis Dalessandro  * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
150f48ad614SDennis Dalessandro  * to blindly pass the MTU enum value from the PathRecord to us.
151f48ad614SDennis Dalessandro  */
152f48ad614SDennis Dalessandro static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
153f48ad614SDennis Dalessandro {
154f48ad614SDennis Dalessandro 	/* Constraining 10KB packets to 8KB packets */
155f48ad614SDennis Dalessandro 	if (mtu == (enum ib_mtu)OPA_MTU_10240)
156e18321acSNathan Chancellor 		mtu = (enum ib_mtu)OPA_MTU_8192;
1576d72344cSKaike Wan 	return opa_mtu_enum_to_int((enum opa_mtu)mtu);
158f48ad614SDennis Dalessandro }
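/*
 * Editor's note (illustrative, not part of the driver): with the clamp
 * above, verbs_mtu_enum_to_int() maps OPA_MTU_10240 to 8192 bytes while
 * standard IB enums pass straight through, e.g.
 *
 *	verbs_mtu_enum_to_int(ibdev, IB_MTU_4096);                  -> 4096
 *	verbs_mtu_enum_to_int(ibdev, (enum ib_mtu)OPA_MTU_10240);   -> 8192
 */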
159f48ad614SDennis Dalessandro 
160f48ad614SDennis Dalessandro int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
161f48ad614SDennis Dalessandro 			 int attr_mask, struct ib_udata *udata)
162f48ad614SDennis Dalessandro {
163f48ad614SDennis Dalessandro 	struct ib_qp *ibqp = &qp->ibqp;
164f48ad614SDennis Dalessandro 	struct hfi1_ibdev *dev = to_idev(ibqp->device);
165f48ad614SDennis Dalessandro 	struct hfi1_devdata *dd = dd_from_dev(dev);
166f48ad614SDennis Dalessandro 	u8 sc;
167f48ad614SDennis Dalessandro 
168f48ad614SDennis Dalessandro 	if (attr_mask & IB_QP_AV) {
169f48ad614SDennis Dalessandro 		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
170f48ad614SDennis Dalessandro 		if (sc == 0xf)
171f48ad614SDennis Dalessandro 			return -EINVAL;
172f48ad614SDennis Dalessandro 
173f48ad614SDennis Dalessandro 		if (!qp_to_sdma_engine(qp, sc) &&
174f48ad614SDennis Dalessandro 		    dd->flags & HFI1_HAS_SEND_DMA)
175f48ad614SDennis Dalessandro 			return -EINVAL;
176f48ad614SDennis Dalessandro 
177f48ad614SDennis Dalessandro 		if (!qp_to_send_context(qp, sc))
178f48ad614SDennis Dalessandro 			return -EINVAL;
179f48ad614SDennis Dalessandro 	}
180f48ad614SDennis Dalessandro 
181f48ad614SDennis Dalessandro 	if (attr_mask & IB_QP_ALT_PATH) {
182f48ad614SDennis Dalessandro 		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
183f48ad614SDennis Dalessandro 		if (sc == 0xf)
184f48ad614SDennis Dalessandro 			return -EINVAL;
185f48ad614SDennis Dalessandro 
186f48ad614SDennis Dalessandro 		if (!qp_to_sdma_engine(qp, sc) &&
187f48ad614SDennis Dalessandro 		    dd->flags & HFI1_HAS_SEND_DMA)
188f48ad614SDennis Dalessandro 			return -EINVAL;
189f48ad614SDennis Dalessandro 
190f48ad614SDennis Dalessandro 		if (!qp_to_send_context(qp, sc))
191f48ad614SDennis Dalessandro 			return -EINVAL;
192f48ad614SDennis Dalessandro 	}
193f48ad614SDennis Dalessandro 
194f48ad614SDennis Dalessandro 	return 0;
195f48ad614SDennis Dalessandro }
196f48ad614SDennis Dalessandro 
197d98bb7f7SDon Hiatt /*
198d98bb7f7SDon Hiatt  * qp_set_16b - Set the hdr_type based on whether the slid or the
199d98bb7f7SDon Hiatt  * dlid in the connection is extended. Only applicable for RC and UC
200d98bb7f7SDon Hiatt  * QPs. UD QPs determine this on the fly from the ah in the wqe
201d98bb7f7SDon Hiatt  */
202d98bb7f7SDon Hiatt static inline void qp_set_16b(struct rvt_qp *qp)
203d98bb7f7SDon Hiatt {
204d98bb7f7SDon Hiatt 	struct hfi1_pportdata *ppd;
205d98bb7f7SDon Hiatt 	struct hfi1_ibport *ibp;
206d98bb7f7SDon Hiatt 	struct hfi1_qp_priv *priv = qp->priv;
207d98bb7f7SDon Hiatt 
208d98bb7f7SDon Hiatt 	/* Update ah_attr to account for extended LIDs */
209d98bb7f7SDon Hiatt 	hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);
210d98bb7f7SDon Hiatt 
211d98bb7f7SDon Hiatt 	/* Create 32 bit LIDs */
212d98bb7f7SDon Hiatt 	hfi1_make_opa_lid(&qp->remote_ah_attr);
213d98bb7f7SDon Hiatt 
214d98bb7f7SDon Hiatt 	if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
215d98bb7f7SDon Hiatt 		return;
216d98bb7f7SDon Hiatt 
217d98bb7f7SDon Hiatt 	ibp = to_iport(qp->ibqp.device, qp->port_num);
218d98bb7f7SDon Hiatt 	ppd = ppd_from_ibp(ibp);
219d98bb7f7SDon Hiatt 	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
220d98bb7f7SDon Hiatt }
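/*
 * Editor's note (assumption, illustrative only): after hfi1_make_opa_lid()
 * expands the LIDs, hfi1_get_hdr_type() is expected to select the wire
 * header format, roughly:
 *
 *	priv->hdr_type == HFI1_PKT_TYPE_16B   when either LID is an extended
 *	                                      (32-bit OPA) LID
 *	priv->hdr_type == HFI1_PKT_TYPE_9B    when both LIDs fit 9B headers
 */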
221d98bb7f7SDon Hiatt 
222f48ad614SDennis Dalessandro void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
223f48ad614SDennis Dalessandro 		    int attr_mask, struct ib_udata *udata)
224f48ad614SDennis Dalessandro {
225f48ad614SDennis Dalessandro 	struct ib_qp *ibqp = &qp->ibqp;
226f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
227f48ad614SDennis Dalessandro 
228f48ad614SDennis Dalessandro 	if (attr_mask & IB_QP_AV) {
229f48ad614SDennis Dalessandro 		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
230f48ad614SDennis Dalessandro 		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
231f48ad614SDennis Dalessandro 		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
232d98bb7f7SDon Hiatt 		qp_set_16b(qp);
233f48ad614SDennis Dalessandro 	}
234f48ad614SDennis Dalessandro 
235f48ad614SDennis Dalessandro 	if (attr_mask & IB_QP_PATH_MIG_STATE &&
236f48ad614SDennis Dalessandro 	    attr->path_mig_state == IB_MIG_MIGRATED &&
237f48ad614SDennis Dalessandro 	    qp->s_mig_state == IB_MIG_ARMED) {
2382e2ba09eSMike Marciniszyn 		qp->s_flags |= HFI1_S_AHG_CLEAR;
239f48ad614SDennis Dalessandro 		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
240f48ad614SDennis Dalessandro 		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
241f48ad614SDennis Dalessandro 		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
242d98bb7f7SDon Hiatt 		qp_set_16b(qp);
243f48ad614SDennis Dalessandro 	}
24448a615dcSKaike Wan 
24548a615dcSKaike Wan 	opfn_qp_init(qp, attr, attr_mask);
246f48ad614SDennis Dalessandro }
247f48ad614SDennis Dalessandro 
248f48ad614SDennis Dalessandro /**
249d205a06aSKaike Wan  * hfi1_setup_wqe - set up the wqe
25011edbb19SLee Jones  * @qp: The qp
25111edbb19SLee Jones  * @wqe: The built wqe
25211edbb19SLee Jones  * @call_send: Determine if the send should be posted or scheduled.
253f48ad614SDennis Dalessandro  *
254d205a06aSKaike Wan  * Perform setup of the wqe.  This is called
255d205a06aSKaike Wan  * prior to inserting the wqe into the ring but after
256d205a06aSKaike Wan  * the wqe has been setup by RDMAVT. This function
257d205a06aSKaike Wan  * the wqe has been set up by RDMAVT. This function
258d205a06aSKaike Wan  * validation and additional setup of the wqe.
259d205a06aSKaike Wan  *
260f48ad614SDennis Dalessandro  * Returns 0 on success, -EINVAL on failure
261f48ad614SDennis Dalessandro  *
262f48ad614SDennis Dalessandro  */
263d205a06aSKaike Wan int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
264f48ad614SDennis Dalessandro {
265f48ad614SDennis Dalessandro 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
266f48ad614SDennis Dalessandro 	struct rvt_ah *ah;
267bfe397c3SKaike Wan 	struct hfi1_pportdata *ppd;
268bfe397c3SKaike Wan 	struct hfi1_devdata *dd;
269f48ad614SDennis Dalessandro 
270f48ad614SDennis Dalessandro 	switch (qp->ibqp.qp_type) {
271f48ad614SDennis Dalessandro 	case IB_QPT_RC:
272f1ab4efaSKaike Wan 		hfi1_setup_tid_rdma_wqe(qp, wqe);
2736f24b159SGustavo A. R. Silva 		fallthrough;
274f48ad614SDennis Dalessandro 	case IB_QPT_UC:
275f48ad614SDennis Dalessandro 		if (wqe->length > 0x80000000U)
276f48ad614SDennis Dalessandro 			return -EINVAL;
2770b79b277SMichael J. Ruhl 		if (wqe->length > qp->pmtu)
2780b79b277SMichael J. Ruhl 			*call_send = false;
279f48ad614SDennis Dalessandro 		break;
280f48ad614SDennis Dalessandro 	case IB_QPT_SMI:
281bfe397c3SKaike Wan 		/*
282bfe397c3SKaike Wan 		 * SM packets should exclusively use VL15 and their SL is
283bfe397c3SKaike Wan 		 * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
284bfe397c3SKaike Wan 		 * is created, SL is 0 in most cases and as a result some
285bfe397c3SKaike Wan 		 * fields (vl and pmtu) in ah may not be set correctly,
286bfe397c3SKaike Wan 		 * depending on the SL2SC and SC2VL tables at the time.
287bfe397c3SKaike Wan 		 */
288bfe397c3SKaike Wan 		ppd = ppd_from_ibp(ibp);
289bfe397c3SKaike Wan 		dd = dd_from_ppd(ppd);
290bfe397c3SKaike Wan 		if (wqe->length > dd->vld[15].mtu)
291f48ad614SDennis Dalessandro 			return -EINVAL;
292f48ad614SDennis Dalessandro 		break;
293f48ad614SDennis Dalessandro 	case IB_QPT_GSI:
294f48ad614SDennis Dalessandro 	case IB_QPT_UD:
2952b0ad2daSMichael J. Ruhl 		ah = rvt_get_swqe_ah(wqe);
296f48ad614SDennis Dalessandro 		if (wqe->length > (1 << ah->log_pmtu))
297f48ad614SDennis Dalessandro 			return -EINVAL;
298d8966fcdSDasaratharaman Chandramouli 		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
299f48ad614SDennis Dalessandro 			return -EINVAL;
3004846bf44SGustavo A. R. Silva 		break;
301f48ad614SDennis Dalessandro 	default:
302f48ad614SDennis Dalessandro 		break;
303f48ad614SDennis Dalessandro 	}
30490b2620eSMichael J. Ruhl 
30590b2620eSMichael J. Ruhl 	/*
30690b2620eSMichael J. Ruhl 	 * System latency between send and schedule is large enough that
30790b2620eSMichael J. Ruhl 	 * forcing call_send to true for piothreshold packets is necessary.
30890b2620eSMichael J. Ruhl 	 */
30990b2620eSMichael J. Ruhl 	if (wqe->length <= piothreshold)
31090b2620eSMichael J. Ruhl 		*call_send = true;
3110b79b277SMichael J. Ruhl 	return 0;
312f48ad614SDennis Dalessandro }
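/*
 * Editor's note (illustrative summary, not part of the driver): for an
 * RC/UC wqe the net effect on *call_send is roughly:
 *
 *	if (wqe->length > qp->pmtu)
 *		*call_send = false;	(multi-packet: leave it to the send engine)
 *	if (wqe->length <= piothreshold)
 *		*call_send = true;	(small enough to send from the caller)
 */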
313f48ad614SDennis Dalessandro 
314f48ad614SDennis Dalessandro /**
315f48ad614SDennis Dalessandro  * _hfi1_schedule_send - schedule progress
316f48ad614SDennis Dalessandro  * @qp: the QP
317f48ad614SDennis Dalessandro  *
318f48ad614SDennis Dalessandro  * This schedules qp progress w/o regard to the s_flags.
319f48ad614SDennis Dalessandro  *
320f48ad614SDennis Dalessandro  * It is only used in the post send, which doesn't hold
321f48ad614SDennis Dalessandro  * the s_lock.
322f48ad614SDennis Dalessandro  */
3235da0fc9dSDennis Dalessandro bool _hfi1_schedule_send(struct rvt_qp *qp)
324f48ad614SDennis Dalessandro {
325f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
326f48ad614SDennis Dalessandro 	struct hfi1_ibport *ibp =
327f48ad614SDennis Dalessandro 		to_iport(qp->ibqp.device, qp->port_num);
328f48ad614SDennis Dalessandro 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
32928b70cd9SKaike Wan 	struct hfi1_devdata *dd = ppd->dd;
33028b70cd9SKaike Wan 
33128b70cd9SKaike Wan 	if (dd->flags & HFI1_SHUTDOWN)
33228b70cd9SKaike Wan 		return true;
333f48ad614SDennis Dalessandro 
3345da0fc9dSDennis Dalessandro 	return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
335f48ad614SDennis Dalessandro 			       priv->s_sde ?
336f48ad614SDennis Dalessandro 			       priv->s_sde->cpu :
337f48ad614SDennis Dalessandro 			       cpumask_first(cpumask_of_node(dd->node)));
338f48ad614SDennis Dalessandro }
339f48ad614SDennis Dalessandro 
340f48ad614SDennis Dalessandro static void qp_pio_drain(struct rvt_qp *qp)
341f48ad614SDennis Dalessandro {
342f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
343f48ad614SDennis Dalessandro 
344f48ad614SDennis Dalessandro 	if (!priv->s_sendcontext)
345f48ad614SDennis Dalessandro 		return;
346f48ad614SDennis Dalessandro 	while (iowait_pio_pending(&priv->s_iowait)) {
3479aefcabeSMike Marciniszyn 		write_seqlock_irq(&priv->s_sendcontext->waitlock);
348f48ad614SDennis Dalessandro 		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
3499aefcabeSMike Marciniszyn 		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
350f48ad614SDennis Dalessandro 		iowait_pio_drain(&priv->s_iowait);
3519aefcabeSMike Marciniszyn 		write_seqlock_irq(&priv->s_sendcontext->waitlock);
352f48ad614SDennis Dalessandro 		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
3539aefcabeSMike Marciniszyn 		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
354f48ad614SDennis Dalessandro 	}
355f48ad614SDennis Dalessandro }
356f48ad614SDennis Dalessandro 
357f48ad614SDennis Dalessandro /**
358f48ad614SDennis Dalessandro  * hfi1_schedule_send - schedule progress
359f48ad614SDennis Dalessandro  * @qp: the QP
360f48ad614SDennis Dalessandro  *
361f48ad614SDennis Dalessandro  * This schedules qp progress and caller should hold
362f48ad614SDennis Dalessandro  * the s_lock.
3635da0fc9dSDennis Dalessandro  * @return true if the first leg is scheduled;
3645da0fc9dSDennis Dalessandro  * false if the first leg is not scheduled.
365f48ad614SDennis Dalessandro  */
3665da0fc9dSDennis Dalessandro bool hfi1_schedule_send(struct rvt_qp *qp)
367f48ad614SDennis Dalessandro {
36868e78b3dSMike Marciniszyn 	lockdep_assert_held(&qp->s_lock);
3695da0fc9dSDennis Dalessandro 	if (hfi1_send_ok(qp)) {
370f48ad614SDennis Dalessandro 		_hfi1_schedule_send(qp);
3715da0fc9dSDennis Dalessandro 		return true;
3725da0fc9dSDennis Dalessandro 	}
3735da0fc9dSDennis Dalessandro 	if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
3745da0fc9dSDennis Dalessandro 		iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
3755da0fc9dSDennis Dalessandro 				IOWAIT_PENDING_IB);
3765da0fc9dSDennis Dalessandro 	return false;
3775da0fc9dSDennis Dalessandro }
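/*
 * Editor's note (illustrative, hypothetical caller): hfi1_schedule_send()
 * must be called with qp->s_lock held, e.g.
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	scheduled = hfi1_schedule_send(qp);	(true: first leg queued)
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */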
3785da0fc9dSDennis Dalessandro 
3795da0fc9dSDennis Dalessandro static void hfi1_qp_schedule(struct rvt_qp *qp)
3805da0fc9dSDennis Dalessandro {
3815da0fc9dSDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
3825da0fc9dSDennis Dalessandro 	bool ret;
3835da0fc9dSDennis Dalessandro 
3845da0fc9dSDennis Dalessandro 	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
3855da0fc9dSDennis Dalessandro 		ret = hfi1_schedule_send(qp);
3865da0fc9dSDennis Dalessandro 		if (ret)
3875da0fc9dSDennis Dalessandro 			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
3885da0fc9dSDennis Dalessandro 	}
389572f0c33SKaike Wan 	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
390572f0c33SKaike Wan 		ret = hfi1_schedule_tid_send(qp);
391572f0c33SKaike Wan 		if (ret)
392572f0c33SKaike Wan 			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
393572f0c33SKaike Wan 	}
394f48ad614SDennis Dalessandro }
395f48ad614SDennis Dalessandro 
396f48ad614SDennis Dalessandro void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
397f48ad614SDennis Dalessandro {
398f48ad614SDennis Dalessandro 	unsigned long flags;
399f48ad614SDennis Dalessandro 
400f48ad614SDennis Dalessandro 	spin_lock_irqsave(&qp->s_lock, flags);
401f48ad614SDennis Dalessandro 	if (qp->s_flags & flag) {
402f48ad614SDennis Dalessandro 		qp->s_flags &= ~flag;
403f48ad614SDennis Dalessandro 		trace_hfi1_qpwakeup(qp, flag);
4045da0fc9dSDennis Dalessandro 		hfi1_qp_schedule(qp);
405f48ad614SDennis Dalessandro 	}
406f48ad614SDennis Dalessandro 	spin_unlock_irqrestore(&qp->s_lock, flags);
407f48ad614SDennis Dalessandro 	/* Notify hfi1_destroy_qp() if it is waiting. */
4084d6f85c3SMike Marciniszyn 	rvt_put_qp(qp);
409f48ad614SDennis Dalessandro }
410f48ad614SDennis Dalessandro 
4115da0fc9dSDennis Dalessandro void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
4125da0fc9dSDennis Dalessandro {
413572f0c33SKaike Wan 	struct hfi1_qp_priv *priv = qp->priv;
414572f0c33SKaike Wan 
415572f0c33SKaike Wan 	if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
4165da0fc9dSDennis Dalessandro 		qp->s_flags &= ~RVT_S_BUSY;
417572f0c33SKaike Wan 		/*
418572f0c33SKaike Wan 		 * If we are sending a first-leg packet from the second leg,
419572f0c33SKaike Wan 		 * we need to clear the busy flag from priv->s_flags to
420572f0c33SKaike Wan 		 * avoid a race condition when the qp wakes up before
421572f0c33SKaike Wan 		 * the call to hfi1_verbs_send() returns to the second
422572f0c33SKaike Wan 		 * leg. In that case, the second leg will terminate without
423572f0c33SKaike Wan 		 * being re-scheduled, resulting in failure to send TID RDMA
424572f0c33SKaike Wan 		 * WRITE DATA and TID RDMA ACK packets.
425572f0c33SKaike Wan 		 */
426572f0c33SKaike Wan 		if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
427572f0c33SKaike Wan 			priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
428572f0c33SKaike Wan 					   RVT_S_BUSY);
429572f0c33SKaike Wan 			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
430572f0c33SKaike Wan 		}
431572f0c33SKaike Wan 	} else {
432572f0c33SKaike Wan 		priv->s_flags &= ~RVT_S_BUSY;
433572f0c33SKaike Wan 	}
4345da0fc9dSDennis Dalessandro }
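/*
 * Editor's note (assumption, illustrative only): iowait_set_work_flag()
 * reports which leg owns @wait, so the busy bits are expected to be
 * dropped as:
 *
 *	first (IB) leg   - clear RVT_S_BUSY in qp->s_flags, and also
 *	                   HFI1_S_TID_BUSY_SET if the second leg had set it
 *	second (TID) leg - clear only RVT_S_BUSY in priv->s_flags
 */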
4355da0fc9dSDennis Dalessandro 
436f48ad614SDennis Dalessandro static int iowait_sleep(
437f48ad614SDennis Dalessandro 	struct sdma_engine *sde,
4385da0fc9dSDennis Dalessandro 	struct iowait_work *wait,
439f48ad614SDennis Dalessandro 	struct sdma_txreq *stx,
440bcad2913SKaike Wan 	uint seq,
441bcad2913SKaike Wan 	bool pkts_sent)
442f48ad614SDennis Dalessandro {
443f48ad614SDennis Dalessandro 	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
444f48ad614SDennis Dalessandro 	struct rvt_qp *qp;
445f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv;
446f48ad614SDennis Dalessandro 	unsigned long flags;
447f48ad614SDennis Dalessandro 	int ret = 0;
448f48ad614SDennis Dalessandro 
449f48ad614SDennis Dalessandro 	qp = tx->qp;
450f48ad614SDennis Dalessandro 	priv = qp->priv;
451f48ad614SDennis Dalessandro 
452f48ad614SDennis Dalessandro 	spin_lock_irqsave(&qp->s_lock, flags);
453f48ad614SDennis Dalessandro 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
454f48ad614SDennis Dalessandro 		/*
455f48ad614SDennis Dalessandro 		 * If we couldn't queue the DMA request, save the info
456f48ad614SDennis Dalessandro 		 * and try again later rather than destroying the
457f48ad614SDennis Dalessandro 		 * buffer and undoing the side effects of the copy.
458f48ad614SDennis Dalessandro 		 */
459f48ad614SDennis Dalessandro 		/* Make a common routine? */
460f48ad614SDennis Dalessandro 		list_add_tail(&stx->list, &wait->tx_head);
4619aefcabeSMike Marciniszyn 		write_seqlock(&sde->waitlock);
462f48ad614SDennis Dalessandro 		if (sdma_progress(sde, seq, stx))
463f48ad614SDennis Dalessandro 			goto eagain;
464f48ad614SDennis Dalessandro 		if (list_empty(&priv->s_iowait.list)) {
465f48ad614SDennis Dalessandro 			struct hfi1_ibport *ibp =
466f48ad614SDennis Dalessandro 				to_iport(qp->ibqp.device, qp->port_num);
467f48ad614SDennis Dalessandro 
468f48ad614SDennis Dalessandro 			ibp->rvp.n_dmawait++;
469f48ad614SDennis Dalessandro 			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
47034025fb0SKaike Wan 			iowait_get_priority(&priv->s_iowait);
471bcad2913SKaike Wan 			iowait_queue(pkts_sent, &priv->s_iowait,
472bcad2913SKaike Wan 				     &sde->dmawait);
4739aefcabeSMike Marciniszyn 			priv->s_iowait.lock = &sde->waitlock;
474f48ad614SDennis Dalessandro 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
4754d6f85c3SMike Marciniszyn 			rvt_get_qp(qp);
476f48ad614SDennis Dalessandro 		}
4779aefcabeSMike Marciniszyn 		write_sequnlock(&sde->waitlock);
4785da0fc9dSDennis Dalessandro 		hfi1_qp_unbusy(qp, wait);
479f48ad614SDennis Dalessandro 		spin_unlock_irqrestore(&qp->s_lock, flags);
480f48ad614SDennis Dalessandro 		ret = -EBUSY;
481f48ad614SDennis Dalessandro 	} else {
482f48ad614SDennis Dalessandro 		spin_unlock_irqrestore(&qp->s_lock, flags);
483f48ad614SDennis Dalessandro 		hfi1_put_txreq(tx);
484f48ad614SDennis Dalessandro 	}
485f48ad614SDennis Dalessandro 	return ret;
486f48ad614SDennis Dalessandro eagain:
4879aefcabeSMike Marciniszyn 	write_sequnlock(&sde->waitlock);
488f48ad614SDennis Dalessandro 	spin_unlock_irqrestore(&qp->s_lock, flags);
489f48ad614SDennis Dalessandro 	list_del_init(&stx->list);
490f48ad614SDennis Dalessandro 	return -EAGAIN;
491f48ad614SDennis Dalessandro }
492f48ad614SDennis Dalessandro 
493f48ad614SDennis Dalessandro static void iowait_wakeup(struct iowait *wait, int reason)
494f48ad614SDennis Dalessandro {
495f48ad614SDennis Dalessandro 	struct rvt_qp *qp = iowait_to_qp(wait);
496f48ad614SDennis Dalessandro 
497f48ad614SDennis Dalessandro 	WARN_ON(reason != SDMA_AVAIL_REASON);
498f48ad614SDennis Dalessandro 	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
499f48ad614SDennis Dalessandro }
500f48ad614SDennis Dalessandro 
501f48ad614SDennis Dalessandro static void iowait_sdma_drained(struct iowait *wait)
502f48ad614SDennis Dalessandro {
503f48ad614SDennis Dalessandro 	struct rvt_qp *qp = iowait_to_qp(wait);
5047049de65SMike Marciniszyn 	unsigned long flags;
505f48ad614SDennis Dalessandro 
506f48ad614SDennis Dalessandro 	/*
507f48ad614SDennis Dalessandro 	 * This happens when the send engine notes
508f48ad614SDennis Dalessandro 	 * a QP in the error state and cannot
509f48ad614SDennis Dalessandro 	 * do the flush work until that QP's
510f48ad614SDennis Dalessandro 	 * sdma work has finished.
511f48ad614SDennis Dalessandro 	 */
5127049de65SMike Marciniszyn 	spin_lock_irqsave(&qp->s_lock, flags);
513f48ad614SDennis Dalessandro 	if (qp->s_flags & RVT_S_WAIT_DMA) {
514f48ad614SDennis Dalessandro 		qp->s_flags &= ~RVT_S_WAIT_DMA;
515f48ad614SDennis Dalessandro 		hfi1_schedule_send(qp);
516f48ad614SDennis Dalessandro 	}
5177049de65SMike Marciniszyn 	spin_unlock_irqrestore(&qp->s_lock, flags);
518f48ad614SDennis Dalessandro }
519f48ad614SDennis Dalessandro 
52034025fb0SKaike Wan static void hfi1_init_priority(struct iowait *w)
52134025fb0SKaike Wan {
52234025fb0SKaike Wan 	struct rvt_qp *qp = iowait_to_qp(w);
52334025fb0SKaike Wan 	struct hfi1_qp_priv *priv = qp->priv;
52434025fb0SKaike Wan 
52534025fb0SKaike Wan 	if (qp->s_flags & RVT_S_ACK_PENDING)
52634025fb0SKaike Wan 		w->priority++;
52734025fb0SKaike Wan 	if (priv->s_flags & RVT_S_ACK_PENDING)
52834025fb0SKaike Wan 		w->priority++;
52934025fb0SKaike Wan }
53034025fb0SKaike Wan 
531f48ad614SDennis Dalessandro /**
532f48ad614SDennis Dalessandro  * qp_to_sdma_engine - map a qp to a send engine
533f48ad614SDennis Dalessandro  * @qp: the QP
534f48ad614SDennis Dalessandro  * @sc5: the 5 bit sc
535f48ad614SDennis Dalessandro  *
536f48ad614SDennis Dalessandro  * Return:
537f48ad614SDennis Dalessandro  * A send engine for the qp or NULL for SMI type qp.
538f48ad614SDennis Dalessandro  */
539f48ad614SDennis Dalessandro struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
540f48ad614SDennis Dalessandro {
541f48ad614SDennis Dalessandro 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
542f48ad614SDennis Dalessandro 	struct sdma_engine *sde;
543f48ad614SDennis Dalessandro 
544f48ad614SDennis Dalessandro 	if (!(dd->flags & HFI1_HAS_SEND_DMA))
545f48ad614SDennis Dalessandro 		return NULL;
546f48ad614SDennis Dalessandro 	switch (qp->ibqp.qp_type) {
547f48ad614SDennis Dalessandro 	case IB_QPT_SMI:
548f48ad614SDennis Dalessandro 		return NULL;
549f48ad614SDennis Dalessandro 	default:
550f48ad614SDennis Dalessandro 		break;
551f48ad614SDennis Dalessandro 	}
552f48ad614SDennis Dalessandro 	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
553f48ad614SDennis Dalessandro 	return sde;
554f48ad614SDennis Dalessandro }
555f48ad614SDennis Dalessandro 
55611edbb19SLee Jones /**
557f48ad614SDennis Dalessandro  * qp_to_send_context - map a qp to a send context
558f48ad614SDennis Dalessandro  * @qp: the QP
559f48ad614SDennis Dalessandro  * @sc5: the 5 bit sc
560f48ad614SDennis Dalessandro  *
561f48ad614SDennis Dalessandro  * Return:
562f48ad614SDennis Dalessandro  * A send context for the qp
563f48ad614SDennis Dalessandro  */
564f48ad614SDennis Dalessandro struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
565f48ad614SDennis Dalessandro {
566f48ad614SDennis Dalessandro 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
567f48ad614SDennis Dalessandro 
568f48ad614SDennis Dalessandro 	switch (qp->ibqp.qp_type) {
569f48ad614SDennis Dalessandro 	case IB_QPT_SMI:
570f48ad614SDennis Dalessandro 		/* SMA packets to VL15 */
571f48ad614SDennis Dalessandro 		return dd->vld[15].sc;
572f48ad614SDennis Dalessandro 	default:
573f48ad614SDennis Dalessandro 		break;
574f48ad614SDennis Dalessandro 	}
575f48ad614SDennis Dalessandro 
576f48ad614SDennis Dalessandro 	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
577f48ad614SDennis Dalessandro 					  sc5);
578f48ad614SDennis Dalessandro }
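/*
 * Editor's note (illustrative, not part of the driver): both mapping
 * helpers key off the QP number (shifted by dd->qos_shift) plus the 5-bit
 * SC, e.g. for a data QP:
 *
 *	sde = qp_to_sdma_engine(qp, priv->s_sc);   (NULL for SMI / no SDMA)
 *	sc  = qp_to_send_context(qp, priv->s_sc);  (VL15 context for SMI)
 */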
579f48ad614SDennis Dalessandro 
580f48ad614SDennis Dalessandro static const char * const qp_type_str[] = {
581f48ad614SDennis Dalessandro 	"SMI", "GSI", "RC", "UC", "UD",
582f48ad614SDennis Dalessandro };
583f48ad614SDennis Dalessandro 
584f48ad614SDennis Dalessandro static int qp_idle(struct rvt_qp *qp)
585f48ad614SDennis Dalessandro {
586f48ad614SDennis Dalessandro 	return
587f48ad614SDennis Dalessandro 		qp->s_last == qp->s_acked &&
588f48ad614SDennis Dalessandro 		qp->s_acked == qp->s_cur &&
589f48ad614SDennis Dalessandro 		qp->s_cur == qp->s_tail &&
590f48ad614SDennis Dalessandro 		qp->s_tail == qp->s_head;
591f48ad614SDennis Dalessandro }
592f48ad614SDennis Dalessandro 
593e5c197acSMike Marciniszyn /**
594e5c197acSMike Marciniszyn  * qp_iter_print - print the qp information to seq_file
595e5c197acSMike Marciniszyn  * @s: the seq_file to emit the qp information on
596e5c197acSMike Marciniszyn  * @iter: the iterator for the qp hash list
597e5c197acSMike Marciniszyn  */
598e5c197acSMike Marciniszyn void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
599f48ad614SDennis Dalessandro {
600f48ad614SDennis Dalessandro 	struct rvt_swqe *wqe;
601f48ad614SDennis Dalessandro 	struct rvt_qp *qp = iter->qp;
602f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
603f48ad614SDennis Dalessandro 	struct sdma_engine *sde;
604f48ad614SDennis Dalessandro 	struct send_context *send_context;
605642aaab5SKaike Wan 	struct rvt_ack_entry *e = NULL;
606d67d6114SMichael J. Ruhl 	struct rvt_srq *srq = qp->ibqp.srq ?
607d67d6114SMichael J. Ruhl 		ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;
608f48ad614SDennis Dalessandro 
609f48ad614SDennis Dalessandro 	sde = qp_to_sdma_engine(qp, priv->s_sc);
610f48ad614SDennis Dalessandro 	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
611f48ad614SDennis Dalessandro 	send_context = qp_to_send_context(qp, priv->s_sc);
612642aaab5SKaike Wan 	if (qp->s_ack_queue)
613642aaab5SKaike Wan 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
614f48ad614SDennis Dalessandro 	seq_printf(s,
6159636258fSMitko Haralanov 		   "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
616f48ad614SDennis Dalessandro 		   iter->n,
617f48ad614SDennis Dalessandro 		   qp_idle(qp) ? "I" : "B",
618f48ad614SDennis Dalessandro 		   qp->ibqp.qp_num,
619f48ad614SDennis Dalessandro 		   atomic_read(&qp->refcount),
620f48ad614SDennis Dalessandro 		   qp_type_str[qp->ibqp.qp_type],
621f48ad614SDennis Dalessandro 		   qp->state,
622f48ad614SDennis Dalessandro 		   wqe ? wqe->wr.opcode : 0,
623f48ad614SDennis Dalessandro 		   qp->s_flags,
624f48ad614SDennis Dalessandro 		   iowait_sdma_pending(&priv->s_iowait),
625f48ad614SDennis Dalessandro 		   iowait_pio_pending(&priv->s_iowait),
626f48ad614SDennis Dalessandro 		   !list_empty(&priv->s_iowait.list),
627f48ad614SDennis Dalessandro 		   qp->timeout,
628f48ad614SDennis Dalessandro 		   wqe ? wqe->ssn : 0,
629f48ad614SDennis Dalessandro 		   qp->s_lsn,
630f48ad614SDennis Dalessandro 		   qp->s_last_psn,
631f48ad614SDennis Dalessandro 		   qp->s_psn, qp->s_next_psn,
632f48ad614SDennis Dalessandro 		   qp->s_sending_psn, qp->s_sending_hpsn,
633d7c76e91SMike Marciniszyn 		   qp->r_psn,
634f48ad614SDennis Dalessandro 		   qp->s_last, qp->s_acked, qp->s_cur,
635f48ad614SDennis Dalessandro 		   qp->s_tail, qp->s_head, qp->s_size,
636f48ad614SDennis Dalessandro 		   qp->s_avail,
637ff8d836eSKaike Wan 		   /* ack_queue ring pointers, size */
638ff8d836eSKaike Wan 		   qp->s_tail_ack_queue, qp->r_head_ack_queue,
6394b9796b0SKaike Wan 		   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
640ff8d836eSKaike Wan 		   /* remote QP info  */
641f48ad614SDennis Dalessandro 		   qp->remote_qpn,
642d8966fcdSDasaratharaman Chandramouli 		   rdma_ah_get_dlid(&qp->remote_ah_attr),
643d8966fcdSDasaratharaman Chandramouli 		   rdma_ah_get_sl(&qp->remote_ah_attr),
644f48ad614SDennis Dalessandro 		   qp->pmtu,
645f48ad614SDennis Dalessandro 		   qp->s_retry,
646f48ad614SDennis Dalessandro 		   qp->s_retry_cnt,
647f48ad614SDennis Dalessandro 		   qp->s_rnr_retry_cnt,
648d7c76e91SMike Marciniszyn 		   qp->s_rnr_retry,
649f48ad614SDennis Dalessandro 		   sde,
650f48ad614SDennis Dalessandro 		   sde ? sde->this_idx : 0,
651f48ad614SDennis Dalessandro 		   send_context,
652f48ad614SDennis Dalessandro 		   send_context ? send_context->sw_index : 0,
653239b0e52SKamenee Arumugam 		   ib_cq_head(qp->ibqp.send_cq),
654239b0e52SKamenee Arumugam 		   ib_cq_tail(qp->ibqp.send_cq),
655642aaab5SKaike Wan 		   qp->pid,
656280ad49aSMike Marciniszyn 		   qp->s_state,
657280ad49aSMike Marciniszyn 		   qp->s_ack_state,
658642aaab5SKaike Wan 		   /* ack queue information */
659642aaab5SKaike Wan 		   e ? e->opcode : 0,
660642aaab5SKaike Wan 		   e ? e->psn : 0,
661d67d6114SMichael J. Ruhl 		   e ? e->lpsn : 0,
662d67d6114SMichael J. Ruhl 		   qp->r_min_rnr_timer,
663d67d6114SMichael J. Ruhl 		   srq ? "SRQ" : "RQ",
664d67d6114SMichael J. Ruhl 		   srq ? srq->rq.size : qp->r_rq.size
665d67d6114SMichael J. Ruhl 		);
666f48ad614SDennis Dalessandro }
667f48ad614SDennis Dalessandro 
6680f4d027cSLeon Romanovsky void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
669f48ad614SDennis Dalessandro {
670f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv;
671f48ad614SDennis Dalessandro 
6720f4d027cSLeon Romanovsky 	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
673f48ad614SDennis Dalessandro 	if (!priv)
674f48ad614SDennis Dalessandro 		return ERR_PTR(-ENOMEM);
675f48ad614SDennis Dalessandro 
676f48ad614SDennis Dalessandro 	priv->owner = qp;
677f48ad614SDennis Dalessandro 
6780f4d027cSLeon Romanovsky 	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
679a9b6b3bcSDasaratharaman Chandramouli 				   rdi->dparms.node);
680a9b6b3bcSDasaratharaman Chandramouli 	if (!priv->s_ahg) {
681f48ad614SDennis Dalessandro 		kfree(priv);
682f48ad614SDennis Dalessandro 		return ERR_PTR(-ENOMEM);
683f48ad614SDennis Dalessandro 	}
6845a648dfaSMike Marciniszyn 	iowait_init(
6855a648dfaSMike Marciniszyn 		&priv->s_iowait,
6865a648dfaSMike Marciniszyn 		1,
6875a648dfaSMike Marciniszyn 		_hfi1_do_send,
688572f0c33SKaike Wan 		_hfi1_do_tid_send,
6895a648dfaSMike Marciniszyn 		iowait_sleep,
6905a648dfaSMike Marciniszyn 		iowait_wakeup,
69134025fb0SKaike Wan 		iowait_sdma_drained,
69234025fb0SKaike Wan 		hfi1_init_priority);
693270a9833SMike Marciniszyn 	/* Init to a value to start the running average correctly */
694270a9833SMike Marciniszyn 	priv->s_running_pkt_size = piothreshold / 2;
695f48ad614SDennis Dalessandro 	return priv;
696f48ad614SDennis Dalessandro }
697f48ad614SDennis Dalessandro 
698f48ad614SDennis Dalessandro void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
699f48ad614SDennis Dalessandro {
700f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
701f48ad614SDennis Dalessandro 
70248a615dcSKaike Wan 	hfi1_qp_priv_tid_free(rdi, qp);
703a9b6b3bcSDasaratharaman Chandramouli 	kfree(priv->s_ahg);
704f48ad614SDennis Dalessandro 	kfree(priv);
705f48ad614SDennis Dalessandro }
706f48ad614SDennis Dalessandro 
707f48ad614SDennis Dalessandro unsigned free_all_qps(struct rvt_dev_info *rdi)
708f48ad614SDennis Dalessandro {
709f48ad614SDennis Dalessandro 	struct hfi1_ibdev *verbs_dev = container_of(rdi,
710f48ad614SDennis Dalessandro 						    struct hfi1_ibdev,
711f48ad614SDennis Dalessandro 						    rdi);
712f48ad614SDennis Dalessandro 	struct hfi1_devdata *dd = container_of(verbs_dev,
713f48ad614SDennis Dalessandro 					       struct hfi1_devdata,
714f48ad614SDennis Dalessandro 					       verbs_dev);
715f48ad614SDennis Dalessandro 	int n;
716f48ad614SDennis Dalessandro 	unsigned qp_inuse = 0;
717f48ad614SDennis Dalessandro 
718f48ad614SDennis Dalessandro 	for (n = 0; n < dd->num_pports; n++) {
719f48ad614SDennis Dalessandro 		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
720f48ad614SDennis Dalessandro 
721f48ad614SDennis Dalessandro 		rcu_read_lock();
722f48ad614SDennis Dalessandro 		if (rcu_dereference(ibp->rvp.qp[0]))
723f48ad614SDennis Dalessandro 			qp_inuse++;
724f48ad614SDennis Dalessandro 		if (rcu_dereference(ibp->rvp.qp[1]))
725f48ad614SDennis Dalessandro 			qp_inuse++;
726f48ad614SDennis Dalessandro 		rcu_read_unlock();
727f48ad614SDennis Dalessandro 	}
728f48ad614SDennis Dalessandro 
729f48ad614SDennis Dalessandro 	return qp_inuse;
730f48ad614SDennis Dalessandro }
731f48ad614SDennis Dalessandro 
732f48ad614SDennis Dalessandro void flush_qp_waiters(struct rvt_qp *qp)
733f48ad614SDennis Dalessandro {
73468e78b3dSMike Marciniszyn 	lockdep_assert_held(&qp->s_lock);
735f48ad614SDennis Dalessandro 	flush_iowait(qp);
73637356e78SKaike Wan 	hfi1_tid_rdma_flush_wait(qp);
737f48ad614SDennis Dalessandro }
738f48ad614SDennis Dalessandro 
739f48ad614SDennis Dalessandro void stop_send_queue(struct rvt_qp *qp)
740f48ad614SDennis Dalessandro {
741f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
742f48ad614SDennis Dalessandro 
7435da0fc9dSDennis Dalessandro 	iowait_cancel_work(&priv->s_iowait);
74437356e78SKaike Wan 	if (cancel_work_sync(&priv->tid_rdma.trigger_work))
74537356e78SKaike Wan 		rvt_put_qp(qp);
746f48ad614SDennis Dalessandro }
747f48ad614SDennis Dalessandro 
748f48ad614SDennis Dalessandro void quiesce_qp(struct rvt_qp *qp)
749f48ad614SDennis Dalessandro {
750f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
751f48ad614SDennis Dalessandro 
7523c759e00SKaike Wan 	hfi1_del_tid_reap_timer(qp);
7533c6cb20aSKaike Wan 	hfi1_del_tid_retry_timer(qp);
754f48ad614SDennis Dalessandro 	iowait_sdma_drain(&priv->s_iowait);
755f48ad614SDennis Dalessandro 	qp_pio_drain(qp);
756f48ad614SDennis Dalessandro 	flush_tx_list(qp);
757f48ad614SDennis Dalessandro }
758f48ad614SDennis Dalessandro 
759f48ad614SDennis Dalessandro void notify_qp_reset(struct rvt_qp *qp)
760f48ad614SDennis Dalessandro {
76124b11923SKaike Wan 	hfi1_qp_kern_exp_rcv_clear_all(qp);
762688f21c0SMike Marciniszyn 	qp->r_adefered = 0;
763f48ad614SDennis Dalessandro 	clear_ahg(qp);
76448a615dcSKaike Wan 
76548a615dcSKaike Wan 	/* Clear any OPFN state */
76648a615dcSKaike Wan 	if (qp->ibqp.qp_type == IB_QPT_RC)
76748a615dcSKaike Wan 		opfn_conn_error(qp);
768f48ad614SDennis Dalessandro }
769f48ad614SDennis Dalessandro 
770f48ad614SDennis Dalessandro /*
771f48ad614SDennis Dalessandro  * Switch to alternate path.
772f48ad614SDennis Dalessandro  * The QP s_lock should be held and interrupts disabled.
773f48ad614SDennis Dalessandro  */
774f48ad614SDennis Dalessandro void hfi1_migrate_qp(struct rvt_qp *qp)
775f48ad614SDennis Dalessandro {
776f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
777f48ad614SDennis Dalessandro 	struct ib_event ev;
778f48ad614SDennis Dalessandro 
779f48ad614SDennis Dalessandro 	qp->s_mig_state = IB_MIG_MIGRATED;
780f48ad614SDennis Dalessandro 	qp->remote_ah_attr = qp->alt_ah_attr;
781d8966fcdSDasaratharaman Chandramouli 	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
782f48ad614SDennis Dalessandro 	qp->s_pkey_index = qp->s_alt_pkey_index;
7832e2ba09eSMike Marciniszyn 	qp->s_flags |= HFI1_S_AHG_CLEAR;
784f48ad614SDennis Dalessandro 	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
785f48ad614SDennis Dalessandro 	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
786d98bb7f7SDon Hiatt 	qp_set_16b(qp);
787f48ad614SDennis Dalessandro 
788f48ad614SDennis Dalessandro 	ev.device = qp->ibqp.device;
789f48ad614SDennis Dalessandro 	ev.element.qp = &qp->ibqp;
790f48ad614SDennis Dalessandro 	ev.event = IB_EVENT_PATH_MIG;
791f48ad614SDennis Dalessandro 	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
792f48ad614SDennis Dalessandro }
793f48ad614SDennis Dalessandro 
794f48ad614SDennis Dalessandro int mtu_to_path_mtu(u32 mtu)
795f48ad614SDennis Dalessandro {
796f48ad614SDennis Dalessandro 	return mtu_to_enum(mtu, OPA_MTU_8192);
797f48ad614SDennis Dalessandro }
798f48ad614SDennis Dalessandro 
799f48ad614SDennis Dalessandro u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
800f48ad614SDennis Dalessandro {
801f48ad614SDennis Dalessandro 	u32 mtu;
802f48ad614SDennis Dalessandro 	struct hfi1_ibdev *verbs_dev = container_of(rdi,
803f48ad614SDennis Dalessandro 						    struct hfi1_ibdev,
804f48ad614SDennis Dalessandro 						    rdi);
805f48ad614SDennis Dalessandro 	struct hfi1_devdata *dd = container_of(verbs_dev,
806f48ad614SDennis Dalessandro 					       struct hfi1_devdata,
807f48ad614SDennis Dalessandro 					       verbs_dev);
808f48ad614SDennis Dalessandro 	struct hfi1_ibport *ibp;
809f48ad614SDennis Dalessandro 	u8 sc, vl;
810f48ad614SDennis Dalessandro 
811f48ad614SDennis Dalessandro 	ibp = &dd->pport[qp->port_num - 1].ibport_data;
812d8966fcdSDasaratharaman Chandramouli 	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
813f48ad614SDennis Dalessandro 	vl = sc_to_vlt(dd, sc);
814f48ad614SDennis Dalessandro 
815f48ad614SDennis Dalessandro 	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
816f48ad614SDennis Dalessandro 	if (vl < PER_VL_SEND_CONTEXTS)
817f48ad614SDennis Dalessandro 		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
818f48ad614SDennis Dalessandro 	return mtu;
819f48ad614SDennis Dalessandro }
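/*
 * Editor's note (illustrative example): the returned value is the path MTU
 * clamped to the per-VL MTU, e.g. a pmtu of OPA_MTU_8192 on a VL whose
 * dd->vld[vl].mtu is 4096 yields 4096.
 */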
820f48ad614SDennis Dalessandro 
821f48ad614SDennis Dalessandro int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
822f48ad614SDennis Dalessandro 		       struct ib_qp_attr *attr)
823f48ad614SDennis Dalessandro {
824f48ad614SDennis Dalessandro 	int mtu, pidx = qp->port_num - 1;
825f48ad614SDennis Dalessandro 	struct hfi1_ibdev *verbs_dev = container_of(rdi,
826f48ad614SDennis Dalessandro 						    struct hfi1_ibdev,
827f48ad614SDennis Dalessandro 						    rdi);
828f48ad614SDennis Dalessandro 	struct hfi1_devdata *dd = container_of(verbs_dev,
829f48ad614SDennis Dalessandro 					       struct hfi1_devdata,
830f48ad614SDennis Dalessandro 					       verbs_dev);
831f48ad614SDennis Dalessandro 	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
832f48ad614SDennis Dalessandro 	if (mtu == -1)
833f48ad614SDennis Dalessandro 		return -1; /* values less than 0 are error */
834f48ad614SDennis Dalessandro 
835f48ad614SDennis Dalessandro 	if (mtu > dd->pport[pidx].ibmtu)
836f48ad614SDennis Dalessandro 		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
837f48ad614SDennis Dalessandro 	else
838f48ad614SDennis Dalessandro 		return attr->path_mtu;
839f48ad614SDennis Dalessandro }
840f48ad614SDennis Dalessandro 
841f48ad614SDennis Dalessandro void notify_error_qp(struct rvt_qp *qp)
842f48ad614SDennis Dalessandro {
843f48ad614SDennis Dalessandro 	struct hfi1_qp_priv *priv = qp->priv;
844a8715b97SMike Marciniszyn 	seqlock_t *lock = priv->s_iowait.lock;
845f48ad614SDennis Dalessandro 
846a8715b97SMike Marciniszyn 	if (lock) {
847a8715b97SMike Marciniszyn 		write_seqlock(lock);
848a8715b97SMike Marciniszyn 		if (!list_empty(&priv->s_iowait.list) &&
849572f0c33SKaike Wan 		    !(qp->s_flags & RVT_S_BUSY) &&
850572f0c33SKaike Wan 		    !(priv->s_flags & RVT_S_BUSY)) {
851662d6646SKaike Wan 			qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
85293b289b9SKaike Wan 			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
85393b289b9SKaike Wan 			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
854f48ad614SDennis Dalessandro 			list_del_init(&priv->s_iowait.list);
8554e045572SMike Marciniszyn 			priv->s_iowait.lock = NULL;
8564d6f85c3SMike Marciniszyn 			rvt_put_qp(qp);
857f48ad614SDennis Dalessandro 		}
858a8715b97SMike Marciniszyn 		write_sequnlock(lock);
859a8715b97SMike Marciniszyn 	}
860f48ad614SDennis Dalessandro 
861572f0c33SKaike Wan 	if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) {
862572f0c33SKaike Wan 		qp->s_hdrwords = 0;
863f48ad614SDennis Dalessandro 		if (qp->s_rdma_mr) {
864f48ad614SDennis Dalessandro 			rvt_put_mr(qp->s_rdma_mr);
865f48ad614SDennis Dalessandro 			qp->s_rdma_mr = NULL;
866f48ad614SDennis Dalessandro 		}
867f48ad614SDennis Dalessandro 		flush_tx_list(qp);
868f48ad614SDennis Dalessandro 	}
869f48ad614SDennis Dalessandro }
870f48ad614SDennis Dalessandro 
871f48ad614SDennis Dalessandro /**
872dff2fe7eSMike Marciniszyn  * hfi1_qp_iter_cb - callback for iterator
87311edbb19SLee Jones  * @qp: the qp
87411edbb19SLee Jones  * @v: the sl in low bits of v
875dff2fe7eSMike Marciniszyn  *
876dff2fe7eSMike Marciniszyn  * This is called from the iterator callback to work
877dff2fe7eSMike Marciniszyn  * on an individual qp.
878dff2fe7eSMike Marciniszyn  */
879dff2fe7eSMike Marciniszyn static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
880dff2fe7eSMike Marciniszyn {
881dff2fe7eSMike Marciniszyn 	int lastwqe;
882dff2fe7eSMike Marciniszyn 	struct ib_event ev;
883dff2fe7eSMike Marciniszyn 	struct hfi1_ibport *ibp =
884dff2fe7eSMike Marciniszyn 		to_iport(qp->ibqp.device, qp->port_num);
885dff2fe7eSMike Marciniszyn 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
886dff2fe7eSMike Marciniszyn 	u8 sl = (u8)v;
887dff2fe7eSMike Marciniszyn 
888dff2fe7eSMike Marciniszyn 	if (qp->port_num != ppd->port ||
889dff2fe7eSMike Marciniszyn 	    (qp->ibqp.qp_type != IB_QPT_UC &&
890dff2fe7eSMike Marciniszyn 	     qp->ibqp.qp_type != IB_QPT_RC) ||
891dff2fe7eSMike Marciniszyn 	    rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
892dff2fe7eSMike Marciniszyn 	    !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
893dff2fe7eSMike Marciniszyn 		return;
894dff2fe7eSMike Marciniszyn 
895dff2fe7eSMike Marciniszyn 	spin_lock_irq(&qp->r_lock);
896dff2fe7eSMike Marciniszyn 	spin_lock(&qp->s_hlock);
897dff2fe7eSMike Marciniszyn 	spin_lock(&qp->s_lock);
898dff2fe7eSMike Marciniszyn 	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
899dff2fe7eSMike Marciniszyn 	spin_unlock(&qp->s_lock);
900dff2fe7eSMike Marciniszyn 	spin_unlock(&qp->s_hlock);
901dff2fe7eSMike Marciniszyn 	spin_unlock_irq(&qp->r_lock);
902dff2fe7eSMike Marciniszyn 	if (lastwqe) {
903dff2fe7eSMike Marciniszyn 		ev.device = qp->ibqp.device;
904dff2fe7eSMike Marciniszyn 		ev.element.qp = &qp->ibqp;
905dff2fe7eSMike Marciniszyn 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
906dff2fe7eSMike Marciniszyn 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
907dff2fe7eSMike Marciniszyn 	}
908dff2fe7eSMike Marciniszyn }
909dff2fe7eSMike Marciniszyn 
910dff2fe7eSMike Marciniszyn /**
911f48ad614SDennis Dalessandro  * hfi1_error_port_qps - put a port's RC/UC qps into error state
912f48ad614SDennis Dalessandro  * @ibp: the ibport.
913f48ad614SDennis Dalessandro  * @sl: the service level.
914f48ad614SDennis Dalessandro  *
915f48ad614SDennis Dalessandro  * This function places all RC/UC qps with a given service level into error
916f48ad614SDennis Dalessandro  * state. It is generally called to force upper layer apps to abandon stale qps
917f48ad614SDennis Dalessandro  * after an sl->sc mapping change.
918f48ad614SDennis Dalessandro  */
919f48ad614SDennis Dalessandro void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
920f48ad614SDennis Dalessandro {
921f48ad614SDennis Dalessandro 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
922f48ad614SDennis Dalessandro 	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
923f48ad614SDennis Dalessandro 
924dff2fe7eSMike Marciniszyn 	rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
925f48ad614SDennis Dalessandro }
926