// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/smp.h>
#include "dr_types.h"

#define QUEUE_SIZE 128
#define SIGNAL_PER_DIV_QUEUE 16
#define TH_NUMS_TO_DRAIN 2

enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };

struct dr_data_seg {
	u64 addr;
	u32 length;
	u32 lkey;
	unsigned int send_flags;
};

struct postsend_info {
	struct dr_data_seg write;
	struct dr_data_seg read;
	u64 remote_addr;
	u32 rkey;
};

struct dr_qp_rtr_attr {
	struct mlx5dr_cmd_gid_attr dgid_attr;
	enum ib_mtu mtu;
	u32 qp_num;
	u16 port_num;
	u8 min_rnr_timer;
	u8 sgid_index;
	u16 udp_src_port;
	u8 fl:1;
};

struct dr_qp_rts_attr {
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
};

struct dr_qp_init_attr {
	u32 cqn;
	u32 pdn;
	u32 max_send_wr;
	struct mlx5_uars_page *uar;
	u8 isolate_vl_tc:1;
};

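/* Parse a single CQE. On success, and on a requester error, the SQ
 * consumer counter jumps past all WQEs covered by this completion, using
 * the wqe_head[] bookkeeping recorded at post time; on a responder error
 * it advances by one. Only a successful completion returns CQ_OK.
 */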
static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
{
	unsigned int idx;
	u8 opcode;

	opcode = get_cqe_opcode(cqe64);
	if (opcode == MLX5_CQE_REQ_ERR) {
		idx = be16_to_cpu(cqe64->wqe_counter) &
			(dr_cq->qp->sq.wqe_cnt - 1);
		dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
	} else if (opcode == MLX5_CQE_RESP_ERR) {
		++dr_cq->qp->sq.cc;
	} else {
		idx = be16_to_cpu(cqe64->wqe_counter) &
			(dr_cq->qp->sq.wqe_cnt - 1);
		dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;

		return CQ_OK;
	}

	return CQ_POLL_ERR;
}

static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
{
	struct mlx5_cqe64 *cqe64;
	int err;

	cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
	if (!cqe64)
		return CQ_EMPTY;

	mlx5_cqwq_pop(&dr_cq->wq);
	err = dr_parse_cqe(dr_cq, cqe64);
	mlx5_cqwq_update_db_record(&dr_cq->wq);

	return err;
}

static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
{
	int npolled;
	int err = 0;

	for (npolled = 0; npolled < ne; ++npolled) {
		err = dr_cq_poll_one(dr_cq);
		if (err != CQ_OK)
			break;
	}

	return err == CQ_POLL_ERR ? err : npolled;
}

static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
					 struct dr_qp_init_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
	struct mlx5_wq_param wqp;
	struct mlx5dr_qp *dr_qp;
	int inlen;
	void *qpc;
	void *in;
	int err;

	dr_qp = kzalloc(sizeof(*dr_qp), GFP_KERNEL);
	if (!dr_qp)
		return NULL;

	wqp.buf_numa_node = mdev->priv.numa_node;
	wqp.db_numa_node = mdev->priv.numa_node;

	dr_qp->rq.pc = 0;
	dr_qp->rq.cc = 0;
	dr_qp->rq.wqe_cnt = 4;
	dr_qp->sq.pc = 0;
	dr_qp->sq.cc = 0;
	dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);

	MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
	MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
	err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
				&dr_qp->wq_ctrl);
	if (err) {
		mlx5_core_warn(mdev, "Can't create QP WQ\n");
		goto err_wq;
	}

	dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
				     sizeof(dr_qp->sq.wqe_head[0]),
				     GFP_KERNEL);

	if (!dr_qp->sq.wqe_head) {
		mlx5_core_warn(mdev, "Can't allocate wqe head\n");
		goto err_wqe_head;
	}

	inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
		dr_qp->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_in;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
	MLX5_SET(qpc, qpc, pd, attr->pdn);
	MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
	MLX5_SET(qpc, qpc, log_page_size,
		 dr_qp->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);
	MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
	MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
	MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
	MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
	mlx5_fill_page_frag_array(&dr_qp->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
							 in, pas));

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	kvfree(in);
	if (err)
		goto err_in;
	dr_qp->uar = attr->uar;

	return dr_qp;

err_in:
	kfree(dr_qp->sq.wqe_head);
err_wqe_head:
	mlx5_wq_destroy(&dr_qp->wq_ctrl);
err_wq:
	kfree(dr_qp);
	return NULL;
}

static void dr_destroy_qp(struct mlx5_core_dev *mdev,
			  struct mlx5dr_qp *dr_qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn);
	mlx5_cmd_exec_in(mdev, destroy_qp, in);

	kfree(dr_qp->sq.wqe_head);
	mlx5_wq_destroy(&dr_qp->wq_ctrl);
	kfree(dr_qp);
}

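/* Ring the doorbell for new work: update the SQ doorbell record with the
 * producer counter after a dma_wmb() (so the WQE contents are visible to
 * the HW before the record), then, after a full wmb(), write the control
 * segment to the UAR/BlueFlame register to kick off processing.
 */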
static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
{
	dma_wmb();
	*dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);

	/* After wmb() the hw is aware of the new work */
	wmb();

	mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
}

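/* Build a single RDMA WQE (control + remote address + data segment) at
 * the current SQ producer index. The index math relies on wqe_cnt being
 * a power of two: idx = sq.pc & (wqe_cnt - 1). The WQE size placed in
 * qpn_ds is expressed in 16-byte units.
 */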
static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
			     u32 rkey, struct dr_data_seg *data_seg,
			     u32 opcode, bool notify_hw)
{
	struct mlx5_wqe_raddr_seg *wq_raddr;
	struct mlx5_wqe_ctrl_seg *wq_ctrl;
	struct mlx5_wqe_data_seg *wq_dseg;
	unsigned int size;
	unsigned int idx;

	size = sizeof(*wq_ctrl) / 16 + sizeof(*wq_dseg) / 16 +
		sizeof(*wq_raddr) / 16;

	idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);

	wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
	wq_ctrl->imm = 0;
	wq_ctrl->fm_ce_se = (data_seg->send_flags) ?
		MLX5_WQE_CTRL_CQ_UPDATE : 0;
	wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
						opcode);
	wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
	wq_raddr = (void *)(wq_ctrl + 1);
	wq_raddr->raddr = cpu_to_be64(remote_addr);
	wq_raddr->rkey = cpu_to_be32(rkey);
	wq_raddr->reserved = 0;

	wq_dseg = (void *)(wq_raddr + 1);
	wq_dseg->byte_count = cpu_to_be32(data_seg->length);
	wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
	wq_dseg->addr = cpu_to_be64(data_seg->addr);

	dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;

	if (notify_hw)
		dr_cmd_notify_hw(dr_qp, wq_ctrl);
}

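/* Every post is an RDMA write immediately followed by an RDMA read of
 * the same data; the doorbell is rung only once, for the second WQE.
 */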
static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
{
	dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
			 &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
	dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
			 &send_info->read, MLX5_OPCODE_RDMA_READ, true);
}

/**
 * mlx5dr_send_fill_and_append_ste_send_info: Add data to be sent
 * on the send_list. Parameters:
 *
 *     @ste:       The STE that this data is attached to
 *     @size:      Size of the data to write
 *     @offset:    Offset of the data from the start of the hw_ste entry
 *     @data:      The data to write
 *     @ste_info:  The STE info to be sent on the send_list
 *     @send_list: The list to append the STE info to
 *     @copy_data: If true, the data is copied and kept, since it is not
 *                 backed up anywhere else (e.g. during re-hash).
 *                 If false, the data may still be updated after it was
 *                 added to the list.
 */
void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
					       u16 offset, u8 *data,
					       struct mlx5dr_ste_send_info *ste_info,
					       struct list_head *send_list,
					       bool copy_data)
{
	ste_info->size = size;
	ste_info->ste = ste;
	ste_info->offset = offset;

	if (copy_data) {
		memcpy(ste_info->data_cont, data, size);
		ste_info->data = ste_info->data_cont;
	} else {
		ste_info->data = data;
	}

	list_add_tail(&ste_info->send_list, send_list);
}

/* The function tries to consume one wc each time, unless the queue is full.
 * In that case, which means that the hw is behind the sw by a full queue
 * length, the function will drain the cq until it is empty.
 */
static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
				struct mlx5dr_send_ring *send_ring)
{
	bool is_drain = false;
	int ne;

	if (send_ring->pending_wqe < send_ring->signal_th)
		return 0;

	/* Queue is full, start draining it */
	if (send_ring->pending_wqe >=
	    dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)
		is_drain = true;

	do {
		ne = dr_poll_cq(send_ring->cq, 1);
		if (unlikely(ne < 0)) {
			mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
					    send_ring->qp->qpn);
			send_ring->err_state = true;
			return ne;
		} else if (ne == 1) {
			send_ring->pending_wqe -= send_ring->signal_th;
		}
	} while (is_drain && send_ring->pending_wqe);

	return 0;
}

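/* Two WQEs (write + read) are accounted per post. A completion is
 * requested (IB_SEND_SIGNALED) only once every signal_th WQEs, so a
 * single polled CQE covers a whole batch of unsignaled work requests.
 */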
static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
			      struct postsend_info *send_info)
{
	send_ring->pending_wqe++;

	if (send_ring->pending_wqe % send_ring->signal_th == 0)
		send_info->write.send_flags |= IB_SEND_SIGNALED;

	send_ring->pending_wqe++;
	send_info->read.length = send_info->write.length;
	/* Read into the same write area */
	send_info->read.addr = (uintptr_t)send_info->write.addr;
	send_info->read.lkey = send_ring->mr->mkey;

	if (send_ring->pending_wqe % send_ring->signal_th == 0)
		send_info->read.send_flags = IB_SEND_SIGNALED;
	else
		send_info->read.send_flags = 0;
}

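/* Post a single write+read pair. Payloads larger than the max inline
 * size are first copied into the pre-registered ring buffer, in a slot
 * derived from tx_head; posting is skipped entirely once the device or
 * the QP has entered an error state.
 */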
static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
				struct postsend_info *send_info)
{
	struct mlx5dr_send_ring *send_ring = dmn->send_ring;
	u32 buff_offset;
	int ret;

	if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
		     send_ring->err_state)) {
		mlx5_core_dbg_once(dmn->mdev,
				   "Skipping post send: QP err state: %d, device state: %d\n",
				   send_ring->err_state, dmn->mdev->state);
		return 0;
	}

	spin_lock(&send_ring->lock);

	ret = dr_handle_pending_wc(dmn, send_ring);
	if (ret)
		goto out_unlock;

	if (send_info->write.length > dmn->info.max_inline_size) {
		buff_offset = (send_ring->tx_head &
			       (dmn->send_ring->signal_th - 1)) *
			send_ring->max_post_send_size;
		/* Copy to ring mr */
		memcpy(send_ring->buf + buff_offset,
		       (void *)(uintptr_t)send_info->write.addr,
		       send_info->write.length);
		send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
		send_info->write.lkey = send_ring->mr->mkey;
	}

	send_ring->tx_head++;
	dr_fill_data_segs(send_ring, send_info);
	dr_post_send(send_ring->qp, send_info);

out_unlock:
	spin_unlock(&send_ring->lock);
	return ret;
}

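/* Work out how to copy an ICM chunk: a chunk larger than the max post
 * size is sent in several max-sized iterations, otherwise it is sent in
 * one post sized by the chunk's number of STEs. Also allocates the
 * staging buffer used for the copy.
 */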
static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
				   struct mlx5dr_ste_htbl *htbl,
				   u8 **data,
				   u32 *byte_size,
				   int *iterations,
				   int *num_stes)
{
	u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
	int alloc_size;

	if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
		*iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
		*byte_size = dmn->send_ring->max_post_send_size;
		alloc_size = *byte_size;
		*num_stes = *byte_size / DR_STE_SIZE;
	} else {
		*iterations = 1;
		*num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
		alloc_size = *num_stes * DR_STE_SIZE;
	}

	*data = kvzalloc(alloc_size, GFP_KERNEL);
	if (!*data)
		return -ENOMEM;

	return 0;
}

/**
 * mlx5dr_send_postsend_ste: write size bytes at offset into the hw icm.
 *
 *     @dmn:    Domain
 *     @ste:    The ste struct that contains the data (at
 *              least part of it)
 *     @data:   The data to send
 *     @size:   Number of bytes to write
 *     @offset: The offset from the icm mapped data to
 *              start writing at; used to write only part
 *              of the buffer.
 *
 * Return: 0 on success.
 */
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
			     u8 *data, u16 size, u16 offset)
{
	struct postsend_info send_info = {};

	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);

	send_info.write.addr = (uintptr_t)data;
	send_info.write.length = size;
	send_info.write.lkey = 0;
	send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
	send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);

	return dr_postsend_icm_data(dmn, &send_info);
}

int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
			      struct mlx5dr_ste_htbl *htbl,
			      u8 *formatted_ste, u8 *mask)
{
	u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
	int num_stes_per_iter;
	int iterations;
	u8 *data;
	int ret;
	int i;
	int j;

	ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
				      &iterations, &num_stes_per_iter);
	if (ret)
		return ret;

	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);

	/* Send the data 'iterations' times */
	for (i = 0; i < iterations; i++) {
		u32 ste_index = i * (byte_size / DR_STE_SIZE);
		struct postsend_info send_info = {};

		/* Copy all STEs into the data buffer;
		 * the bit_mask needs to be added
		 */
		for (j = 0; j < num_stes_per_iter; j++) {
			struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
			u32 ste_off = j * DR_STE_SIZE;

			if (mlx5dr_ste_is_not_used(ste)) {
				memcpy(data + ste_off,
				       formatted_ste, DR_STE_SIZE);
			} else {
				/* Copy data */
				memcpy(data + ste_off,
				       htbl->chunk->hw_ste_arr +
				       DR_STE_SIZE_REDUCED * (ste_index + j),
				       DR_STE_SIZE_REDUCED);
				/* Copy bit_mask */
				memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
				       mask, DR_STE_SIZE_MASK);
				/* Only when we have a mask do we need to re-arrange the STE */
				mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
								data + (j * DR_STE_SIZE),
								DR_STE_SIZE);
			}
		}

		send_info.write.addr = (uintptr_t)data;
		send_info.write.length = byte_size;
		send_info.write.lkey = 0;
		send_info.remote_addr =
			mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
		send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);

		ret = dr_postsend_icm_data(dmn, &send_info);
		if (ret)
			goto out_free;
	}

out_free:
	kvfree(data);
	return ret;
}

/* Initialize htbl with default STEs */
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_htbl *htbl,
					u8 *ste_init_data,
					bool update_hw_ste)
{
	u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
	int iterations;
	int num_stes;
	u8 *copy_dst;
	u8 *data;
	int ret;
	int i;

	ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
				      &iterations, &num_stes);
	if (ret)
		return ret;

	if (update_hw_ste) {
		/* Copy the reduced STE to the hash table's hw_ste_arr */
		for (i = 0; i < num_stes; i++) {
			copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
			memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
		}
	}

	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);

	/* Copy the same STE throughout the data buffer */
	for (i = 0; i < num_stes; i++) {
		copy_dst = data + i * DR_STE_SIZE;
		memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
	}

	/* Send the data 'iterations' times */
	for (i = 0; i < iterations; i++) {
		u8 ste_index = i * (byte_size / DR_STE_SIZE);
		struct postsend_info send_info = {};

		send_info.write.addr = (uintptr_t)data;
		send_info.write.length = byte_size;
		send_info.write.lkey = 0;
		send_info.remote_addr =
			mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
		send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);

		ret = dr_postsend_icm_data(dmn, &send_info);
		if (ret)
			goto out_free;
	}

out_free:
	kvfree(data);
	return ret;
}

int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
				struct mlx5dr_action *action)
{
	struct postsend_info send_info = {};
	int ret;

	send_info.write.addr = (uintptr_t)action->rewrite->data;
	send_info.write.length = action->rewrite->num_of_actions *
				 DR_MODIFY_ACTION_SIZE;
	send_info.write.lkey = 0;
	send_info.remote_addr =
		mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
	send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);

	ret = dr_postsend_icm_data(dmn, &send_info);

	return ret;
}

static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
				 struct mlx5dr_qp *dr_qp,
				 int port)
{
	u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
	void *qpc;

	qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, port);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
	MLX5_SET(qpc, qpc, rre, 1);
	MLX5_SET(qpc, qpc, rwe, 1);

	MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
	MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn);

	return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}

static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
				    struct mlx5dr_qp *dr_qp,
				    struct dr_qp_rts_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
	void *qpc;

	qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);

	MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);

	MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
	MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
	MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */

	MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
	MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);

	return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}

static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
				     struct mlx5dr_qp *dr_qp,
				     struct dr_qp_rtr_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
	void *qpc;

	qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);

	MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);

	MLX5_SET(qpc, qpc, mtu, attr->mtu);
	MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
	MLX5_SET(qpc, qpc, remote_qpn, attr->qp_num);
	memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
	       attr->dgid_attr.mac, sizeof(attr->dgid_attr.mac));
	memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
	       attr->dgid_attr.gid, sizeof(attr->dgid_attr.gid));
	MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
		 attr->sgid_index);

	if (attr->dgid_attr.roce_ver == MLX5_ROCE_VERSION_2)
		MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
			 attr->udp_src_port);

	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
	MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
	MLX5_SET(qpc, qpc, min_rnr_nak, 1);

	MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
	MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);

	return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}

static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
{
	/* Check whether RC RoCE QP creation with force loopback is allowed.
	 * There are two separate capability bits for this:
	 *  - force loopback when RoCE is enabled
	 *  - force loopback when RoCE is disabled
	 */
	return ((caps->roce_caps.roce_en &&
		 caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
		(!caps->roce_caps.roce_en &&
		 caps->roce_caps.fl_rc_qp_when_roce_disabled));
}

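/* Move the QP through the RST -> INIT -> RTR -> RTS state machine.
 * The QP is connected to itself (loopback): the remote QP number used
 * for the INIT -> RTR transition is the QP's own number.
 */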
static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
	struct dr_qp_rts_attr rts_attr = {};
	struct dr_qp_rtr_attr rtr_attr = {};
	enum ib_mtu mtu = IB_MTU_1024;
	u16 gid_index = 0;
	int port = 1;
	int ret;

	/* Init */
	ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
	if (ret) {
		mlx5dr_err(dmn, "Failed modify QP rst2init\n");
		return ret;
	}

	/* RTR */
	rtr_attr.mtu		= mtu;
	rtr_attr.qp_num		= dr_qp->qpn;
	rtr_attr.min_rnr_timer	= 12;
	rtr_attr.port_num	= port;
	rtr_attr.udp_src_port	= dmn->info.caps.roce_min_src_udp;

	/* If QP creation with force loopback is allowed, then there
	 * is no need for GID index when creating the QP.
	 * Otherwise we query GID attributes and use GID index.
	 */
	rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
	if (!rtr_attr.fl) {
		ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
					   &rtr_attr.dgid_attr);
		if (ret)
			return ret;

		rtr_attr.sgid_index = gid_index;
	}

	ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
	if (ret) {
		mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
		return ret;
	}

	/* RTS */
	rts_attr.timeout	= 14;
	rts_attr.retry_cnt	= 7;
	rts_attr.rnr_retry	= 7;

	ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
	if (ret) {
		mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
		return ret;
	}

	return 0;
}

static void dr_cq_complete(struct mlx5_core_cq *mcq,
			   struct mlx5_eqe *eqe)
{
	pr_err("CQ completion CQ: #%u\n", mcq->cqn);
}

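/* Create a CQ that is used in polling mode only: completions are reaped
 * by dr_poll_cq(), the CQ is never re-armed, and the completion handler
 * above only reports unexpected events.
 */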
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
				      struct mlx5_uars_page *uar,
				      size_t ncqe)
{
	u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_wq_param wqp;
	struct mlx5_cqe64 *cqe;
	struct mlx5dr_cq *cq;
	int inlen, err, eqn;
	void *cqc, *in;
	__be64 *pas;
	int vector;
	u32 i;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return NULL;

	ncqe = roundup_pow_of_two(ncqe);
	MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));

	wqp.buf_numa_node = mdev->priv.numa_node;
	wqp.db_numa_node = mdev->priv.numa_node;

	err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		goto out;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
		cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
	}

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		goto err_cqwq;

	vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
	err = mlx5_vector2eqn(mdev, vector, &eqn);
	if (err) {
		kvfree(in);
		goto err_cqwq;
	}

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc, cqc, uar_page, uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);

	cq->mcq.comp = dr_cq_complete;

	err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
	kvfree(in);

	if (err)
		goto err_cqwq;

	cq->mcq.cqe_sz = 64;
	cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
	cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
	*cq->mcq.set_ci_db = 0;

	/* Set a non-zero value, in order to avoid the HW running db-recovery
	 * on a CQ that is used in polling mode.
	 */
	*cq->mcq.arm_db = cpu_to_be32(2 << 28);

	cq->mcq.vector = 0;
	cq->mcq.uar = uar;

	return cq;

err_cqwq:
	mlx5_wq_destroy(&cq->wq_ctrl);
out:
	kfree(cq);
	return NULL;
}

static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
{
	mlx5_core_destroy_cq(mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
	kfree(cq);
}

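/* Create an mkey in PA access mode that covers the whole address space
 * (length64) with local and remote read/write access, so DMA addresses
 * can be used directly as the buffer addresses of the data segments.
 */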
static int dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, 1);
	MLX5_SET(mkc, mkc, rw, 1);
	MLX5_SET(mkc, mkc, rr, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in));
}

static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
				   u32 pdn, void *buf, size_t size)
{
	struct mlx5dr_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	struct device *dma_device;
	dma_addr_t dma_addr;
	int err;

	if (!mr)
		return NULL;

	dma_device = mlx5_core_dma_dev(mdev);
	dma_addr = dma_map_single(dma_device, buf, size,
				  DMA_BIDIRECTIONAL);
	err = dma_mapping_error(dma_device, dma_addr);
	if (err) {
		mlx5_core_warn(mdev, "Can't dma buf\n");
		kfree(mr);
		return NULL;
	}

	err = dr_create_mkey(mdev, pdn, &mr->mkey);
	if (err) {
		mlx5_core_warn(mdev, "Can't create mkey\n");
		dma_unmap_single(dma_device, dma_addr, size,
				 DMA_BIDIRECTIONAL);
		kfree(mr);
		return NULL;
	}

	mr->dma_addr = dma_addr;
	mr->size = size;
	mr->addr = buf;

	return mr;
}

static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
{
	mlx5_core_destroy_mkey(mdev, mr->mkey);
	dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
			 DMA_BIDIRECTIONAL);
	kfree(mr);
}

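/* Allocate the send ring: a polling-mode CQ, a loopback RC QP brought to
 * RTS, and two DMA-mapped regions - a staging buffer sized for a full
 * signal batch of max-sized posts, and a small buffer backing the
 * drain/sync reads.
 */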
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
{
	struct dr_qp_init_attr init_attr = {};
	int cq_size;
	int size;
	int ret;

	dmn->send_ring = kzalloc(sizeof(*dmn->send_ring), GFP_KERNEL);
	if (!dmn->send_ring)
		return -ENOMEM;

	cq_size = QUEUE_SIZE + 1;
	dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
	if (!dmn->send_ring->cq) {
		mlx5dr_err(dmn, "Failed creating CQ\n");
		ret = -ENOMEM;
		goto free_send_ring;
	}

	init_attr.cqn = dmn->send_ring->cq->mcq.cqn;
	init_attr.pdn = dmn->pdn;
	init_attr.uar = dmn->uar;
	init_attr.max_send_wr = QUEUE_SIZE;

	/* Isolated VL is applicable only if force loopback is supported */
	if (dr_send_allow_fl(&dmn->info.caps))
		init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;

	spin_lock_init(&dmn->send_ring->lock);

	dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
	if (!dmn->send_ring->qp) {
		mlx5dr_err(dmn, "Failed creating QP\n");
		ret = -ENOMEM;
		goto clean_cq;
	}

	dmn->send_ring->cq->qp = dmn->send_ring->qp;

	dmn->info.max_send_wr = QUEUE_SIZE;
	dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
					DR_STE_SIZE);

	dmn->send_ring->signal_th = dmn->info.max_send_wr /
		SIGNAL_PER_DIV_QUEUE;

	/* Prepare qp to be used */
	ret = dr_prepare_qp_to_rts(dmn);
	if (ret)
		goto clean_qp;

	dmn->send_ring->max_post_send_size =
		mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_1K,
						   DR_ICM_TYPE_STE);

	/* Allocating the max size as a buffer for writing */
	size = dmn->send_ring->signal_th * dmn->send_ring->max_post_send_size;
	dmn->send_ring->buf = kzalloc(size, GFP_KERNEL);
	if (!dmn->send_ring->buf) {
		ret = -ENOMEM;
		goto clean_qp;
	}

	dmn->send_ring->buf_size = size;

	dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
				       dmn->pdn, dmn->send_ring->buf, size);
	if (!dmn->send_ring->mr) {
		ret = -ENOMEM;
		goto free_mem;
	}

	dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
					    dmn->pdn, dmn->send_ring->sync_buff,
					    MIN_READ_SYNC);
	if (!dmn->send_ring->sync_mr) {
		ret = -ENOMEM;
		goto clean_mr;
	}

	return 0;

clean_mr:
	dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
free_mem:
	kfree(dmn->send_ring->buf);
clean_qp:
	dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);
clean_cq:
	dr_destroy_cq(dmn->mdev, dmn->send_ring->cq);
free_send_ring:
	kfree(dmn->send_ring);

	return ret;
}

void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
			   struct mlx5dr_send_ring *send_ring)
{
	dr_destroy_qp(dmn->mdev, send_ring->qp);
	dr_destroy_cq(dmn->mdev, send_ring->cq);
	dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
	dr_dereg_mr(dmn->mdev, send_ring->mr);
	kfree(send_ring->buf);
	kfree(send_ring);
}

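/* Force a drain of the send ring by posting enough dummy write+read
 * pairs (against the sync MR) to cross the TH_NUMS_TO_DRAIN threshold,
 * then poll the CQ until all pending WQEs have completed.
 */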
int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_send_ring *send_ring = dmn->send_ring;
	struct postsend_info send_info = {};
	u8 data[DR_STE_SIZE];
	int num_of_sends_req;
	int ret;
	int i;

	/* Sending this amount of requests makes sure the queue will be drained */
	num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;

	/* Send fake requests forcing the last to be signaled */
	send_info.write.addr = (uintptr_t)data;
	send_info.write.length = DR_STE_SIZE;
	send_info.write.lkey = 0;
	/* Using the sync_mr in order to write/read */
	send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
	send_info.rkey = send_ring->sync_mr->mkey;

	for (i = 0; i < num_of_sends_req; i++) {
		ret = dr_postsend_icm_data(dmn, &send_info);
		if (ret)
			return ret;
	}

	spin_lock(&send_ring->lock);
	ret = dr_handle_pending_wc(dmn, send_ring);
	spin_unlock(&send_ring->lock);

	return ret;
}