// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/smp.h>
#include "dr_types.h"

#define QUEUE_SIZE 128
#define SIGNAL_PER_DIV_QUEUE 16
#define TH_NUMS_TO_DRAIN 2
#define DR_SEND_INFO_POOL_SIZE 1000

enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };

struct dr_data_seg {
	u64 addr;
	u32 length;
	u32 lkey;
	unsigned int send_flags;
};

enum send_info_type {
	WRITE_ICM = 0,
	GTA_ARG   = 1,
};

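/* A single unit of work to post to the send queue. WRITE_ICM is an RDMA
 * write of STE/action data into ICM memory, paired with an RDMA read-back
 * used to track completion; GTA_ARG carries inline flow-table argument data.
 */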
struct postsend_info {
	enum send_info_type type;
	struct dr_data_seg write;
	struct dr_data_seg read;
	u64 remote_addr;
	u32 rkey;
};

struct dr_qp_rtr_attr {
	struct mlx5dr_cmd_gid_attr dgid_attr;
	enum ib_mtu mtu;
	u32 qp_num;
	u16 port_num;
	u8 min_rnr_timer;
	u8 sgid_index;
	u16 udp_src_port;
	u8 fl:1;
};

struct dr_qp_rts_attr {
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
};

struct dr_qp_init_attr {
	u32 cqn;
	u32 pdn;
	u32 max_send_wr;
	struct mlx5_uars_page *uar;
	u8 isolate_vl_tc:1;
};

struct mlx5dr_send_info_pool_obj {
	struct mlx5dr_ste_send_info ste_send_info;
	struct mlx5dr_send_info_pool *pool;
	struct list_head list_node;
};

struct mlx5dr_send_info_pool {
	struct list_head free_list;
};

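/* The send-info pools (one per NIC RX/TX side of the domain) keep
 * preallocated mlx5dr_ste_send_info objects on a free list, refilled in
 * batches of DR_SEND_INFO_POOL_SIZE, so the rule insertion path avoids a
 * kzalloc/kfree per STE update.
 */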
static int dr_send_info_pool_fill(struct mlx5dr_send_info_pool *pool)
{
	struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
	int i;

	for (i = 0; i < DR_SEND_INFO_POOL_SIZE; i++) {
		pool_obj = kzalloc(sizeof(*pool_obj), GFP_KERNEL);
		if (!pool_obj)
			goto clean_pool;

		pool_obj->pool = pool;
		list_add_tail(&pool_obj->list_node, &pool->free_list);
	}

	return 0;

clean_pool:
	list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
		list_del(&pool_obj->list_node);
		kfree(pool_obj);
	}

	return -ENOMEM;
}

static void dr_send_info_pool_destroy(struct mlx5dr_send_info_pool *pool)
{
	struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;

	list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
		list_del(&pool_obj->list_node);
		kfree(pool_obj);
	}

	kfree(pool);
}

void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn)
{
	dr_send_info_pool_destroy(dmn->send_info_pool_tx);
	dr_send_info_pool_destroy(dmn->send_info_pool_rx);
}

static struct mlx5dr_send_info_pool *dr_send_info_pool_create(void)
{
	struct mlx5dr_send_info_pool *pool;
	int ret;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	INIT_LIST_HEAD(&pool->free_list);

	ret = dr_send_info_pool_fill(pool);
	if (ret) {
		kfree(pool);
		return NULL;
	}

	return pool;
}

int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn)
{
	dmn->send_info_pool_rx = dr_send_info_pool_create();
	if (!dmn->send_info_pool_rx)
		return -ENOMEM;

	dmn->send_info_pool_tx = dr_send_info_pool_create();
	if (!dmn->send_info_pool_tx) {
		dr_send_info_pool_destroy(dmn->send_info_pool_rx);
		return -ENOMEM;
	}

	return 0;
}

struct mlx5dr_ste_send_info
*mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
			enum mlx5dr_domain_nic_type nic_type)
{
	struct mlx5dr_send_info_pool_obj *pool_obj;
	struct mlx5dr_send_info_pool *pool;
	int ret;

	pool = nic_type == DR_DOMAIN_NIC_TYPE_RX ? dmn->send_info_pool_rx :
						   dmn->send_info_pool_tx;

	if (unlikely(list_empty(&pool->free_list))) {
		ret = dr_send_info_pool_fill(pool);
		if (ret)
			return NULL;
	}

	pool_obj = list_first_entry_or_null(&pool->free_list,
					    struct mlx5dr_send_info_pool_obj,
					    list_node);

	if (likely(pool_obj)) {
		list_del_init(&pool_obj->list_node);
	} else {
		WARN_ONCE(!pool_obj, "Failed getting ste send info obj from pool");
		return NULL;
	}

	return &pool_obj->ste_send_info;
}

void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info)
{
	struct mlx5dr_send_info_pool_obj *pool_obj;

	pool_obj = container_of(ste_send_info,
				struct mlx5dr_send_info_pool_obj,
				ste_send_info);

	list_add(&pool_obj->list_node, &pool_obj->pool->free_list);
}

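/* Consume one CQE: on success or requester error, advance the SQ consumer
 * counter (sq.cc) past every WQE covered by this completion, using the
 * wqe_head[] mapping from WQE index to posting order.
 */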
static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
{
	unsigned int idx;
	u8 opcode;

	opcode = get_cqe_opcode(cqe64);
	if (opcode == MLX5_CQE_REQ_ERR) {
		idx = be16_to_cpu(cqe64->wqe_counter) &
			(dr_cq->qp->sq.wqe_cnt - 1);
		dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
	} else if (opcode == MLX5_CQE_RESP_ERR) {
		++dr_cq->qp->sq.cc;
	} else {
		idx = be16_to_cpu(cqe64->wqe_counter) &
			(dr_cq->qp->sq.wqe_cnt - 1);
		dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;

		return CQ_OK;
	}

	return CQ_POLL_ERR;
}

static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
{
	struct mlx5_cqe64 *cqe64;
	int err;

	cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
	if (!cqe64) {
		if (unlikely(dr_cq->mdev->state ==
			     MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
			mlx5_core_dbg_once(dr_cq->mdev,
					   "Polling CQ while device is shutting down\n");
			return CQ_POLL_ERR;
		}
		return CQ_EMPTY;
	}

	mlx5_cqwq_pop(&dr_cq->wq);
	err = dr_parse_cqe(dr_cq, cqe64);
	mlx5_cqwq_update_db_record(&dr_cq->wq);

	return err;
}

static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
{
	int npolled;
	int err = 0;

	for (npolled = 0; npolled < ne; ++npolled) {
		err = dr_cq_poll_one(dr_cq);
		if (err != CQ_OK)
			break;
	}

	return err == CQ_POLL_ERR ? err : npolled;
}

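/* Create the RC QP that SW steering uses as its channel for writing STEs
 * and actions into ICM memory via RDMA.
 */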
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
					 struct dr_qp_init_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
	struct mlx5_wq_param wqp;
	struct mlx5dr_qp *dr_qp;
	int inlen;
	void *qpc;
	void *in;
	int err;

	dr_qp = kzalloc(sizeof(*dr_qp), GFP_KERNEL);
	if (!dr_qp)
		return NULL;

	wqp.buf_numa_node = mdev->priv.numa_node;
	wqp.db_numa_node = mdev->priv.numa_node;

	dr_qp->rq.pc = 0;
	dr_qp->rq.cc = 0;
	dr_qp->rq.wqe_cnt = 256;
	dr_qp->sq.pc = 0;
	dr_qp->sq.cc = 0;
	dr_qp->sq.head = 0;
	dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);

	MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
	MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
	err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
				&dr_qp->wq_ctrl);
	if (err) {
		mlx5_core_warn(mdev, "Can't create QP WQ\n");
		goto err_wq;
	}

	dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
				     sizeof(dr_qp->sq.wqe_head[0]),
				     GFP_KERNEL);

	if (!dr_qp->sq.wqe_head) {
		mlx5_core_warn(mdev, "Can't allocate wqe head\n");
		goto err_wqe_head;
	}

	inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
		dr_qp->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_in;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
	MLX5_SET(qpc, qpc, pd, attr->pdn);
	MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
	MLX5_SET(qpc, qpc, log_page_size,
		 dr_qp->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);
	MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
	MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
	MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
	MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
	mlx5_fill_page_frag_array(&dr_qp->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
							 in, pas));

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
	kvfree(in);
	if (err)
		goto err_in;
	dr_qp->uar = attr->uar;

	return dr_qp;

err_in:
	kfree(dr_qp->sq.wqe_head);
err_wqe_head:
	mlx5_wq_destroy(&dr_qp->wq_ctrl);
err_wq:
	kfree(dr_qp);
	return NULL;
}

static void dr_destroy_qp(struct mlx5_core_dev *mdev,
			  struct mlx5dr_qp *dr_qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn);
	mlx5_cmd_exec_in(mdev, destroy_qp, in);

	kfree(dr_qp->sq.wqe_head);
	mlx5_wq_destroy(&dr_qp->wq_ctrl);
	kfree(dr_qp);
}

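/* Ring the doorbell for newly posted WQEs: dma_wmb() orders the WQE writes
 * before the doorbell record update, and the full wmb() ensures the record
 * is visible before the BlueFlame/UAR write that notifies the HW.
 */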
static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
{
	dma_wmb();
	*dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);

	/* After wmb() the hw is aware of the new work */
	wmb();

	mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
}

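/* Build the segments of a FLOW_TBL_ACCESS WQE that updates a header-modify
 * argument: the ctrl segment carries the argument's general id, followed by
 * an all-reserved flow update ctrl segment and the inline argument data.
 */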
static void
dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
					u32 remote_addr,
					struct dr_data_seg *data_seg,
					int *size)
{
	struct mlx5_wqe_header_modify_argument_update_seg *wq_arg_seg;
	struct mlx5_wqe_flow_update_ctrl_seg *wq_flow_seg;

	wq_ctrl->general_id = cpu_to_be32(remote_addr);
	wq_flow_seg = (void *)(wq_ctrl + 1);

	/* mlx5_wqe_flow_update_ctrl_seg - all reserved */
	memset(wq_flow_seg, 0, sizeof(*wq_flow_seg));
	wq_arg_seg = (void *)(wq_flow_seg + 1);

	memcpy(wq_arg_seg->argument_list,
	       (void *)(uintptr_t)data_seg->addr,
	       data_seg->length);

	*size = (sizeof(*wq_ctrl) +      /* WQE ctrl segment */
		 sizeof(*wq_flow_seg) +  /* WQE flow update ctrl seg - reserved */
		 sizeof(*wq_arg_seg)) /  /* WQE hdr modify arg seg - data */
		MLX5_SEND_WQE_DS;
}

static void
dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
				  u64 remote_addr,
				  u32 rkey,
				  struct dr_data_seg *data_seg,
				  unsigned int *size)
{
	struct mlx5_wqe_raddr_seg *wq_raddr;
	struct mlx5_wqe_data_seg *wq_dseg;

	wq_raddr = (void *)(wq_ctrl + 1);

	wq_raddr->raddr = cpu_to_be64(remote_addr);
	wq_raddr->rkey = cpu_to_be32(rkey);
	wq_raddr->reserved = 0;

	wq_dseg = (void *)(wq_raddr + 1);

	wq_dseg->byte_count = cpu_to_be32(data_seg->length);
	wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
	wq_dseg->addr = cpu_to_be64(data_seg->addr);

	*size = (sizeof(*wq_ctrl) +    /* WQE ctrl segment */
		 sizeof(*wq_dseg) +    /* WQE data segment */
		 sizeof(*wq_raddr)) /  /* WQE remote addr segment */
		MLX5_SEND_WQE_DS;
}

static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
			    struct dr_data_seg *data_seg)
{
	wq_ctrl->signature = 0;
	wq_ctrl->rsvd[0] = 0;
	wq_ctrl->rsvd[1] = 0;
	wq_ctrl->fm_ce_se = data_seg->send_flags & IB_SEND_SIGNALED ?
				MLX5_WQE_CTRL_CQ_UPDATE : 0;
	wq_ctrl->imm = 0;
}

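/* Build one WQE at the current SQ producer index. The returned segment
 * sizes are in units of MLX5_SEND_WQE_DS (16-byte data segments), and the
 * producer counter advances by the number of basic blocks the WQE spans.
 */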
static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
			     u32 rkey, struct dr_data_seg *data_seg,
			     u32 opcode, bool notify_hw)
{
	struct mlx5_wqe_ctrl_seg *wq_ctrl;
	int opcode_mod = 0;
	unsigned int size;
	unsigned int idx;

	idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);

	wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
	dr_set_ctrl_seg(wq_ctrl, data_seg);

	switch (opcode) {
	case MLX5_OPCODE_RDMA_READ:
	case MLX5_OPCODE_RDMA_WRITE:
		dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
						  rkey, data_seg, &size);
		break;
	case MLX5_OPCODE_FLOW_TBL_ACCESS:
		opcode_mod = MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT;
		dr_rdma_handle_flow_access_arg_segments(wq_ctrl, remote_addr,
							data_seg, &size);
		break;
	default:
		WARN(true, "illegal opcode %d", opcode);
		return;
	}

	/* ----------------------------------------------------------
	 * |opcode_mod (8 bits)|wqe_index (16 bits)|opcode (8 bits)|
	 * ----------------------------------------------------------
	 */
	wq_ctrl->opmod_idx_opcode =
		cpu_to_be32((opcode_mod << 24) |
			    ((dr_qp->sq.pc & 0xffff) << 8) |
			    opcode);
	wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);

	dr_qp->sq.pc += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	dr_qp->sq.wqe_head[idx] = dr_qp->sq.head++;

	if (notify_hw)
		dr_cmd_notify_hw(dr_qp, wq_ctrl);
}

static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
{
	if (send_info->type == WRITE_ICM) {
		dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
				 &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
		dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
				 &send_info->read, MLX5_OPCODE_RDMA_READ, true);
	} else { /* GTA_ARG */
		dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
				 &send_info->write, MLX5_OPCODE_FLOW_TBL_ACCESS, true);
	}
}

/**
 * mlx5dr_send_fill_and_append_ste_send_info: Add data to be sent
 * with send_list parameters:
 *
 *     @ste:       The STE to which this data is attached
 *     @size:      Size of the data to write
 *     @offset:    Offset of the data from the start of the hw_ste entry
 *     @data:      Data to write
 *     @ste_info:  STE info to be sent with send_list
 *     @send_list: List to append the STE info to
 *     @copy_data: If true, the data is copied and kept, since it is not
 *                 backed up anywhere (e.g. during re-hash).
 *                 If false, the data may still be updated after it was
 *                 added to the list.
 */
void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
					       u16 offset, u8 *data,
					       struct mlx5dr_ste_send_info *ste_info,
					       struct list_head *send_list,
					       bool copy_data)
{
	ste_info->size = size;
	ste_info->ste = ste;
	ste_info->offset = offset;

	if (copy_data) {
		memcpy(ste_info->data_cont, data, size);
		ste_info->data = ste_info->data_cont;
	} else {
		ste_info->data = data;
	}

	list_add_tail(&ste_info->send_list, send_list);
}

/* The function consumes at most one completion each time it is called,
 * unless the queue is nearly full - i.e. the HW is a full queue length
 * behind the SW - in which case it keeps polling until the pending WQE
 * count drops below the signalling threshold.
 */
static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
				struct mlx5dr_send_ring *send_ring)
{
	bool is_drain = false;
	int ne;

	if (send_ring->pending_wqe < send_ring->signal_th)
		return 0;

	/* Queue is full, start draining it */
	if (send_ring->pending_wqe >=
	    dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)
		is_drain = true;

	do {
		ne = dr_poll_cq(send_ring->cq, 1);
		if (unlikely(ne < 0)) {
			mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
					    send_ring->qp->qpn);
			send_ring->err_state = true;
			return ne;
		} else if (ne == 1) {
			send_ring->pending_wqe -= send_ring->signal_th;
		}
	} while (ne == 1 ||
		 (is_drain && send_ring->pending_wqe >= send_ring->signal_th));

	return 0;
}

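/* Completions are only requested (IB_SEND_SIGNALED) once every signal_th
 * WQEs, so a single CQE accounts for a whole batch of signal_th posts.
 */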
static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
				    struct postsend_info *send_info)
{
	send_ring->pending_wqe++;

	if (send_ring->pending_wqe % send_ring->signal_th == 0)
		send_info->write.send_flags |= IB_SEND_SIGNALED;
	else
		send_info->write.send_flags = 0;
}

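/* Prepare the write and read segments of a WRITE_ICM post: payloads larger
 * than the max inline size are first copied into the send ring's registered
 * buffer so the HW can DMA them, and the paired RDMA read lands in a
 * dedicated sync buffer.
 */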
static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
				   struct mlx5dr_send_ring *send_ring,
				   struct postsend_info *send_info)
{
	u32 buff_offset;

	if (send_info->write.length > dmn->info.max_inline_size) {
		buff_offset = (send_ring->tx_head &
			       (dmn->send_ring->signal_th - 1)) *
			      send_ring->max_post_send_size;
		/* Copy to ring mr */
		memcpy(send_ring->buf + buff_offset,
		       (void *)(uintptr_t)send_info->write.addr,
		       send_info->write.length);
		send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
		send_info->write.lkey = send_ring->mr->mkey;

		send_ring->tx_head++;
	}

	send_ring->pending_wqe++;

	if (send_ring->pending_wqe % send_ring->signal_th == 0)
		send_info->write.send_flags |= IB_SEND_SIGNALED;

	send_ring->pending_wqe++;
	send_info->read.length = send_info->write.length;

	/* Read into dedicated sync buffer */
	send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
	send_info->read.lkey = send_ring->sync_mr->mkey;

	if (send_ring->pending_wqe % send_ring->signal_th == 0)
		send_info->read.send_flags = IB_SEND_SIGNALED;
	else
		send_info->read.send_flags = 0;
}

static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
			      struct mlx5dr_send_ring *send_ring,
			      struct postsend_info *send_info)
{
	if (send_info->type == WRITE_ICM)
		dr_fill_write_icm_segs(dmn, send_ring, send_info);
	else /* args */
		dr_fill_write_args_segs(send_ring, send_info);
}

static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
				struct postsend_info *send_info)
{
	struct mlx5dr_send_ring *send_ring = dmn->send_ring;
	int ret;

	if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
		     send_ring->err_state)) {
		mlx5_core_dbg_once(dmn->mdev,
				   "Skipping post send: QP err state: %d, device state: %d\n",
				   send_ring->err_state, dmn->mdev->state);
		return 0;
	}

	spin_lock(&send_ring->lock);

	ret = dr_handle_pending_wc(dmn, send_ring);
	if (ret)
		goto out_unlock;

	dr_fill_data_segs(dmn, send_ring, send_info);
	dr_post_send(send_ring->qp, send_info);

out_unlock:
	spin_unlock(&send_ring->lock);
	return ret;
}

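/* Compute how to copy an STE hash table to ICM: tables larger than the max
 * post size are sent in max_post_send_size chunks over several iterations,
 * smaller ones in a single post covering all entries.
 */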
static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
				   struct mlx5dr_ste_htbl *htbl,
				   u8 **data,
				   u32 *byte_size,
				   int *iterations,
				   int *num_stes)
{
	u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
	int alloc_size;

	if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
		*iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
		*byte_size = dmn->send_ring->max_post_send_size;
		alloc_size = *byte_size;
		*num_stes = *byte_size / DR_STE_SIZE;
	} else {
		*iterations = 1;
		*num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
		alloc_size = *num_stes * DR_STE_SIZE;
	}

	*data = kvzalloc(alloc_size, GFP_KERNEL);
	if (!*data)
		return -ENOMEM;

	return 0;
}

/**
 * mlx5dr_send_postsend_ste: write size bytes into offset of the ICM
 * mapped data.
 *
 *     @dmn:    Domain
 *     @ste:    The ste struct that contains the data (at
 *              least part of it)
 *     @data:   The real data to send
 *     @size:   Size of the data to write
 *     @offset: The offset from the icm mapped data to
 *              start writing to. Used when writing only
 *              part of the buffer.
 *
 * Return: 0 on success.
 */
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
			     u8 *data, u16 size, u16 offset)
{
	struct postsend_info send_info = {};

	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);

	send_info.write.addr = (uintptr_t)data;
	send_info.write.length = size;
	send_info.write.lkey = 0;
	send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
	send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);

	return dr_postsend_icm_data(dmn, &send_info);
}

int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
			      struct mlx5dr_ste_htbl *htbl,
			      u8 *formatted_ste, u8 *mask)
{
	u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
	int num_stes_per_iter;
	int iterations;
	u8 *data;
	int ret;
	int i;
	int j;

	ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
				      &iterations, &num_stes_per_iter);
	if (ret)
		return ret;

	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);

	/* Send the data iteration times */
	for (i = 0; i < iterations; i++) {
		u32 ste_index = i * (byte_size / DR_STE_SIZE);
		struct postsend_info send_info = {};

		/* Copy all the STEs to the data buffer; used STEs
		 * also need the bit_mask added.
		 */
		for (j = 0; j < num_stes_per_iter; j++) {
			struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
			u32 ste_off = j * DR_STE_SIZE;

			if (mlx5dr_ste_is_not_used(ste)) {
				memcpy(data + ste_off,
				       formatted_ste, DR_STE_SIZE);
			} else {
				/* Copy data */
				memcpy(data + ste_off,
				       htbl->chunk->hw_ste_arr +
				       DR_STE_SIZE_REDUCED * (ste_index + j),
				       DR_STE_SIZE_REDUCED);
				/* Copy bit_mask */
				memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
				       mask, DR_STE_SIZE_MASK);
				/* Only when there is a mask do we need to re-arrange the STE */
				mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
								data + (j * DR_STE_SIZE),
								DR_STE_SIZE);
			}
		}

		send_info.write.addr = (uintptr_t)data;
		send_info.write.length = byte_size;
		send_info.write.lkey = 0;
		send_info.remote_addr =
			mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
		send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);

		ret = dr_postsend_icm_data(dmn, &send_info);
		if (ret)
			goto out_free;
	}

out_free:
	kvfree(data);
	return ret;
}

/* Initialize htbl with default STEs */
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_htbl *htbl,
					u8 *ste_init_data,
					bool update_hw_ste)
{
	u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
	int iterations;
	int num_stes;
	u8 *copy_dst;
	u8 *data;
	int ret;
	int i;

	ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
				      &iterations, &num_stes);
	if (ret)
		return ret;

	if (update_hw_ste) {
		/* Copy the reduced STE to hash table ste_arr */
		for (i = 0; i < num_stes; i++) {
			copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
			memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
		}
	}

	mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);

	/* Copy the same STE on the data buffer */
	for (i = 0; i < num_stes; i++) {
		copy_dst = data + i * DR_STE_SIZE;
		memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
	}

	/* Send the data iteration times */
	for (i = 0; i < iterations; i++) {
		u8 ste_index = i * (byte_size / DR_STE_SIZE);
		struct postsend_info send_info = {};

		send_info.write.addr = (uintptr_t)data;
		send_info.write.length = byte_size;
		send_info.write.lkey = 0;
		send_info.remote_addr =
			mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
		send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);

		ret = dr_postsend_icm_data(dmn, &send_info);
		if (ret)
			goto out_free;
	}

out_free:
	kvfree(data);
	return ret;
}

int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
				struct mlx5dr_action *action)
{
	struct postsend_info send_info = {};

	send_info.write.addr = (uintptr_t)action->rewrite->data;
	send_info.write.length = action->rewrite->num_of_actions *
				 DR_MODIFY_ACTION_SIZE;
	send_info.write.lkey = 0;
	send_info.remote_addr =
		mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
	send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);

	return dr_postsend_icm_data(dmn, &send_info);
}

int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
				 struct mlx5dr_icm_chunk *chunk,
				 u16 num_of_actions,
				 u8 *data)
{
	struct postsend_info send_info = {};
	int ret;

	send_info.write.addr = (uintptr_t)data;
	send_info.write.length = num_of_actions * DR_MODIFY_ACTION_SIZE;
	send_info.remote_addr = mlx5dr_icm_pool_get_chunk_mr_addr(chunk);
	send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk);

	ret = dr_postsend_icm_data(dmn, &send_info);
	if (ret)
		return ret;

	return 0;
}

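/* Write modify-header action arguments to the given argument object,
 * splitting the data into cache-line-sized GTA_ARG posts; remote_addr here
 * is the argument id (plus chunk iterator) rather than an ICM address.
 */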
int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
			      u16 num_of_actions, u8 *actions_data)
{
	int data_len, iter = 0, cur_sent;
	u64 addr;
	int ret;

	addr = (uintptr_t)actions_data;
	data_len = num_of_actions * DR_MODIFY_ACTION_SIZE;

	do {
		struct postsend_info send_info = {};

		send_info.type = GTA_ARG;
		send_info.write.addr = addr;
		cur_sent = min_t(u32, data_len, DR_ACTION_CACHE_LINE_SIZE);
		send_info.write.length = cur_sent;
		send_info.write.lkey = 0;
		send_info.remote_addr = arg_id + iter;

		ret = dr_postsend_icm_data(dmn, &send_info);
		if (ret)
			goto out;

		iter++;
		addr += cur_sent;
		data_len -= cur_sent;
	} while (data_len > 0);

out:
	return ret;
}

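/* The following helpers drive the QP through its state machine
 * (RESET -> INIT -> RTR -> RTS) with FW commands; the QP is connected
 * to its own QP number, forming a self-loopback RC connection.
 */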
static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
				 struct mlx5dr_qp *dr_qp,
				 int port)
{
	u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
	void *qpc;

	qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, port);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
	MLX5_SET(qpc, qpc, rre, 1);
	MLX5_SET(qpc, qpc, rwe, 1);

	MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
	MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn);

	return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}

static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
				    struct mlx5dr_qp *dr_qp,
				    struct dr_qp_rts_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
	void *qpc;

	qpc  = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);

	MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);

	MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
	MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
	MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */

	MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
	MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);

	return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}

static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
				     struct mlx5dr_qp *dr_qp,
				     struct dr_qp_rtr_attr *attr)
{
	u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
	void *qpc;

	qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);

	MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);

	MLX5_SET(qpc, qpc, mtu, attr->mtu);
	MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
	MLX5_SET(qpc, qpc, remote_qpn, attr->qp_num);
	memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
	       attr->dgid_attr.mac, sizeof(attr->dgid_attr.mac));
	memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
	       attr->dgid_attr.gid, sizeof(attr->dgid_attr.gid));
	MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
		 attr->sgid_index);

	if (attr->dgid_attr.roce_ver == MLX5_ROCE_VERSION_2)
		MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
			 attr->udp_src_port);

	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
	MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
	MLX5_SET(qpc, qpc, min_rnr_nak, 1);

	MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
	MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);

	return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}

dr_send_allow_fl(struct mlx5dr_cmd_caps * caps)9817304d603SYevgeny Kliteynik static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
9827304d603SYevgeny Kliteynik {
9837304d603SYevgeny Kliteynik 	/* Check whether RC RoCE QP creation with force loopback is allowed.
9847304d603SYevgeny Kliteynik 	 * There are two separate capability bits for this:
9857304d603SYevgeny Kliteynik 	 *  - force loopback when RoCE is enabled
9867304d603SYevgeny Kliteynik 	 *  - force loopback when RoCE is disabled
9877304d603SYevgeny Kliteynik 	 */
9887304d603SYevgeny Kliteynik 	return ((caps->roce_caps.roce_en &&
9897304d603SYevgeny Kliteynik 		 caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
9907304d603SYevgeny Kliteynik 		(!caps->roce_caps.roce_en &&
9917304d603SYevgeny Kliteynik 		 caps->roce_caps.fl_rc_qp_when_roce_disabled));
9927304d603SYevgeny Kliteynik }
9937304d603SYevgeny Kliteynik 
dr_prepare_qp_to_rts(struct mlx5dr_domain * dmn)994297cccebSAlex Vesker static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
995297cccebSAlex Vesker {
996297cccebSAlex Vesker 	struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
997297cccebSAlex Vesker 	struct dr_qp_rts_attr rts_attr = {};
998297cccebSAlex Vesker 	struct dr_qp_rtr_attr rtr_attr = {};
999297cccebSAlex Vesker 	enum ib_mtu mtu = IB_MTU_1024;
1000297cccebSAlex Vesker 	u16 gid_index = 0;
1001297cccebSAlex Vesker 	int port = 1;
1002297cccebSAlex Vesker 	int ret;
1003297cccebSAlex Vesker 
1004297cccebSAlex Vesker 	/* Init */
1005297cccebSAlex Vesker 	ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
1006b7d0db55SErez Shitrit 	if (ret) {
1007b7d0db55SErez Shitrit 		mlx5dr_err(dmn, "Failed modify QP rst2init\n");
1008297cccebSAlex Vesker 		return ret;
1009b7d0db55SErez Shitrit 	}
1010297cccebSAlex Vesker 
1011297cccebSAlex Vesker 	/* RTR */
1012297cccebSAlex Vesker 	rtr_attr.mtu		= mtu;
1013f93f4f4fSLeon Romanovsky 	rtr_attr.qp_num		= dr_qp->qpn;
1014297cccebSAlex Vesker 	rtr_attr.min_rnr_timer	= 12;
1015297cccebSAlex Vesker 	rtr_attr.port_num	= port;
1016297cccebSAlex Vesker 	rtr_attr.udp_src_port	= dmn->info.caps.roce_min_src_udp;
1017297cccebSAlex Vesker 
10187304d603SYevgeny Kliteynik 	/* If QP creation with force loopback is allowed, then there
10197304d603SYevgeny Kliteynik 	 * is no need for GID index when creating the QP.
10207304d603SYevgeny Kliteynik 	 * Otherwise, query the GID attributes and use the GID index.
10217304d603SYevgeny Kliteynik 	 */
10227304d603SYevgeny Kliteynik 	rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
10237304d603SYevgeny Kliteynik 	if (!rtr_attr.fl) {
10247304d603SYevgeny Kliteynik 		ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
10257304d603SYevgeny Kliteynik 					   &rtr_attr.dgid_attr);
10267304d603SYevgeny Kliteynik 		if (ret)
10277304d603SYevgeny Kliteynik 			return ret;
10287304d603SYevgeny Kliteynik 
10297304d603SYevgeny Kliteynik 		rtr_attr.sgid_index = gid_index;
10307304d603SYevgeny Kliteynik 	}
10317304d603SYevgeny Kliteynik 
1032297cccebSAlex Vesker 	ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
1033b7d0db55SErez Shitrit 	if (ret) {
1034b7d0db55SErez Shitrit 		mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
1035297cccebSAlex Vesker 		return ret;
1036b7d0db55SErez Shitrit 	}
1037297cccebSAlex Vesker 
1038297cccebSAlex Vesker 	/* RTS */
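	/* Standard IB verbs semantics for the values below: timeout 14 is
	 * a local ACK timeout of 4.096us * 2^14 (~67ms), retry_cnt 7 is
	 * the maximum transport retry count, and rnr_retry 7 means
	 * unlimited retries on RNR NAKs.
	 */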
1039297cccebSAlex Vesker 	rts_attr.timeout	= 14;
1040297cccebSAlex Vesker 	rts_attr.retry_cnt	= 7;
1041297cccebSAlex Vesker 	rts_attr.rnr_retry	= 7;
1042297cccebSAlex Vesker 
1043297cccebSAlex Vesker 	ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
1044b7d0db55SErez Shitrit 	if (ret) {
1045b7d0db55SErez Shitrit 		mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
1046297cccebSAlex Vesker 		return ret;
1047b7d0db55SErez Shitrit 	}
1048297cccebSAlex Vesker 
1049297cccebSAlex Vesker 	return 0;
1050297cccebSAlex Vesker }
1051297cccebSAlex Vesker 
10528075411dSErez Shitrit static void dr_cq_complete(struct mlx5_core_cq *mcq,
10538075411dSErez Shitrit 			   struct mlx5_eqe *eqe)
10548075411dSErez Shitrit {
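	/* The CQ is used in polling mode and is never armed for
	 * interrupts, so this completion handler is not expected to run.
	 */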
10558075411dSErez Shitrit 	pr_err("Unexpected completion on CQ #%u\n", mcq->cqn);
10568075411dSErez Shitrit }
10578075411dSErez Shitrit 
1058297cccebSAlex Vesker static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
1059297cccebSAlex Vesker 				      struct mlx5_uars_page *uar,
1060297cccebSAlex Vesker 				      size_t ncqe)
1061297cccebSAlex Vesker {
1062297cccebSAlex Vesker 	u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
1063297cccebSAlex Vesker 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
1064297cccebSAlex Vesker 	struct mlx5_wq_param wqp;
1065297cccebSAlex Vesker 	struct mlx5_cqe64 *cqe;
1066297cccebSAlex Vesker 	struct mlx5dr_cq *cq;
1067297cccebSAlex Vesker 	int inlen, err, eqn;
1068297cccebSAlex Vesker 	void *cqc, *in;
1069297cccebSAlex Vesker 	__be64 *pas;
107082996995SAlex Vesker 	int vector;
1071297cccebSAlex Vesker 	u32 i;
1072297cccebSAlex Vesker 
1073297cccebSAlex Vesker 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1074297cccebSAlex Vesker 	if (!cq)
1075297cccebSAlex Vesker 		return NULL;
1076297cccebSAlex Vesker 
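	/* With the default QUEUE_SIZE of 128, the caller passes 129 CQEs;
	 * this rounds up to 256, i.e. log_cq_size = 8.
	 */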
1077297cccebSAlex Vesker 	ncqe = roundup_pow_of_two(ncqe);
1078297cccebSAlex Vesker 	MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
1079297cccebSAlex Vesker 
1080297cccebSAlex Vesker 	wqp.buf_numa_node = mdev->priv.numa_node;
1081297cccebSAlex Vesker 	wqp.db_numa_node = mdev->priv.numa_node;
1082297cccebSAlex Vesker 
1083297cccebSAlex Vesker 	err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
1084297cccebSAlex Vesker 			       &cq->wq_ctrl);
1085297cccebSAlex Vesker 	if (err)
1086297cccebSAlex Vesker 		goto out;
1087297cccebSAlex Vesker 
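	/* Initialize all CQEs as invalid and HW-owned, so stale entries
	 * are never mistaken for valid completions while polling.
	 */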
1088297cccebSAlex Vesker 	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1089297cccebSAlex Vesker 		cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1090297cccebSAlex Vesker 		cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
1091297cccebSAlex Vesker 	}
1092297cccebSAlex Vesker 
1093297cccebSAlex Vesker 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1094297cccebSAlex Vesker 		sizeof(u64) * cq->wq_ctrl.buf.npages;
1095297cccebSAlex Vesker 	in = kvzalloc(inlen, GFP_KERNEL);
1096297cccebSAlex Vesker 	if (!in)
1097297cccebSAlex Vesker 		goto err_cqwq;
1098297cccebSAlex Vesker 
1099674dd4e2SMaher Sanalla 	vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
1100f14c1a14SMaher Sanalla 	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
1101297cccebSAlex Vesker 	if (err) {
1102297cccebSAlex Vesker 		kvfree(in);
1103297cccebSAlex Vesker 		goto err_cqwq;
1104297cccebSAlex Vesker 	}
1105297cccebSAlex Vesker 
1106297cccebSAlex Vesker 	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1107297cccebSAlex Vesker 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
1108616d5769STal Gilboa 	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
1109297cccebSAlex Vesker 	MLX5_SET(cqc, cqc, uar_page, uar->index);
1110297cccebSAlex Vesker 	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1111297cccebSAlex Vesker 		 MLX5_ADAPTER_PAGE_SHIFT);
1112297cccebSAlex Vesker 	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1113297cccebSAlex Vesker 
1114297cccebSAlex Vesker 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
1115297cccebSAlex Vesker 	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
1116297cccebSAlex Vesker 
11178075411dSErez Shitrit 	cq->mcq.comp  = dr_cq_complete;
1118297cccebSAlex Vesker 
1119297cccebSAlex Vesker 	err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
1120297cccebSAlex Vesker 	kvfree(in);
1121297cccebSAlex Vesker 
1122297cccebSAlex Vesker 	if (err)
1123297cccebSAlex Vesker 		goto err_cqwq;
1124297cccebSAlex Vesker 
1125297cccebSAlex Vesker 	cq->mcq.cqe_sz = 64;
1126297cccebSAlex Vesker 	cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
1127297cccebSAlex Vesker 	cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
1128297cccebSAlex Vesker 	*cq->mcq.set_ci_db = 0;
11298075411dSErez Shitrit 
11308075411dSErez Shitrit 	/* Set a non-zero value in order to prevent the HW from running
11318075411dSErez Shitrit 	 * doorbell recovery on a CQ that is used in polling mode.
11328075411dSErez Shitrit 	 */
11338075411dSErez Shitrit 	*cq->mcq.arm_db = cpu_to_be32(2 << 28);
11348075411dSErez Shitrit 
1135297cccebSAlex Vesker 	cq->mcq.vector = 0;
1136297cccebSAlex Vesker 	cq->mcq.uar = uar;
11375fd08f65SYevgeny Kliteynik 	cq->mdev = mdev;
1138297cccebSAlex Vesker 
1139297cccebSAlex Vesker 	return cq;
1140297cccebSAlex Vesker 
1141297cccebSAlex Vesker err_cqwq:
1142297cccebSAlex Vesker 	mlx5_wq_destroy(&cq->wq_ctrl);
1143297cccebSAlex Vesker out:
1144297cccebSAlex Vesker 	kfree(cq);
1145297cccebSAlex Vesker 	return NULL;
1146297cccebSAlex Vesker }
1147297cccebSAlex Vesker 
1148297cccebSAlex Vesker static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
1149297cccebSAlex Vesker {
1150297cccebSAlex Vesker 	mlx5_core_destroy_cq(mdev, &cq->mcq);
1151297cccebSAlex Vesker 	mlx5_wq_destroy(&cq->wq_ctrl);
1152297cccebSAlex Vesker 	kfree(cq);
1153297cccebSAlex Vesker }
1154297cccebSAlex Vesker 
115583fec3f1SAharon Landau static int dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
1156297cccebSAlex Vesker {
1157297cccebSAlex Vesker 	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
1158297cccebSAlex Vesker 	void *mkc;
1159297cccebSAlex Vesker 
1160297cccebSAlex Vesker 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1161297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
1162297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, a, 1);
1163297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, rw, 1);
1164297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, rr, 1);
1165297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, lw, 1);
1166297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, lr, 1);
1167297cccebSAlex Vesker 
1168297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, pd, pdn);
1169297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, length64, 1);
1170297cccebSAlex Vesker 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
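
	/* The result is a "physical address" (PA) mkey: length64 spans the
	 * whole address space and qpn 0xffffff leaves the key usable by any
	 * QP on this PD, so a single lkey/rkey covers every DMA-mapped
	 * buffer the send ring uses.
	 */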
1171297cccebSAlex Vesker 
1172297cccebSAlex Vesker 	return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in));
1173297cccebSAlex Vesker }
1174297cccebSAlex Vesker 
1175297cccebSAlex Vesker static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
1176297cccebSAlex Vesker 				   u32 pdn, void *buf, size_t size)
1177297cccebSAlex Vesker {
1178297cccebSAlex Vesker 	struct mlx5dr_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1179297cccebSAlex Vesker 	struct device *dma_device;
1180297cccebSAlex Vesker 	dma_addr_t dma_addr;
1181297cccebSAlex Vesker 	int err;
1182297cccebSAlex Vesker 
1183297cccebSAlex Vesker 	if (!mr)
1184297cccebSAlex Vesker 		return NULL;
1185297cccebSAlex Vesker 
11867be3412aSParav Pandit 	dma_device = mlx5_core_dma_dev(mdev);
1187297cccebSAlex Vesker 	dma_addr = dma_map_single(dma_device, buf, size,
1188297cccebSAlex Vesker 				  DMA_BIDIRECTIONAL);
1189297cccebSAlex Vesker 	err = dma_mapping_error(dma_device, dma_addr);
1190297cccebSAlex Vesker 	if (err) {
1191297cccebSAlex Vesker 		mlx5_core_warn(mdev, "Can't DMA map buffer\n");
1192297cccebSAlex Vesker 		kfree(mr);
1193297cccebSAlex Vesker 		return NULL;
1194297cccebSAlex Vesker 	}
1195297cccebSAlex Vesker 
1196297cccebSAlex Vesker 	err = dr_create_mkey(mdev, pdn, &mr->mkey);
1197297cccebSAlex Vesker 	if (err) {
1198297cccebSAlex Vesker 		mlx5_core_warn(mdev, "Can't create mkey\n");
1199297cccebSAlex Vesker 		dma_unmap_single(dma_device, dma_addr, size,
1200297cccebSAlex Vesker 				 DMA_BIDIRECTIONAL);
1201297cccebSAlex Vesker 		kfree(mr);
1202297cccebSAlex Vesker 		return NULL;
1203297cccebSAlex Vesker 	}
1204297cccebSAlex Vesker 
1205297cccebSAlex Vesker 	mr->dma_addr = dma_addr;
1206297cccebSAlex Vesker 	mr->size = size;
1207297cccebSAlex Vesker 	mr->addr = buf;
1208297cccebSAlex Vesker 
1209297cccebSAlex Vesker 	return mr;
1210297cccebSAlex Vesker }
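
/* A minimal sketch (hypothetical helper, not part of the original flow)
 * of how a registered MR feeds a dr_data_seg: with the PA mkey the
 * segment address is the DMA address of the mapped buffer.
 */
static inline void dr_example_fill_write_seg(struct mlx5dr_mr *mr,
					     struct dr_data_seg *seg)
{
	seg->addr = (u64)mr->dma_addr;	/* DMA address, valid under the PA mkey */
	seg->length = DR_STE_SIZE;	/* one STE for this example */
	seg->lkey = mr->mkey;		/* key created by dr_create_mkey() */
}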
1211297cccebSAlex Vesker 
1212297cccebSAlex Vesker static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
1213297cccebSAlex Vesker {
121483fec3f1SAharon Landau 	mlx5_core_destroy_mkey(mdev, mr->mkey);
12157be3412aSParav Pandit 	dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
1216297cccebSAlex Vesker 			 DMA_BIDIRECTIONAL);
1217297cccebSAlex Vesker 	kfree(mr);
1218297cccebSAlex Vesker }
1219297cccebSAlex Vesker 
1220297cccebSAlex Vesker int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
1221297cccebSAlex Vesker {
1222297cccebSAlex Vesker 	struct dr_qp_init_attr init_attr = {};
1223297cccebSAlex Vesker 	int cq_size;
1224297cccebSAlex Vesker 	int size;
1225297cccebSAlex Vesker 	int ret;
1226297cccebSAlex Vesker 
1227297cccebSAlex Vesker 	dmn->send_ring = kzalloc(sizeof(*dmn->send_ring), GFP_KERNEL);
1228297cccebSAlex Vesker 	if (!dmn->send_ring)
1229297cccebSAlex Vesker 		return -ENOMEM;
1230297cccebSAlex Vesker 
1231297cccebSAlex Vesker 	cq_size = QUEUE_SIZE + 1;
1232297cccebSAlex Vesker 	dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
1233297cccebSAlex Vesker 	if (!dmn->send_ring->cq) {
1234b7d0db55SErez Shitrit 		mlx5dr_err(dmn, "Failed creating CQ\n");
1235297cccebSAlex Vesker 		ret = -ENOMEM;
1236297cccebSAlex Vesker 		goto free_send_ring;
1237297cccebSAlex Vesker 	}
1238297cccebSAlex Vesker 
1239297cccebSAlex Vesker 	init_attr.cqn = dmn->send_ring->cq->mcq.cqn;
1240297cccebSAlex Vesker 	init_attr.pdn = dmn->pdn;
1241297cccebSAlex Vesker 	init_attr.uar = dmn->uar;
1242297cccebSAlex Vesker 	init_attr.max_send_wr = QUEUE_SIZE;
1243aeacb52aSYevgeny Kliteynik 
1244aeacb52aSYevgeny Kliteynik 	/* Isolated VL is applicable only if force loopback is supported */
1245aeacb52aSYevgeny Kliteynik 	if (dr_send_allow_fl(&dmn->info.caps))
1246aeacb52aSYevgeny Kliteynik 		init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
1247aeacb52aSYevgeny Kliteynik 
1248cedb2819SAlex Vesker 	spin_lock_init(&dmn->send_ring->lock);
1249297cccebSAlex Vesker 
1250297cccebSAlex Vesker 	dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
1251297cccebSAlex Vesker 	if (!dmn->send_ring->qp)  {
1252b7d0db55SErez Shitrit 		mlx5dr_err(dmn, "Failed creating QP\n");
1253297cccebSAlex Vesker 		ret = -ENOMEM;
1254297cccebSAlex Vesker 		goto clean_cq;
1255297cccebSAlex Vesker 	}
1256297cccebSAlex Vesker 
1257297cccebSAlex Vesker 	dmn->send_ring->cq->qp = dmn->send_ring->qp;
1258297cccebSAlex Vesker 
1259297cccebSAlex Vesker 	dmn->info.max_send_wr = QUEUE_SIZE;
1260297cccebSAlex Vesker 	dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
1261297cccebSAlex Vesker 					DR_STE_SIZE);
1262297cccebSAlex Vesker 
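	/* With QUEUE_SIZE 128 and SIGNAL_PER_DIV_QUEUE 16, request a
	 * signalled completion once every 8 posted WRs.
	 */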
1263297cccebSAlex Vesker 	dmn->send_ring->signal_th = dmn->info.max_send_wr /
1264297cccebSAlex Vesker 		SIGNAL_PER_DIV_QUEUE;
1265297cccebSAlex Vesker 
1266297cccebSAlex Vesker 	/* Transition the QP through RST -> INIT -> RTR -> RTS so it can be used */
1267297cccebSAlex Vesker 	ret = dr_prepare_qp_to_rts(dmn);
1268297cccebSAlex Vesker 	if (ret)
1269297cccebSAlex Vesker 		goto clean_qp;
1270297cccebSAlex Vesker 
1271297cccebSAlex Vesker 	dmn->send_ring->max_post_send_size =
1272297cccebSAlex Vesker 		mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_1K,
1273297cccebSAlex Vesker 						   DR_ICM_TYPE_STE);
1274297cccebSAlex Vesker 
1275297cccebSAlex Vesker 	/* Allocate the max size as the write buffer: signal_th chunks of max_post_send_size bytes each */
1276297cccebSAlex Vesker 	size = dmn->send_ring->signal_th * dmn->send_ring->max_post_send_size;
1277297cccebSAlex Vesker 	dmn->send_ring->buf = kzalloc(size, GFP_KERNEL);
1278297cccebSAlex Vesker 	if (!dmn->send_ring->buf) {
1279297cccebSAlex Vesker 		ret = -ENOMEM;
1280297cccebSAlex Vesker 		goto clean_qp;
1281297cccebSAlex Vesker 	}
1282297cccebSAlex Vesker 
1283297cccebSAlex Vesker 	dmn->send_ring->buf_size = size;
1284297cccebSAlex Vesker 
1285297cccebSAlex Vesker 	dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
1286297cccebSAlex Vesker 				       dmn->pdn, dmn->send_ring->buf, size);
1287297cccebSAlex Vesker 	if (!dmn->send_ring->mr) {
1288297cccebSAlex Vesker 		ret = -ENOMEM;
1289297cccebSAlex Vesker 		goto free_mem;
1290297cccebSAlex Vesker 	}
1291297cccebSAlex Vesker 
12927d7c9453SYevgeny Kliteynik 	dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size,
12937d7c9453SYevgeny Kliteynik 					    GFP_KERNEL);
12947d7c9453SYevgeny Kliteynik 	if (!dmn->send_ring->sync_buff) {
1295297cccebSAlex Vesker 		ret = -ENOMEM;
1296297cccebSAlex Vesker 		goto clean_mr;
1297297cccebSAlex Vesker 	}
1298297cccebSAlex Vesker 
12997d7c9453SYevgeny Kliteynik 	dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
13007d7c9453SYevgeny Kliteynik 					    dmn->pdn, dmn->send_ring->sync_buff,
13017d7c9453SYevgeny Kliteynik 					    dmn->send_ring->max_post_send_size);
13027d7c9453SYevgeny Kliteynik 	if (!dmn->send_ring->sync_mr) {
13037d7c9453SYevgeny Kliteynik 		ret = -ENOMEM;
13047d7c9453SYevgeny Kliteynik 		goto free_sync_mem;
13057d7c9453SYevgeny Kliteynik 	}
13067d7c9453SYevgeny Kliteynik 
1307297cccebSAlex Vesker 	return 0;
1308297cccebSAlex Vesker 
13097d7c9453SYevgeny Kliteynik free_sync_mem:
13107d7c9453SYevgeny Kliteynik 	kfree(dmn->send_ring->sync_buff);
1311297cccebSAlex Vesker clean_mr:
1312297cccebSAlex Vesker 	dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
1313297cccebSAlex Vesker free_mem:
1314297cccebSAlex Vesker 	kfree(dmn->send_ring->buf);
1315297cccebSAlex Vesker clean_qp:
1316297cccebSAlex Vesker 	dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);
1317297cccebSAlex Vesker clean_cq:
1318297cccebSAlex Vesker 	dr_destroy_cq(dmn->mdev, dmn->send_ring->cq);
1319297cccebSAlex Vesker free_send_ring:
1320297cccebSAlex Vesker 	kfree(dmn->send_ring);
1321297cccebSAlex Vesker 
1322297cccebSAlex Vesker 	return ret;
1323297cccebSAlex Vesker }
1324297cccebSAlex Vesker 
1325297cccebSAlex Vesker void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
1326297cccebSAlex Vesker 			   struct mlx5dr_send_ring *send_ring)
1327297cccebSAlex Vesker {
1328297cccebSAlex Vesker 	dr_destroy_qp(dmn->mdev, send_ring->qp);
1329297cccebSAlex Vesker 	dr_destroy_cq(dmn->mdev, send_ring->cq);
1330297cccebSAlex Vesker 	dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
1331297cccebSAlex Vesker 	dr_dereg_mr(dmn->mdev, send_ring->mr);
1332297cccebSAlex Vesker 	kfree(send_ring->buf);
13337d7c9453SYevgeny Kliteynik 	kfree(send_ring->sync_buff);
1334297cccebSAlex Vesker 	kfree(send_ring);
1335297cccebSAlex Vesker }
1336297cccebSAlex Vesker 
1337297cccebSAlex Vesker int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
1338297cccebSAlex Vesker {
1339297cccebSAlex Vesker 	struct mlx5dr_send_ring *send_ring = dmn->send_ring;
1340297cccebSAlex Vesker 	struct postsend_info send_info = {};
1341297cccebSAlex Vesker 	u8 data[DR_STE_SIZE];
1342297cccebSAlex Vesker 	int num_of_sends_req;
1343297cccebSAlex Vesker 	int ret;
1344297cccebSAlex Vesker 	int i;
1345297cccebSAlex Vesker 
1346297cccebSAlex Vesker 	/* Sending this number of requests guarantees that the queue is drained */
1347297cccebSAlex Vesker 	num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
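	/* With the defaults (signal_th = 128 / 16 = 8, TH_NUMS_TO_DRAIN = 2)
	 * this is 8 * 2 / 2 = 8 requests, enough to cross the signalling
	 * threshold at least once, so a signalled completion is available
	 * to be reaped below.
	 */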
1348297cccebSAlex Vesker 
1349297cccebSAlex Vesker 	/* Send fake requests forcing the last to be signaled */
1350297cccebSAlex Vesker 	send_info.write.addr = (uintptr_t)data;
1351297cccebSAlex Vesker 	send_info.write.length = DR_STE_SIZE;
1352297cccebSAlex Vesker 	send_info.write.lkey = 0;
1353297cccebSAlex Vesker 	/* Using the sync_mr in order to write/read */
1354297cccebSAlex Vesker 	send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
135583fec3f1SAharon Landau 	send_info.rkey = send_ring->sync_mr->mkey;
1356297cccebSAlex Vesker 
1357297cccebSAlex Vesker 	for (i = 0; i < num_of_sends_req; i++) {
1358297cccebSAlex Vesker 		ret = dr_postsend_icm_data(dmn, &send_info);
1359297cccebSAlex Vesker 		if (ret)
1360297cccebSAlex Vesker 			return ret;
1361297cccebSAlex Vesker 	}
1362297cccebSAlex Vesker 
1363cedb2819SAlex Vesker 	spin_lock(&send_ring->lock);
1364297cccebSAlex Vesker 	ret = dr_handle_pending_wc(dmn, send_ring);
1365cedb2819SAlex Vesker 	spin_unlock(&send_ring->lock);
1366297cccebSAlex Vesker 
1367297cccebSAlex Vesker 	return ret;
1368297cccebSAlex Vesker }
1369