xref: /openbmc/linux/drivers/infiniband/hw/mlx5/umr.c (revision 483196764091621b8dd45d7af29e7a9c874a9f19)
104876c12SAharon Landau // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
204876c12SAharon Landau /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
304876c12SAharon Landau 
404876c12SAharon Landau #include "mlx5_ib.h"
504876c12SAharon Landau #include "umr.h"
66f0689fdSAharon Landau #include "wr.h"
704876c12SAharon Landau 
88a8a5d37SAharon Landau static __be64 get_umr_enable_mr_mask(void)
98a8a5d37SAharon Landau {
108a8a5d37SAharon Landau 	u64 result;
118a8a5d37SAharon Landau 
128a8a5d37SAharon Landau 	result = MLX5_MKEY_MASK_KEY |
138a8a5d37SAharon Landau 		 MLX5_MKEY_MASK_FREE;
148a8a5d37SAharon Landau 
158a8a5d37SAharon Landau 	return cpu_to_be64(result);
168a8a5d37SAharon Landau }
178a8a5d37SAharon Landau 
188a8a5d37SAharon Landau static __be64 get_umr_disable_mr_mask(void)
198a8a5d37SAharon Landau {
208a8a5d37SAharon Landau 	u64 result;
218a8a5d37SAharon Landau 
228a8a5d37SAharon Landau 	result = MLX5_MKEY_MASK_FREE;
238a8a5d37SAharon Landau 
248a8a5d37SAharon Landau 	return cpu_to_be64(result);
258a8a5d37SAharon Landau }
268a8a5d37SAharon Landau 
278a8a5d37SAharon Landau static __be64 get_umr_update_translation_mask(void)
288a8a5d37SAharon Landau {
298a8a5d37SAharon Landau 	u64 result;
308a8a5d37SAharon Landau 
318a8a5d37SAharon Landau 	result = MLX5_MKEY_MASK_LEN |
328a8a5d37SAharon Landau 		 MLX5_MKEY_MASK_PAGE_SIZE |
338a8a5d37SAharon Landau 		 MLX5_MKEY_MASK_START_ADDR;
348a8a5d37SAharon Landau 
358a8a5d37SAharon Landau 	return cpu_to_be64(result);
368a8a5d37SAharon Landau }
378a8a5d37SAharon Landau 
38ba6a9c68SAharon Landau static __be64 get_umr_update_access_mask(struct mlx5_ib_dev *dev)
398a8a5d37SAharon Landau {
408a8a5d37SAharon Landau 	u64 result;
418a8a5d37SAharon Landau 
428a8a5d37SAharon Landau 	result = MLX5_MKEY_MASK_LR |
438a8a5d37SAharon Landau 		 MLX5_MKEY_MASK_LW |
448a8a5d37SAharon Landau 		 MLX5_MKEY_MASK_RR |
458a8a5d37SAharon Landau 		 MLX5_MKEY_MASK_RW;
468a8a5d37SAharon Landau 
47ba6a9c68SAharon Landau 	if (MLX5_CAP_GEN(dev->mdev, atomic))
488a8a5d37SAharon Landau 		result |= MLX5_MKEY_MASK_A;
498a8a5d37SAharon Landau 
50ba6a9c68SAharon Landau 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
518a8a5d37SAharon Landau 		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;
528a8a5d37SAharon Landau 
53ba6a9c68SAharon Landau 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
548a8a5d37SAharon Landau 		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;
558a8a5d37SAharon Landau 
568a8a5d37SAharon Landau 	return cpu_to_be64(result);
578a8a5d37SAharon Landau }
588a8a5d37SAharon Landau 
598a8a5d37SAharon Landau static __be64 get_umr_update_pd_mask(void)
608a8a5d37SAharon Landau {
618a8a5d37SAharon Landau 	u64 result;
628a8a5d37SAharon Landau 
638a8a5d37SAharon Landau 	result = MLX5_MKEY_MASK_PD;
648a8a5d37SAharon Landau 
658a8a5d37SAharon Landau 	return cpu_to_be64(result);
668a8a5d37SAharon Landau }
678a8a5d37SAharon Landau 
688a8a5d37SAharon Landau static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
698a8a5d37SAharon Landau {
708a8a5d37SAharon Landau 	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
718a8a5d37SAharon Landau 	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
728a8a5d37SAharon Landau 		return -EPERM;
738a8a5d37SAharon Landau 
748a8a5d37SAharon Landau 	if (mask & MLX5_MKEY_MASK_A &&
758a8a5d37SAharon Landau 	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
768a8a5d37SAharon Landau 		return -EPERM;
778a8a5d37SAharon Landau 
788a8a5d37SAharon Landau 	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
798a8a5d37SAharon Landau 	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
808a8a5d37SAharon Landau 		return -EPERM;
818a8a5d37SAharon Landau 
828a8a5d37SAharon Landau 	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
838a8a5d37SAharon Landau 	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
848a8a5d37SAharon Landau 		return -EPERM;
858a8a5d37SAharon Landau 
868a8a5d37SAharon Landau 	return 0;
878a8a5d37SAharon Landau }
888a8a5d37SAharon Landau 
/*
 * mlx5r_umr_set_umr_ctrl_seg - Fill a UMR WQE control segment from a send WR.
 * @dev: mlx5 IB device, used to validate the resulting mkey mask
 * @umr: control segment to populate (zeroed first)
 * @wr: UMR send work request whose send_flags select the update
 *
 * Translates the MLX5_IB_SEND_UMR_* flags carried in @wr into the control
 * segment's flags and mkey mask, then verifies the mask against the
 * device's UMR-modify capabilities.
 *
 * Return: 0 on success, -EPERM if the mask touches a field the device does
 * not allow UMR to modify.
 */
int mlx5r_umr_set_umr_ctrl_seg(struct mlx5_ib_dev *dev,
			       struct mlx5_wqe_umr_ctrl_seg *umr,
			       const struct ib_send_wr *wr)
{
	const struct mlx5_umr_wr *umrwr = umr_wr(wr);

	memset(umr, 0, sizeof(*umr));

	/* Unless explicitly ignored, assert the mkey's expected free state. */
	if (!umrwr->ignore_free_state) {
		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
			 /* fail if free */
			umr->flags = MLX5_UMR_CHECK_FREE;
		else
			/* fail if not free */
			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
	}

	umr->xlt_octowords =
		cpu_to_be16(mlx5r_umr_get_xlt_octo(umrwr->xlt_size));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
		/* 48-bit XLT offset is split across two WQE fields. */
		u64 offset = mlx5r_umr_get_xlt_octo(umrwr->offset);

		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
		umr->mkey_mask |= get_umr_update_translation_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
		umr->mkey_mask |= get_umr_update_access_mask(dev);
		umr->mkey_mask |= get_umr_update_pd_mask();
	}
	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
		umr->mkey_mask |= get_umr_enable_mr_mask();
	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
		umr->mkey_mask |= get_umr_disable_mr_mask();

	/* With no scatter/gather entries the XLT data is inlined in the WQE. */
	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;

	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
}
1318a8a5d37SAharon Landau 
enum {
	/*
	 * Maximum number of outstanding UMR work requests; also used as
	 * the UMR QP send-queue depth and the semaphore initial count.
	 */
	MAX_UMR_WR = 128,
};
13504876c12SAharon Landau 
13604876c12SAharon Landau static int mlx5r_umr_qp_rst2rts(struct mlx5_ib_dev *dev, struct ib_qp *qp)
13704876c12SAharon Landau {
13804876c12SAharon Landau 	struct ib_qp_attr attr = {};
13904876c12SAharon Landau 	int ret;
14004876c12SAharon Landau 
14104876c12SAharon Landau 	attr.qp_state = IB_QPS_INIT;
14204876c12SAharon Landau 	attr.port_num = 1;
14304876c12SAharon Landau 	ret = ib_modify_qp(qp, &attr,
14404876c12SAharon Landau 			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
14504876c12SAharon Landau 	if (ret) {
14604876c12SAharon Landau 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
14704876c12SAharon Landau 		return ret;
14804876c12SAharon Landau 	}
14904876c12SAharon Landau 
15004876c12SAharon Landau 	memset(&attr, 0, sizeof(attr));
15104876c12SAharon Landau 	attr.qp_state = IB_QPS_RTR;
15204876c12SAharon Landau 
15304876c12SAharon Landau 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
15404876c12SAharon Landau 	if (ret) {
15504876c12SAharon Landau 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
15604876c12SAharon Landau 		return ret;
15704876c12SAharon Landau 	}
15804876c12SAharon Landau 
15904876c12SAharon Landau 	memset(&attr, 0, sizeof(attr));
16004876c12SAharon Landau 	attr.qp_state = IB_QPS_RTS;
16104876c12SAharon Landau 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
16204876c12SAharon Landau 	if (ret) {
16304876c12SAharon Landau 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
16404876c12SAharon Landau 		return ret;
16504876c12SAharon Landau 	}
16604876c12SAharon Landau 
16704876c12SAharon Landau 	return 0;
16804876c12SAharon Landau }
16904876c12SAharon Landau 
/*
 * mlx5r_umr_resource_init - Create the per-device UMR resources.
 * @dev: mlx5 IB device
 *
 * Allocates the PD, CQ, and dedicated UMR QP, brings the QP to RTS, and
 * initializes the semaphore that throttles concurrent UMR posters to the
 * send-queue depth. On any failure the already-created resources are
 * released in reverse order via the goto-cleanup labels.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		return PTR_ERR(pd);
	}

	/* Completions are handled in softirq context by the CQ API. */
	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto destroy_pd;
	}

	init_attr.send_cq = cq;
	init_attr.recv_cq = cq;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_send_wr = MAX_UMR_WR;
	init_attr.cap.max_send_sge = 1;
	/* Special internal QP type reserved for UMR operations. */
	init_attr.qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr.port_num = 1;
	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto destroy_cq;
	}

	ret = mlx5r_umr_qp_rst2rts(dev, qp);
	if (ret)
		goto destroy_qp;

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	/* Bound outstanding UMR WRs to the send-queue depth. */
	sema_init(&dev->umrc.sem, MAX_UMR_WR);

	return 0;

destroy_qp:
	ib_destroy_qp(qp);
destroy_cq:
	ib_free_cq(cq);
destroy_pd:
	ib_dealloc_pd(pd);
	return ret;
}
22504876c12SAharon Landau 
22604876c12SAharon Landau void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
22704876c12SAharon Landau {
22804876c12SAharon Landau 	ib_destroy_qp(dev->umrc.qp);
22904876c12SAharon Landau 	ib_free_cq(dev->umrc.cq);
23004876c12SAharon Landau 	ib_dealloc_pd(dev->umrc.pd);
23104876c12SAharon Landau }
2326f0689fdSAharon Landau 
/*
 * mlx5r_umr_post_send - Build and post a single UMR WQE on the UMR QP.
 * @ibqp: the UMR QP
 * @mkey: the memory key the WQE operates on
 * @cqe: completion cookie, smuggled through wr_id and recovered in the
 *       CQ done handler
 * @wqe: pre-built UMR WQE contents to copy into the send queue
 * @with_data: whether @wqe includes the trailing data segment
 *
 * Caller is expected to wait for the completion signalled through @cqe.
 *
 * Return: 0 on success, -EIO if the device is in internal error, or the
 * error from WQE allocation (WARNed, as the semaphore in the caller should
 * guarantee SQ space).
 */
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
			       struct mlx5r_umr_wqe *wqe, bool with_data)
{
	/* Without data the WQE ends before the data segment. */
	unsigned int wqe_size =
		with_data ? sizeof(struct mlx5r_umr_wqe) :
			    sizeof(struct mlx5r_umr_wqe) -
				    sizeof(struct mlx5_wqe_data_seg);
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	/* Reinterpret the cqe pointer as the 64-bit wr_id without a cast. */
	union {
		struct ib_cqe *ib_cqe;
		u64 wr_id;
	} id;
	void *cur_edge, *seg;
	unsigned long flags;
	unsigned int idx;
	int size, err;

	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;

	/* The send queue is shared; serialize WQE construction and posting. */
	spin_lock_irqsave(&qp->sq.lock, flags);

	err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
			      cpu_to_be32(mkey), false, false);
	if (WARN_ON(err))
		goto out;

	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;

	mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);

	id.ib_cqe = cqe;
	mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
			 MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR);

	/* Ring the doorbell to hand the WQE to hardware. */
	mlx5r_ring_db(qp, 1, ctrl);

out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
2786f0689fdSAharon Landau 
2796f0689fdSAharon Landau static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
2806f0689fdSAharon Landau {
2816f0689fdSAharon Landau 	struct mlx5_ib_umr_context *context =
2826f0689fdSAharon Landau 		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
2836f0689fdSAharon Landau 
2846f0689fdSAharon Landau 	context->status = wc->status;
2856f0689fdSAharon Landau 	complete(&context->done);
2866f0689fdSAharon Landau }
2876f0689fdSAharon Landau 
2886f0689fdSAharon Landau static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
2896f0689fdSAharon Landau {
2906f0689fdSAharon Landau 	context->cqe.done = mlx5r_umr_done;
2916f0689fdSAharon Landau 	init_completion(&context->done);
2926f0689fdSAharon Landau }
2936f0689fdSAharon Landau 
2946f0689fdSAharon Landau static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
2956f0689fdSAharon Landau 				   struct mlx5r_umr_wqe *wqe, bool with_data)
2966f0689fdSAharon Landau {
2976f0689fdSAharon Landau 	struct umr_common *umrc = &dev->umrc;
2986f0689fdSAharon Landau 	struct mlx5r_umr_context umr_context;
2996f0689fdSAharon Landau 	int err;
3006f0689fdSAharon Landau 
3016f0689fdSAharon Landau 	err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
3026f0689fdSAharon Landau 	if (WARN_ON(err))
3036f0689fdSAharon Landau 		return err;
3046f0689fdSAharon Landau 
3056f0689fdSAharon Landau 	mlx5r_umr_init_context(&umr_context);
3066f0689fdSAharon Landau 
3076f0689fdSAharon Landau 	down(&umrc->sem);
3086f0689fdSAharon Landau 	err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
3096f0689fdSAharon Landau 				  with_data);
3106f0689fdSAharon Landau 	if (err)
3116f0689fdSAharon Landau 		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
3126f0689fdSAharon Landau 	else {
3136f0689fdSAharon Landau 		wait_for_completion(&umr_context.done);
3146f0689fdSAharon Landau 		if (umr_context.status != IB_WC_SUCCESS) {
3156f0689fdSAharon Landau 			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
3166f0689fdSAharon Landau 				     umr_context.status);
3176f0689fdSAharon Landau 			err = -EFAULT;
3186f0689fdSAharon Landau 		}
3196f0689fdSAharon Landau 	}
3206f0689fdSAharon Landau 	up(&umrc->sem);
3216f0689fdSAharon Landau 	return err;
3226f0689fdSAharon Landau }
32333e8aa8eSAharon Landau 
/**
 * mlx5r_umr_revoke_mr - Fence all DMA on the MR
 * @mr: The MR to fence
 *
 * Upon return the NIC will not be doing any DMA to the pages under the MR,
 * and any DMA in progress will be completed. Failure of this function
 * indicates the HW has failed catastrophically.
 */
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct mlx5r_umr_wqe wqe = {};

	/* Device is already in error; no DMA can be in flight. */
	if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	/* Update the PD and mark the mkey free; inline WQE, no data seg. */
	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

	/* Park the mkey on the UMR PD so no user PD references remain. */
	MLX5_SET(mkc, &wqe.mkey_seg, free, 1);
	MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(dev->umrc.pd)->pdn);
	MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
	MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
		 mlx5_mkey_variant(mr->mmkey.key));

	return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
}
352*48319676SAharon Landau 
353*48319676SAharon Landau static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
354*48319676SAharon Landau 				       struct mlx5_mkey_seg *seg,
355*48319676SAharon Landau 				       unsigned int access_flags)
356*48319676SAharon Landau {
357*48319676SAharon Landau 	MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
358*48319676SAharon Landau 	MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
359*48319676SAharon Landau 	MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
360*48319676SAharon Landau 	MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
361*48319676SAharon Landau 	MLX5_SET(mkc, seg, lr, 1);
362*48319676SAharon Landau 	MLX5_SET(mkc, seg, relaxed_ordering_write,
363*48319676SAharon Landau 		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
364*48319676SAharon Landau 	MLX5_SET(mkc, seg, relaxed_ordering_read,
365*48319676SAharon Landau 		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
366*48319676SAharon Landau }
367*48319676SAharon Landau 
/*
 * mlx5r_umr_rereg_pd_access - Re-register an MR's PD and access flags.
 * @mr: the MR to update
 * @pd: the new protection domain
 * @access_flags: the new IB access flags
 *
 * Issues an inline UMR updating only the PD and access fields of the
 * mkey; the mkey must currently be in use (MLX5_UMR_CHECK_FREE fails the
 * operation if the mkey is free). On success the cached access flags on
 * @mr are refreshed.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct mlx5r_umr_wqe wqe = {};
	int err;

	wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

	mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
	MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
	MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
		 mlx5_mkey_variant(mr->mmkey.key));

	err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
	if (err)
		return err;

	/* Keep the software view of the access rights in sync with HW. */
	mr->access_flags = access_flags;
	return 0;
}
393