// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

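/*
 * Helpers for building and posting UMR (user-mode memory registration)
 * work requests on a dedicated kernel QP. UMR WQEs let the driver modify
 * an mkey context (translation entries, PD, access flags) in place,
 * without destroying and recreating the MR.
 */
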
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include "umr.h"
#include "wr.h"

/*
 * We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel module memory.
 */
void *xlt_emergency_page;
static DEFINE_MUTEX(xlt_emergency_page_mutex);

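/*
 * A UMR WQE carries a 64-bit mkey mask in its control segment; only the
 * mkey context fields selected by the mask may be modified by the WQE.
 * The helpers below build the mask for each class of update.
 */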
static __be64 get_umr_enable_mr_mask(void)
{
        u64 result;

        result = MLX5_MKEY_MASK_KEY |
                 MLX5_MKEY_MASK_FREE;

        return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
        u64 result;

        result = MLX5_MKEY_MASK_FREE;

        return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
        u64 result;

        result = MLX5_MKEY_MASK_LEN |
                 MLX5_MKEY_MASK_PAGE_SIZE |
                 MLX5_MKEY_MASK_START_ADDR;

        return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(struct mlx5_ib_dev *dev)
{
        u64 result;

        result = MLX5_MKEY_MASK_LR |
                 MLX5_MKEY_MASK_LW |
                 MLX5_MKEY_MASK_RR |
                 MLX5_MKEY_MASK_RW;

        if (MLX5_CAP_GEN(dev->mdev, atomic))
                result |= MLX5_MKEY_MASK_A;

        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
                result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;

        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
                result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;

        return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
        u64 result;

        result = MLX5_MKEY_MASK_PD;

        return cpu_to_be64(result);
}

static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
        if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
            MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
                return -EPERM;

        if (mask & MLX5_MKEY_MASK_A &&
            MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
                return -EPERM;

        if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
                return -EPERM;

        if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
                return -EPERM;

        return 0;
}

enum {
        MAX_UMR_WR = 128,
};

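/*
 * Walk the UMR QP through the standard RESET -> INIT -> RTR -> RTS state
 * machine. The QP only ever posts sends, so the RTR/RTS transitions need
 * nothing beyond the state itself.
 */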
static int mlx5r_umr_qp_rst2rts(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
        struct ib_qp_attr attr = {};
        int ret;

        attr.qp_state = IB_QPS_INIT;
        attr.port_num = 1;
        ret = ib_modify_qp(qp, &attr,
                           IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify UMR QP to INIT\n");
                return ret;
        }

        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTR;

        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify UMR QP to RTR\n");
                return ret;
        }

        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTS;
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify UMR QP to RTS\n");
                return ret;
        }

        return 0;
}

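/*
 * Create the PD, CQ and QP through which all UMR work requests are
 * posted, and bring the QP up to RTS. Intended to be called once during
 * device initialization.
 */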
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
{
        struct ib_qp_init_attr init_attr = {};
        struct ib_pd *pd;
        struct ib_cq *cq;
        struct ib_qp *qp;
        int ret;

        pd = ib_alloc_pd(&dev->ib_dev, 0);
        if (IS_ERR(pd)) {
                mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
                return PTR_ERR(pd);
        }

        cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
        if (IS_ERR(cq)) {
                mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
                ret = PTR_ERR(cq);
                goto destroy_pd;
        }

        init_attr.send_cq = cq;
        init_attr.recv_cq = cq;
        init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr.cap.max_send_wr = MAX_UMR_WR;
        init_attr.cap.max_send_sge = 1;
        init_attr.qp_type = MLX5_IB_QPT_REG_UMR;
        init_attr.port_num = 1;
        qp = ib_create_qp(pd, &init_attr);
        if (IS_ERR(qp)) {
                mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
                ret = PTR_ERR(qp);
                goto destroy_cq;
        }

        ret = mlx5r_umr_qp_rst2rts(dev, qp);
        if (ret)
                goto destroy_qp;

        dev->umrc.qp = qp;
        dev->umrc.cq = cq;
        dev->umrc.pd = pd;

        sema_init(&dev->umrc.sem, MAX_UMR_WR);
        mutex_init(&dev->umrc.lock);

        return 0;

destroy_qp:
        ib_destroy_qp(qp);
destroy_cq:
        ib_free_cq(cq);
destroy_pd:
        ib_dealloc_pd(pd);
        return ret;
}

void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{
        ib_destroy_qp(dev->umrc.qp);
        ib_free_cq(dev->umrc.cq);
        ib_dealloc_pd(dev->umrc.pd);
}

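/*
 * After a fatal completion error the UMR QP has dropped into the error
 * state; reset it and walk it back up to RTS so that the flushed work
 * requests can be resubmitted. Called with umrc->lock held.
 */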
static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
{
        struct umr_common *umrc = &dev->umrc;
        struct ib_qp_attr attr;
        int err;

        attr.qp_state = IB_QPS_RESET;
        err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
        if (err) {
                mlx5_ib_dbg(dev, "Couldn't reset UMR QP\n");
                goto err;
        }

        err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
        if (err)
                goto err;

        umrc->state = MLX5_UMR_STATE_ACTIVE;
        return 0;

err:
        umrc->state = MLX5_UMR_STATE_ERR;
        return err;
}

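/*
 * Build a UMR WQE directly on the send queue and ring the doorbell,
 * bypassing ib_post_send(). @with_data selects whether the trailing data
 * segment (pointing at the XLT buffer) is included in the WQE.
 */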
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
                               struct mlx5r_umr_wqe *wqe, bool with_data)
{
        unsigned int wqe_size =
                with_data ? sizeof(struct mlx5r_umr_wqe) :
                            sizeof(struct mlx5r_umr_wqe) -
                                    sizeof(struct mlx5_wqe_data_seg);
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        struct mlx5_wqe_ctrl_seg *ctrl;
        union {
                struct ib_cqe *ib_cqe;
                u64 wr_id;
        } id;
        void *cur_edge, *seg;
        unsigned long flags;
        unsigned int idx;
        int size, err;

        if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
                return -EIO;

        spin_lock_irqsave(&qp->sq.lock, flags);

        err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
                              cpu_to_be32(mkey), false, false);
        if (WARN_ON(err))
                goto out;

        qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;

        mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);

        id.ib_cqe = cqe;
        mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
                         MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);

        mlx5r_ring_db(qp, 1, ctrl);

out:
        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return err;
}

static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct mlx5r_umr_context *context =
                container_of(wc->wr_cqe, struct mlx5r_umr_context, cqe);

        context->status = wc->status;
        complete(&context->done);
}

static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
{
        context->cqe.done = mlx5r_umr_done;
        init_completion(&context->done);
}

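/*
 * Post a single UMR WQE and sleep until it completes. The semaphore caps
 * the number of outstanding WQEs at MAX_UMR_WR, and the mutex serializes
 * posting against QP error recovery; WQEs flushed while the QP is being
 * recovered are simply resubmitted.
 */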
static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
                                    struct mlx5r_umr_wqe *wqe, bool with_data)
{
        struct umr_common *umrc = &dev->umrc;
        struct mlx5r_umr_context umr_context;
        int err;

        err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
        if (WARN_ON(err))
                return err;

        mlx5r_umr_init_context(&umr_context);

        down(&umrc->sem);
        while (true) {
                mutex_lock(&umrc->lock);
                if (umrc->state == MLX5_UMR_STATE_ERR) {
                        mutex_unlock(&umrc->lock);
                        err = -EFAULT;
                        break;
                }

                if (umrc->state == MLX5_UMR_STATE_RECOVER) {
                        mutex_unlock(&umrc->lock);
                        usleep_range(3000, 5000);
                        continue;
                }

                err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
                                          with_data);
                mutex_unlock(&umrc->lock);
                if (err) {
                        mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
                                     err);
                        break;
                }

                wait_for_completion(&umr_context.done);

                if (umr_context.status == IB_WC_SUCCESS)
                        break;

                if (umr_context.status == IB_WC_WR_FLUSH_ERR)
                        continue;

                WARN_ON_ONCE(1);
                mlx5_ib_warn(dev,
                        "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
                        umr_context.status);
                mutex_lock(&umrc->lock);
                err = mlx5r_umr_recover(dev);
                mutex_unlock(&umrc->lock);
                if (err)
                        mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
                                     err);
                err = -EFAULT;
                break;
        }
        up(&umrc->sem);
        return err;
}

/**
 * mlx5r_umr_revoke_mr - Fence all DMA on the MR
 * @mr: The MR to fence
 *
 * Upon return the NIC will not be doing any DMA to the pages under the MR,
 * and any DMA in progress will be completed. Failure of this function
 * indicates the HW has failed catastrophically.
 */
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
{
        struct mlx5_ib_dev *dev = mr_to_mdev(mr);
        struct mlx5r_umr_wqe wqe = {};

        if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return 0;

        wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
        wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
        wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

        MLX5_SET(mkc, &wqe.mkey_seg, free, 1);
        MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(dev->umrc.pd)->pdn);
        MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
        MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
                 mlx5_mkey_variant(mr->mmkey.key));

        return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
}

static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
                                       struct mlx5_mkey_seg *seg,
                                       unsigned int access_flags)
{
        MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
        MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
        MLX5_SET(mkc, seg, lr, 1);
        MLX5_SET(mkc, seg, relaxed_ordering_write,
                 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
        MLX5_SET(mkc, seg, relaxed_ordering_read,
                 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
}

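/*
 * Change the PD and access flags of an existing MR in place with a single
 * UMR WQE, instead of deregistering and re-registering it.
 */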
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
                              int access_flags)
{
        struct mlx5_ib_dev *dev = mr_to_mdev(mr);
        struct mlx5r_umr_wqe wqe = {};
        int err;

        wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
        wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
        wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
        wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

        mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
        MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
        MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
                 mlx5_mkey_variant(mr->mmkey.key));

        err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
        if (err)
                return err;

        mr->access_flags = access_flags;
        return 0;
}

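/*
 * MLX5_MAX_UMR_CHUNK is the largest XLT buffer a single UMR WQE can
 * carry (roughly 1M); MLX5_SPARE_UMR_CHUNK (64K) is the smaller fallback
 * tried when an allocation of that size is unavailable.
 */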
#define MLX5_MAX_UMR_CHUNK                                                     \
        ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

/*
 * Allocate a temporary buffer to hold the per-page information to transfer to
 * HW. For efficiency this should be as large as it can be, but buffer
 * allocation failure is not allowed, so try smaller sizes.
 */
static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
        const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
        size_t size;
        void *res = NULL;

        static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);

        /*
         * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that
         * the allocation can't trigger any kind of reclaim.
         */
        might_sleep();

        gfp_mask |= __GFP_ZERO | __GFP_NORETRY;

        /*
         * If the system already has a suitable high order page then just use
         * that, but don't try hard to create one. This max is about 1M, so a
         * free x86 huge page will satisfy it.
         */
        size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
                     MLX5_MAX_UMR_CHUNK);
        *nents = size / ent_size;
        res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                       get_order(size));
        if (res)
                return res;

        if (size > MLX5_SPARE_UMR_CHUNK) {
                size = MLX5_SPARE_UMR_CHUNK;
                *nents = size / ent_size;
                res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                               get_order(size));
                if (res)
                        return res;
        }

        *nents = PAGE_SIZE / ent_size;
        res = (void *)__get_free_page(gfp_mask);
        if (res)
                return res;

        mutex_lock(&xlt_emergency_page_mutex);
        memset(xlt_emergency_page, 0, PAGE_SIZE);
        return xlt_emergency_page;
}

static void mlx5r_umr_free_xlt(void *xlt, size_t length)
{
        if (xlt == xlt_emergency_page) {
                mutex_unlock(&xlt_emergency_page_mutex);
                return;
        }

        free_pages((unsigned long)xlt, get_order(length));
}

static void mlx5r_umr_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
                                     struct ib_sge *sg)
{
        struct device *ddev = &dev->mdev->pdev->dev;

        dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
        mlx5r_umr_free_xlt(xlt, sg->length);
}

/*
 * Create an XLT buffer ready for submission.
 */
static void *mlx5r_umr_create_xlt(struct mlx5_ib_dev *dev, struct ib_sge *sg,
                                  size_t nents, size_t ent_size,
                                  unsigned int flags)
{
        struct device *ddev = &dev->mdev->pdev->dev;
        dma_addr_t dma;
        void *xlt;

        xlt = mlx5r_umr_alloc_xlt(&nents, ent_size,
                                  flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
                                                                   GFP_KERNEL);
        sg->length = nents * ent_size;
        dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, dma)) {
                mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
                mlx5r_umr_free_xlt(xlt, sg->length);
                return NULL;
        }
        sg->addr = dma;
        sg->lkey = dev->umrc.pd->local_dma_lkey;

        return xlt;
}

static void
mlx5r_umr_set_update_xlt_ctrl_seg(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
                                  unsigned int flags, struct ib_sge *sg)
{
        if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
                /* fail if free */
                ctrl_seg->flags = MLX5_UMR_CHECK_FREE;
        else
                /* fail if not free */
                ctrl_seg->flags = MLX5_UMR_CHECK_NOT_FREE;
        ctrl_seg->xlt_octowords =
                cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
}

static void mlx5r_umr_set_update_xlt_mkey_seg(struct mlx5_ib_dev *dev,
                                              struct mlx5_mkey_seg *mkey_seg,
                                              struct mlx5_ib_mr *mr,
                                              unsigned int page_shift)
{
        mlx5r_umr_set_access_flags(dev, mkey_seg, mr->access_flags);
        MLX5_SET(mkc, mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
        MLX5_SET64(mkc, mkey_seg, start_addr, mr->ibmr.iova);
        MLX5_SET64(mkc, mkey_seg, len, mr->ibmr.length);
        MLX5_SET(mkc, mkey_seg, log_page_size, page_shift);
        MLX5_SET(mkc, mkey_seg, qpn, 0xffffff);
        MLX5_SET(mkc, mkey_seg, mkey_7_0, mlx5_mkey_variant(mr->mmkey.key));
}

static void
mlx5r_umr_set_update_xlt_data_seg(struct mlx5_wqe_data_seg *data_seg,
                                  struct ib_sge *sg)
{
        data_seg->byte_count = cpu_to_be32(sg->length);
        data_seg->lkey = cpu_to_be32(sg->lkey);
        data_seg->addr = cpu_to_be64(sg->addr);
}

static void mlx5r_umr_update_offset(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
                                    u64 offset)
{
        u64 octo_offset = mlx5r_umr_get_xlt_octo(offset);

        ctrl_seg->xlt_offset = cpu_to_be16(octo_offset & 0xffff);
        ctrl_seg->xlt_offset_47_16 = cpu_to_be32(octo_offset >> 16);
        ctrl_seg->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
}

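/*
 * Finalize the last WQE of an XLT update: set the enable/PD/access/
 * translation mask bits requested by @flags and size the WQE to the
 * final scatter length.
 */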
static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
                                       struct mlx5r_umr_wqe *wqe,
                                       struct mlx5_ib_mr *mr, struct ib_sge *sg,
                                       unsigned int flags)
{
        bool update_pd_access, update_translation;

        if (flags & MLX5_IB_UPD_XLT_ENABLE)
                wqe->ctrl_seg.mkey_mask |= get_umr_enable_mr_mask();

        update_pd_access = flags & MLX5_IB_UPD_XLT_ENABLE ||
                           flags & MLX5_IB_UPD_XLT_PD ||
                           flags & MLX5_IB_UPD_XLT_ACCESS;

        if (update_pd_access) {
                wqe->ctrl_seg.mkey_mask |= get_umr_update_access_mask(dev);
                wqe->ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
        }

        update_translation =
                flags & MLX5_IB_UPD_XLT_ENABLE || flags & MLX5_IB_UPD_XLT_ADDR;

        if (update_translation) {
                wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask();
                if (!mr->ibmr.length)
                        MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
        }

        wqe->ctrl_seg.xlt_octowords =
                cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
        wqe->data_seg.byte_count = cpu_to_be32(sg->length);
}

/*
 * Send the DMA list to the HW for a normal MR using UMR.
 * A dmabuf MR is handled the same way, except that the MLX5_IB_UPD_XLT_ZAP
 * flag may also be used.
 */
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
        struct mlx5_ib_dev *dev = mr_to_mdev(mr);
        struct device *ddev = &dev->mdev->pdev->dev;
        struct mlx5r_umr_wqe wqe = {};
        struct ib_block_iter biter;
        struct mlx5_mtt *cur_mtt;
        size_t orig_sg_length;
        struct mlx5_mtt *mtt;
        size_t final_size;
        struct ib_sge sg;
        u64 offset = 0;
        int err = 0;

        if (WARN_ON(mr->umem->is_odp))
                return -EINVAL;

        mtt = mlx5r_umr_create_xlt(
                dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
                sizeof(*mtt), flags);
        if (!mtt)
                return -ENOMEM;

        orig_sg_length = sg.length;

        mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
        mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
                                          mr->page_shift);
        mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);

        cur_mtt = mtt;
        rdma_for_each_block(mr->umem->sgt_append.sgt.sgl, &biter,
                            mr->umem->sgt_append.sgt.nents,
                            BIT(mr->page_shift)) {
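                /*
                 * The temporary XLT buffer only holds a window of the MTT
                 * list. Whenever it fills up, sync it to the device, post
                 * a UMR WQE for it, and continue filling from the next
                 * translation offset.
                 */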
                if (cur_mtt == (void *)mtt + sg.length) {
                        dma_sync_single_for_device(ddev, sg.addr, sg.length,
                                                   DMA_TO_DEVICE);

                        err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe,
                                                       true);
                        if (err)
                                goto err;
                        dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
                                                DMA_TO_DEVICE);
                        offset += sg.length;
                        mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);

                        cur_mtt = mtt;
                }

                cur_mtt->ptag =
                        cpu_to_be64(rdma_block_iter_dma_address(&biter) |
                                    MLX5_IB_MTT_PRESENT);

                if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
                        cur_mtt->ptag = 0;

                cur_mtt++;
        }

        final_size = (void *)cur_mtt - (void *)mtt;
        sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
        memset(cur_mtt, 0, sg.length - final_size);
        mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);

        dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
        err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);

err:
        sg.length = orig_sg_length;
        mlx5r_umr_unmap_free_xlt(dev, mtt, &sg);
        return err;
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
        return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

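/*
 * Update a range of an ODP MR's translation table (XLT), populating or
 * invalidating entries; with MLX5_IB_UPD_XLT_INDIRECT the entries are
 * KLM descriptors rather than MTTs. The range is worked through in
 * chunks that fit the temporary XLT buffer.
 */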
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
                         int page_shift, int flags)
{
        int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
                               ? sizeof(struct mlx5_klm)
                               : sizeof(struct mlx5_mtt);
        const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
        struct mlx5_ib_dev *dev = mr_to_mdev(mr);
        struct device *ddev = &dev->mdev->pdev->dev;
        const int page_mask = page_align - 1;
        struct mlx5r_umr_wqe wqe = {};
        size_t pages_mapped = 0;
        size_t pages_to_map = 0;
        size_t size_to_map = 0;
        size_t orig_sg_length;
        size_t pages_iter;
        struct ib_sge sg;
        int err = 0;
        void *xlt;

        if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
            !umr_can_use_indirect_mkey(dev))
                return -EPERM;

        if (WARN_ON(!mr->umem->is_odp))
                return -EINVAL;

        /*
         * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, so we
         * need to align the offset and length accordingly.
         */
        if (idx & page_mask) {
                npages += idx & page_mask;
                idx &= ~page_mask;
        }
        pages_to_map = ALIGN(npages, page_align);

        xlt = mlx5r_umr_create_xlt(dev, &sg, npages, desc_size, flags);
        if (!xlt)
                return -ENOMEM;

        pages_iter = sg.length / desc_size;
        orig_sg_length = sg.length;

        if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
                struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
                size_t max_pages = ib_umem_odp_num_pages(odp) - idx;

                pages_to_map = min_t(size_t, pages_to_map, max_pages);
        }

        mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
        mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr, page_shift);
        mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);

        for (pages_mapped = 0;
             pages_mapped < pages_to_map && !err;
             pages_mapped += pages_iter, idx += pages_iter) {
                npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
                size_to_map = npages * desc_size;
                dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
                                        DMA_TO_DEVICE);
                mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
                dma_sync_single_for_device(ddev, sg.addr, sg.length,
                                           DMA_TO_DEVICE);
                sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);

                if (pages_mapped + pages_iter >= pages_to_map)
                        mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
                mlx5r_umr_update_offset(&wqe.ctrl_seg, idx * desc_size);
                err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
        }
        sg.length = orig_sg_length;
        mlx5r_umr_unmap_free_xlt(dev, xlt, &sg);
        return err;
}