// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include "umr.h"
#include "wr.h"

/*
 * We can't use an array for xlt_emergency_page because dma_map_single doesn't
 * work on kernel modules memory
 */
void *xlt_emergency_page;
static DEFINE_MUTEX(xlt_emergency_page_mutex);

static __be64 get_umr_enable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_KEY |
		 MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_disable_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_translation_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN |
		 MLX5_MKEY_MASK_PAGE_SIZE |
		 MLX5_MKEY_MASK_START_ADDR;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_access_mask(struct mlx5_ib_dev *dev)
{
	u64 result;

	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;

	if (MLX5_CAP_GEN(dev->mdev, atomic))
		result |= MLX5_MKEY_MASK_A;

	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;

	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_pd_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_PD;

	return cpu_to_be64(result);
}

static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return -EPERM;

	if (mask & MLX5_MKEY_MASK_A &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return -EPERM;

	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return -EPERM;

	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return -EPERM;

	return 0;
}
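/*
 * UMR operations are posted on a dedicated kernel QP (MLX5_IB_QPT_REG_UMR),
 * created once per device below and driven through the standard
 * RESET -> INIT -> RTR -> RTS state sequence.
 */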
enum {
	MAX_UMR_WR = 128,
};

static int mlx5r_umr_qp_rst2rts(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
	struct ib_qp_attr attr = {};
	int ret;

	attr.qp_state = IB_QPS_INIT;
	attr.port_num = 1;
	ret = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		return ret;
	}

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTR;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		return ret;
	}

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		return ret;
	}

	return 0;
}

int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		return PTR_ERR(pd);
	}

	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto destroy_pd;
	}

	init_attr.send_cq = cq;
	init_attr.recv_cq = cq;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_send_wr = MAX_UMR_WR;
	init_attr.cap.max_send_sge = 1;
	init_attr.qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr.port_num = 1;
	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto destroy_cq;
	}

	ret = mlx5r_umr_qp_rst2rts(dev, qp);
	if (ret)
		goto destroy_qp;

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	mutex_init(&dev->umrc.lock);
	dev->umrc.state = MLX5_UMR_STATE_ACTIVE;

	return 0;

destroy_qp:
	ib_destroy_qp(qp);
destroy_cq:
	ib_free_cq(cq);
destroy_pd:
	ib_dealloc_pd(pd);
	return ret;
}
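/*
 * Release the UMR QP, CQ and PD. Safe to call when initialization never
 * ran: the state check below catches MLX5_UMR_STATE_UNINIT.
 */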
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{
	if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
		return;
	ib_destroy_qp(dev->umrc.qp);
	ib_free_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}

static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_qp_attr attr;
	int err;

	attr.qp_state = IB_QPS_RESET;
	err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
	if (err) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto err;
	}

	err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
	if (err)
		goto err;

	umrc->state = MLX5_UMR_STATE_ACTIVE;
	return 0;

err:
	umrc->state = MLX5_UMR_STATE_ERR;
	return err;
}
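/*
 * Build a UMR WQE directly on the send queue and ring the doorbell. The
 * whole WQE is copied inline; when with_data is false the trailing data
 * segment is omitted. Completion is delivered through @cqe.
 */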
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
			       struct mlx5r_umr_wqe *wqe, bool with_data)
{
	unsigned int wqe_size =
		with_data ? sizeof(struct mlx5r_umr_wqe) :
			    sizeof(struct mlx5r_umr_wqe) -
				    sizeof(struct mlx5_wqe_data_seg);
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	union {
		struct ib_cqe *ib_cqe;
		u64 wr_id;
	} id;
	void *cur_edge, *seg;
	unsigned long flags;
	unsigned int idx;
	int size, err;

	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;

	spin_lock_irqsave(&qp->sq.lock, flags);

	err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
			      cpu_to_be32(mkey), false, false);
	if (WARN_ON(err))
		goto out;

	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;

	mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);

	id.ib_cqe = cqe;
	mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
			 MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);

	mlx5r_ring_db(qp, 1, ctrl);

out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
{
	context->cqe.done = mlx5r_umr_done;
	init_completion(&context->done);
}
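/*
 * Synchronous UMR post: submit the WQE and sleep on its completion.
 * Flushed completions (IB_WC_WR_FLUSH_ERR) are retried; any other failure
 * triggers the QP recovery flow via mlx5r_umr_recover().
 */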
static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
				    struct mlx5r_umr_wqe *wqe, bool with_data)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5r_umr_context umr_context;
	int err;

	err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
	if (WARN_ON(err))
		return err;

	mlx5r_umr_init_context(&umr_context);

	down(&umrc->sem);
	while (true) {
		mutex_lock(&umrc->lock);
		if (umrc->state == MLX5_UMR_STATE_ERR) {
			mutex_unlock(&umrc->lock);
			err = -EFAULT;
			break;
		}

		if (umrc->state == MLX5_UMR_STATE_RECOVER) {
			mutex_unlock(&umrc->lock);
			usleep_range(3000, 5000);
			continue;
		}

		err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
					  with_data);
		mutex_unlock(&umrc->lock);
		if (err) {
			mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
				     err);
			break;
		}

		wait_for_completion(&umr_context.done);

		if (umr_context.status == IB_WC_SUCCESS)
			break;

		if (umr_context.status == IB_WC_WR_FLUSH_ERR)
			continue;

		WARN_ON_ONCE(1);
		mlx5_ib_warn(dev,
			     "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
			     umr_context.status);
		mutex_lock(&umrc->lock);
		err = mlx5r_umr_recover(dev);
		mutex_unlock(&umrc->lock);
		if (err)
			mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
				     err);
		err = -EFAULT;
		break;
	}
	up(&umrc->sem);
	return err;
}
/**
 * mlx5r_umr_revoke_mr - Fence all DMA on the MR
 * @mr: The MR to fence
 *
 * Upon return the NIC will not be doing any DMA to the pages under the MR,
 * and any DMA in progress will be completed. Failure of this function
 * indicates the HW has failed catastrophically.
 */
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct mlx5r_umr_wqe wqe = {};

	if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

	MLX5_SET(mkc, &wqe.mkey_seg, free, 1);
	MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(dev->umrc.pd)->pdn);
	MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
	MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
		 mlx5_mkey_variant(mr->mmkey.key));

	return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
}

static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
				       struct mlx5_mkey_seg *seg,
				       unsigned int access_flags)
{
	MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, seg, lr, 1);
	MLX5_SET(mkc, seg, relaxed_ordering_write,
		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
	MLX5_SET(mkc, seg, relaxed_ordering_read,
		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
}

int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct mlx5r_umr_wqe wqe = {};
	int err;

	wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

	mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
	MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
	MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
		 mlx5_mkey_variant(mr->mmkey.key));

	err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
	if (err)
		return err;

	mr->access_flags = access_flags;
	return 0;
}
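/*
 * Upper bounds for the temporary XLT buffer. MLX5_SPARE_UMR_CHUNK is a
 * smaller fallback size tried when the preferred high-order allocation
 * fails; see mlx5r_umr_alloc_xlt() below.
 */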
#define MLX5_MAX_UMR_CHUNK \
	((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_FLEX_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

/*
 * Allocate a temporary buffer to hold the per-page information to transfer to
 * HW. For efficiency this should be as large as it can be, but buffer
 * allocation failure is not allowed, so try smaller sizes.
 */
static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
	const size_t xlt_chunk_align = MLX5_UMR_FLEX_ALIGNMENT / ent_size;
	size_t size;
	void *res = NULL;

	static_assert(PAGE_SIZE % MLX5_UMR_FLEX_ALIGNMENT == 0);

	/*
	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context just that the
	 * allocation can't trigger any kind of reclaim.
	 */
	might_sleep();

	gfp_mask |= __GFP_ZERO | __GFP_NORETRY;

	/*
	 * If the system already has a suitable high order page then just use
	 * that, but don't try hard to create one. This max is about 1M, so a
	 * free x86 huge page will satisfy it.
	 */
	size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
		     MLX5_MAX_UMR_CHUNK);
	*nents = size / ent_size;
	res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
				       get_order(size));
	if (res)
		return res;

	if (size > MLX5_SPARE_UMR_CHUNK) {
		size = MLX5_SPARE_UMR_CHUNK;
		*nents = size / ent_size;
		res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
					       get_order(size));
		if (res)
			return res;
	}

	*nents = PAGE_SIZE / ent_size;
	res = (void *)__get_free_page(gfp_mask);
	if (res)
		return res;

	mutex_lock(&xlt_emergency_page_mutex);
	memset(xlt_emergency_page, 0, PAGE_SIZE);
	return xlt_emergency_page;
}

static void mlx5r_umr_free_xlt(void *xlt, size_t length)
{
	if (xlt == xlt_emergency_page) {
		mutex_unlock(&xlt_emergency_page_mutex);
		return;
	}

	free_pages((unsigned long)xlt, get_order(length));
}
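/* DMA-unmap the XLT buffer and hand it back to mlx5r_umr_free_xlt(). */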
static void mlx5r_umr_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
				     struct ib_sge *sg)
{
	struct device *ddev = &dev->mdev->pdev->dev;

	dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
	mlx5r_umr_free_xlt(xlt, sg->length);
}

/*
 * Create an XLT buffer ready for submission.
 */
static void *mlx5r_umr_create_xlt(struct mlx5_ib_dev *dev, struct ib_sge *sg,
				  size_t nents, size_t ent_size,
				  unsigned int flags)
{
	struct device *ddev = &dev->mdev->pdev->dev;
	dma_addr_t dma;
	void *xlt;

	xlt = mlx5r_umr_alloc_xlt(&nents, ent_size,
				  flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
								   GFP_KERNEL);
	sg->length = nents * ent_size;
	dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		mlx5r_umr_free_xlt(xlt, sg->length);
		return NULL;
	}
	sg->addr = dma;
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	return xlt;
}

static void
mlx5r_umr_set_update_xlt_ctrl_seg(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
				  unsigned int flags, struct ib_sge *sg)
{
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		/* fail if free */
		ctrl_seg->flags = MLX5_UMR_CHECK_FREE;
	else
		/* fail if not free */
		ctrl_seg->flags = MLX5_UMR_CHECK_NOT_FREE;
	ctrl_seg->xlt_octowords =
		cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
}

static void mlx5r_umr_set_update_xlt_mkey_seg(struct mlx5_ib_dev *dev,
					      struct mlx5_mkey_seg *mkey_seg,
					      struct mlx5_ib_mr *mr,
					      unsigned int page_shift)
{
	mlx5r_umr_set_access_flags(dev, mkey_seg, mr->access_flags);
	MLX5_SET(mkc, mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
	MLX5_SET64(mkc, mkey_seg, start_addr, mr->ibmr.iova);
	MLX5_SET64(mkc, mkey_seg, len, mr->ibmr.length);
	MLX5_SET(mkc, mkey_seg, log_page_size, page_shift);
	MLX5_SET(mkc, mkey_seg, qpn, 0xffffff);
	MLX5_SET(mkc, mkey_seg, mkey_7_0, mlx5_mkey_variant(mr->mmkey.key));
}
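/* Point the WQE data segment at the DMA-mapped XLT buffer. */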
static void
mlx5r_umr_set_update_xlt_data_seg(struct mlx5_wqe_data_seg *data_seg,
				  struct ib_sge *sg)
{
	data_seg->byte_count = cpu_to_be32(sg->length);
	data_seg->lkey = cpu_to_be32(sg->lkey);
	data_seg->addr = cpu_to_be64(sg->addr);
}

static void mlx5r_umr_update_offset(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
				    u64 offset)
{
	u64 octo_offset = mlx5r_umr_get_xlt_octo(offset);

	ctrl_seg->xlt_offset = cpu_to_be16(octo_offset & 0xffff);
	ctrl_seg->xlt_offset_47_16 = cpu_to_be32(octo_offset >> 16);
	ctrl_seg->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
}
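/*
 * Finalize the last WQE of an XLT update series: fold any requested
 * enable/PD/access/translation changes into the mkey mask and set the
 * transfer lengths from the (possibly shortened) final @sg.
 */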
static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
				       struct mlx5r_umr_wqe *wqe,
				       struct mlx5_ib_mr *mr, struct ib_sge *sg,
				       unsigned int flags)
{
	bool update_pd_access, update_translation;

	if (flags & MLX5_IB_UPD_XLT_ENABLE)
		wqe->ctrl_seg.mkey_mask |= get_umr_enable_mr_mask();

	update_pd_access = flags & MLX5_IB_UPD_XLT_ENABLE ||
			   flags & MLX5_IB_UPD_XLT_PD ||
			   flags & MLX5_IB_UPD_XLT_ACCESS;

	if (update_pd_access) {
		wqe->ctrl_seg.mkey_mask |= get_umr_update_access_mask(dev);
		wqe->ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	}

	update_translation =
		flags & MLX5_IB_UPD_XLT_ENABLE || flags & MLX5_IB_UPD_XLT_ADDR;

	if (update_translation) {
		wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask();
		if (!mr->ibmr.length)
			MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
	}

	wqe->ctrl_seg.xlt_octowords =
		cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
	wqe->data_seg.byte_count = cpu_to_be32(sg->length);
}

/*
 * Send the DMA list to the HW for a normal MR using UMR.
 * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
 * flag may be used.
 */
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
	struct mlx5r_umr_wqe wqe = {};
	struct ib_block_iter biter;
	struct mlx5_mtt *cur_mtt;
	size_t orig_sg_length;
	struct mlx5_mtt *mtt;
	size_t final_size;
	struct ib_sge sg;
	u64 offset = 0;
	int err = 0;

	if (WARN_ON(mr->umem->is_odp))
		return -EINVAL;

	mtt = mlx5r_umr_create_xlt(
		dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
		sizeof(*mtt), flags);
	if (!mtt)
		return -ENOMEM;

	orig_sg_length = sg.length;

	mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
	mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
					  mr->page_shift);
	mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);

	cur_mtt = mtt;
	rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
		if (cur_mtt == (void *)mtt + sg.length) {
			dma_sync_single_for_device(ddev, sg.addr, sg.length,
						   DMA_TO_DEVICE);

			err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe,
						       true);
			if (err)
				goto err;
			dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
						DMA_TO_DEVICE);
			offset += sg.length;
			mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);

			cur_mtt = mtt;
		}

		cur_mtt->ptag =
			cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				    MLX5_IB_MTT_PRESENT);

		if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
			cur_mtt->ptag = 0;

		cur_mtt++;
	}

	final_size = (void *)cur_mtt - (void *)mtt;
	sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
	memset(cur_mtt, 0, sg.length - final_size);
	mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);

	dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
	err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);

err:
	sg.length = orig_sg_length;
	mlx5r_umr_unmap_free_xlt(dev, mtt, &sg);
	return err;
}
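/* Indirect (KLM) mkeys may be modified by UMR only if the HCA allows it. */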
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}
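/*
 * Update @npages XLT entries of an ODP MR starting at entry @idx. Entries
 * are generated by mlx5_odp_populate_xlt() and pushed to the device in
 * chunks sized to the temporary XLT buffer.
 */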
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
			 int page_shift, int flags)
{
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
				? sizeof(struct mlx5_klm)
				: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_FLEX_ALIGNMENT / desc_size;
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
	const int page_mask = page_align - 1;
	struct mlx5r_umr_wqe wqe = {};
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t size_to_map = 0;
	size_t orig_sg_length;
	size_t pages_iter;
	struct ib_sge sg;
	int err = 0;
	void *xlt;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	if (WARN_ON(!mr->umem->is_odp))
		return -EINVAL;

	/* UMR copies MTTs in units of MLX5_UMR_FLEX_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}
	pages_to_map = ALIGN(npages, page_align);

	xlt = mlx5r_umr_create_xlt(dev, &sg, npages, desc_size, flags);
	if (!xlt)
		return -ENOMEM;

	pages_iter = sg.length / desc_size;
	orig_sg_length = sg.length;

	if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
		struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
		size_t max_pages = ib_umem_odp_num_pages(odp) - idx;

		pages_to_map = min_t(size_t, pages_to_map, max_pages);
	}

	mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
	mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr, page_shift);
	mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		size_to_map = npages * desc_size;
		dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
					DMA_TO_DEVICE);
		mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
		dma_sync_single_for_device(ddev, sg.addr, sg.length,
					   DMA_TO_DEVICE);
		sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map)
			mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
		mlx5r_umr_update_offset(&wqe.ctrl_seg, idx * desc_size);
		err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
	}
	sg.length = orig_sg_length;
	mlx5r_umr_unmap_free_xlt(dev, xlt, &sg);
	return err;
}