Lines matching refs: mr — identifier cross-reference hits from the Linux mlx5 InfiniBand driver (drivers/infiniband/hw/mlx5/mr.c). Each entry shows the source line number, the matching line, and its enclosing function.
127 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument
129 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
131 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
758 struct mlx5_ib_mr *mr; in _mlx5_mr_cache_alloc() local
761 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in _mlx5_mr_cache_alloc()
762 if (!mr) in _mlx5_mr_cache_alloc()
772 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
777 kfree(mr); in _mlx5_mr_cache_alloc()
781 mr->mmkey.key = pop_stored_mkey(ent); in _mlx5_mr_cache_alloc()
785 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
786 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_mr_cache_alloc()
787 init_waitqueue_head(&mr->mmkey.wait); in _mlx5_mr_cache_alloc()
788 return mr; in _mlx5_mr_cache_alloc()
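
The pattern above in _mlx5_mr_cache_alloc() is pop-or-create: reuse a pre-created mkey from the cache entry's store when one exists, otherwise create a fresh one, and free the half-built wrapper if creation fails. A minimal userspace sketch of the same shape, with hypothetical names (the real store is an xarray, not a fixed array):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's cache entry and MR. */
struct ent {
    unsigned int stored[8];   /* pre-created keys ready for reuse */
    int nstored;
    unsigned int next_key;    /* source of "freshly created" keys */
};

struct mr {
    unsigned int key;
    struct ent *cache_ent;    /* remembers the owning entry, as mmkey.cache_ent does */
};

/* Models create_cache_mkey(): may fail, in which case the caller
 * must free the half-initialized wrapper. */
static int create_key(struct ent *e, unsigned int *key)
{
    if (e->next_key == 0)
        return -1;            /* simulated firmware failure */
    *key = e->next_key++;
    return 0;
}

static struct mr *mr_cache_alloc(struct ent *e)
{
    struct mr *mr = calloc(1, sizeof(*mr));

    if (!mr)
        return NULL;
    if (e->nstored == 0) {
        /* Cache miss: create a fresh key, like create_cache_mkey(). */
        if (create_key(e, &mr->key)) {
            free(mr);         /* mirrors the kfree(mr) error path */
            return NULL;
        }
    } else {
        /* Cache hit: pop a stored key, like pop_stored_mkey(). */
        mr->key = e->stored[--e->nstored];
    }
    mr->cache_ent = e;
    return mr;
}

int main(void)
{
    struct ent e = { .stored = { 0x100 }, .nstored = 1, .next_key = 0x200 };
    struct mr *a = mr_cache_alloc(&e);   /* pops 0x100 */
    struct mr *b = mr_cache_alloc(&e);   /* creates 0x200 */

    printf("a=0x%x b=0x%x\n", a->key, b->key);
    free(a);
    free(b);
    return 0;
}
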
1077 struct mlx5_ib_mr *mr; in mlx5_ib_get_dma_mr() local
1082 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dma_mr()
1083 if (!mr) in mlx5_ib_get_dma_mr()
1099 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
1104 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
1105 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1106 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
1107 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1109 return &mr->ibmr; in mlx5_ib_get_dma_mr()
1115 kfree(mr); in mlx5_ib_get_dma_mr()
1138 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, in set_mr_fields() argument
1141 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
1142 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
1143 mr->ibmr.length = length; in set_mr_fields()
1144 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
1145 mr->ibmr.iova = iova; in set_mr_fields()
1146 mr->access_flags = access_flags; in set_mr_fields()
1169 struct mlx5_ib_mr *mr; in alloc_cacheable_mr() local
1190 mr = reg_create(pd, umem, iova, access_flags, page_size, false); in alloc_cacheable_mr()
1192 if (IS_ERR(mr)) in alloc_cacheable_mr()
1193 return mr; in alloc_cacheable_mr()
1194 mr->mmkey.rb_key = rb_key; in alloc_cacheable_mr()
1195 return mr; in alloc_cacheable_mr()
1198 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1199 if (IS_ERR(mr)) in alloc_cacheable_mr()
1200 return mr; in alloc_cacheable_mr()
1202 mr->ibmr.pd = pd; in alloc_cacheable_mr()
1203 mr->umem = umem; in alloc_cacheable_mr()
1204 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
1205 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
1207 return mr; in alloc_cacheable_mr()
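
alloc_cacheable_mr() first tries the mkey cache and falls back to reg_create(); in both paths the page shift is derived as order_base_2(page_size), also visible in reg_create() and umr_rereg_pas() below. A hedged userspace equivalent of that log2 step, valid for the power-of-two sizes the driver passes here:

#include <assert.h>
#include <stdio.h>

/* For a power-of-two page_size, order_base_2(page_size) is exactly
 * the bit position of the single set bit, so counting trailing zeros
 * gives the same answer. (The kernel helper rounds up for values
 * that are not powers of two; that case does not arise here.) */
static unsigned int page_shift_of(unsigned long page_size)
{
    assert(page_size && (page_size & (page_size - 1)) == 0);
    return (unsigned int)__builtin_ctzl(page_size);
}

int main(void)
{
    printf("%u %u\n", page_shift_of(4096), page_shift_of(2UL << 20));
    /* prints: 12 21 (4 KiB and 2 MiB pages) */
    return 0;
}
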
1219 struct mlx5_ib_mr *mr; in reg_create() local
1229 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create()
1230 if (!mr) in reg_create()
1233 mr->ibmr.pd = pd; in reg_create()
1234 mr->access_flags = access_flags; in reg_create()
1235 mr->page_shift = order_base_2(page_size); in reg_create()
1252 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1271 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1272 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1277 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1280 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1285 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1286 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift); in reg_create()
1287 mr->umem = umem; in reg_create()
1288 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1291 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1293 return mr; in reg_create()
1298 kfree(mr); in reg_create()
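
reg_create() is the classic kernel goto-unwind constructor: allocate the MR wrapper, build the command mailbox, create the mkey, and on any failure release only what was already acquired, in reverse order (the kfree(mr) at 1298 is the last rung). A compressed, runnable userspace sketch of that shape; hw_create_key() and the other names are stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct mr { unsigned int key; };

/* Stand-in for mlx5_ib_create_mkey(): consumes the prepared command
 * buffer and may fail. */
static int hw_create_key(void *cmd, unsigned int *key)
{
    (void)cmd;
    *key = 0x42;
    return 0;
}

static struct mr *reg_create_sketch(size_t cmd_len)
{
    struct mr *mr;
    void *cmd;

    mr = calloc(1, sizeof(*mr));        /* kzalloc(sizeof(*mr), GFP_KERNEL) */
    if (!mr)
        return NULL;
    cmd = calloc(1, cmd_len);           /* kvzalloc(inlen) for the mailbox */
    if (!cmd)
        goto err_free_mr;
    /* ... MLX5_SET(mkc, mkc, ...) field setup would happen here ... */
    if (hw_create_key(cmd, &mr->key))
        goto err_free_cmd;
    free(cmd);                          /* only needed during creation */
    return mr;

err_free_cmd:
    free(cmd);
err_free_mr:
    free(mr);                           /* release in reverse acquisition order */
    return NULL;
}

int main(void)
{
    struct mr *mr = reg_create_sketch(256);

    if (mr)
        printf("mkey = 0x%x\n", mr->key);
    free(mr);
    return 0;
}
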
1307 struct mlx5_ib_mr *mr; in mlx5_ib_get_dm_mr() local
1312 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dm_mr()
1313 if (!mr) in mlx5_ib_get_dm_mr()
1329 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1335 set_mr_fields(dev, mr, length, acc, start_addr); in mlx5_ib_get_dm_mr()
1337 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1343 kfree(mr); in mlx5_ib_get_dm_mr()
1401 struct mlx5_ib_mr *mr = NULL; in create_real_mr() local
1407 mr = alloc_cacheable_mr(pd, umem, iova, access_flags); in create_real_mr()
1413 mr = reg_create(pd, umem, iova, access_flags, page_size, true); in create_real_mr()
1416 if (IS_ERR(mr)) { in create_real_mr()
1418 return ERR_CAST(mr); in create_real_mr()
1421 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1431 err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); in create_real_mr()
1433 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1437 return &mr->ibmr; in create_real_mr()
1446 struct mlx5_ib_mr *mr; in create_user_odp_mr() local
1461 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); in create_user_odp_mr()
1462 if (IS_ERR(mr)) in create_user_odp_mr()
1463 return ERR_CAST(mr); in create_user_odp_mr()
1464 return &mr->ibmr; in create_user_odp_mr()
1476 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); in create_user_odp_mr()
1477 if (IS_ERR(mr)) { in create_user_odp_mr()
1479 return ERR_CAST(mr); in create_user_odp_mr()
1481 xa_init(&mr->implicit_children); in create_user_odp_mr()
1483 odp->private = mr; in create_user_odp_mr()
1484 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1488 err = mlx5_ib_init_odp_mr(mr); in create_user_odp_mr()
1491 return &mr->ibmr; in create_user_odp_mr()
1494 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
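
In create_user_odp_mr(), once the MR exists, failures from mlx5r_store_odp_mkey() or mlx5_ib_init_odp_mr() both funnel into the common destructor mlx5_ib_dereg_mr() rather than bespoke unwinding, since dereg can cope with a partially initialized MR. A toy sketch of that funnel, with the odp_mkeys xarray reduced to a single slot:

#include <stdio.h>
#include <stdlib.h>

struct mr { int in_table; };

static struct mr *table_slot;          /* stand-in for the dev->odp_mkeys xarray */

static int store_key(struct mr *mr)    /* mlx5r_store_odp_mkey() */
{
    if (table_slot)
        return -1;
    table_slot = mr;
    mr->in_table = 1;
    return 0;
}

static int init_odp(struct mr *mr)     /* mlx5_ib_init_odp_mr() */
{
    (void)mr;
    return 0;
}

/* Common destructor: safe on a partially constructed MR, so all
 * post-creation error paths can funnel into it, like err_dereg_mr. */
static void dereg(struct mr *mr)
{
    if (mr->in_table)
        table_slot = NULL;             /* xa_erase() */
    free(mr);
}

static struct mr *create_odp_sketch(void)
{
    struct mr *mr = calloc(1, sizeof(*mr));

    if (!mr)
        return NULL;
    if (store_key(mr) || init_odp(mr)) {
        dereg(mr);                     /* one cleanup path for both failures */
        return NULL;
    }
    return mr;
}

int main(void)
{
    struct mr *mr = create_odp_sketch();

    printf("created %s\n", mr ? "ok" : "failed");
    if (mr)
        dereg(mr);
    return 0;
}
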
1523 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb() local
1530 mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); in mlx5_ib_dmabuf_invalidate_cb()
1545 struct mlx5_ib_mr *mr = NULL; in mlx5_ib_reg_user_mr_dmabuf() local
1570 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in mlx5_ib_reg_user_mr_dmabuf()
1572 if (IS_ERR(mr)) { in mlx5_ib_reg_user_mr_dmabuf()
1574 return ERR_CAST(mr); in mlx5_ib_reg_user_mr_dmabuf()
1577 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr_dmabuf()
1579 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr_dmabuf()
1580 umem_dmabuf->private = mr; in mlx5_ib_reg_user_mr_dmabuf()
1581 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in mlx5_ib_reg_user_mr_dmabuf()
1585 err = mlx5_ib_init_dmabuf_mr(mr); in mlx5_ib_reg_user_mr_dmabuf()
1588 return &mr->ibmr; in mlx5_ib_reg_user_mr_dmabuf()
1591 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in mlx5_ib_reg_user_mr_dmabuf()
1613 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, in can_use_umr_rereg_pas() argument
1618 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1621 if (!mr->mmkey.cache_ent) in can_use_umr_rereg_pas()
1630 return (mr->mmkey.cache_ent->rb_key.ndescs) >= in can_use_umr_rereg_pas()
1634 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, in umr_rereg_pas() argument
1638 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1640 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1648 err = mlx5r_umr_revoke_mr(mr); in umr_rereg_pas()
1653 mr->ibmr.pd = pd; in umr_rereg_pas()
1657 mr->access_flags = access_flags; in umr_rereg_pas()
1661 mr->ibmr.iova = iova; in umr_rereg_pas()
1662 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1663 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1664 mr->umem = new_umem; in umr_rereg_pas()
1665 err = mlx5r_umr_update_mr_pas(mr, upd_flags); in umr_rereg_pas()
1671 mr->umem = old_umem; in umr_rereg_pas()
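
umr_rereg_pas() saves old_umem before repointing the MR, so a failed mlx5r_umr_update_mr_pas() can restore mr->umem and leave the caller with a still-consistent MR. A small sketch of that update-with-rollback shape (push_pas() is a stand-in that always fails, to exercise the rollback):

#include <stdio.h>

struct umem { const char *name; };
struct mr { struct umem *umem; };

static int push_pas(struct mr *mr)           /* mlx5r_umr_update_mr_pas() */
{
    (void)mr;
    return -1;                               /* simulated UMR failure */
}

static int rereg_sketch(struct mr *mr, struct umem *new_umem)
{
    struct umem *old_umem = mr->umem;        /* keep old state for rollback */
    int err;

    /* In the driver, mlx5r_umr_revoke_mr() first detaches the old
     * translation so the mkey is safe to repoint. */
    mr->umem = new_umem;
    err = push_pas(mr);
    if (err) {
        mr->umem = old_umem;                 /* roll back, as umr_rereg_pas() does */
        return err;
    }
    return 0;
}

int main(void)
{
    struct umem a = { "old" }, b = { "new" };
    struct mr mr = { .umem = &a };

    if (rereg_sketch(&mr, &b))
        printf("failed, still using %s umem\n", mr.umem->name);
    return 0;
}
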
1687 struct mlx5_ib_mr *mr = to_mmr(ib_mr); in mlx5_ib_rereg_user_mr() local
1702 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1710 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1712 err = mlx5r_umr_rereg_pd_access(mr, new_pd, in mlx5_ib_rereg_user_mr()
1719 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1726 err = mlx5r_umr_revoke_mr(mr); in mlx5_ib_rereg_user_mr()
1729 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1730 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1733 return create_real_mr(new_pd, umem, mr->ibmr.iova, in mlx5_ib_rereg_user_mr()
1741 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1745 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1755 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, in mlx5_ib_rereg_user_mr()
1757 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, in mlx5_ib_rereg_user_mr()
1779 struct mlx5_ib_mr *mr, in mlx5_alloc_priv_descs() argument
1796 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1797 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1800 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1802 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1803 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1810 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
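
mlx5_alloc_priv_descs() needs the descriptor array aligned to MLX5_UMR_ALIGN, so it over-allocates, keeps the raw pointer in mr->descs_alloc for the eventual kfree(), and stores the PTR_ALIGN()ed pointer in mr->descs. A userspace sketch of the same trick; the driver sizes its slack (add_size) from the allocator's guaranteed alignment, while this sketch simply uses align - 1 bytes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define UMR_ALIGN 2048   /* stand-in for MLX5_UMR_ALIGN */

struct descs {
    void *alloc;         /* raw pointer: the one that must be freed */
    void *aligned;       /* what the hardware actually uses */
};

/* PTR_ALIGN(p, a): round p up to the next multiple of a. */
static void *ptr_align(void *p, size_t a)
{
    return (void *)(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1));
}

static int alloc_aligned(struct descs *d, size_t size)
{
    /* Worst case we need align - 1 bytes of slack to find an
     * aligned address inside the allocation. */
    d->alloc = calloc(1, size + UMR_ALIGN - 1);
    if (!d->alloc)
        return -1;
    d->aligned = ptr_align(d->alloc, UMR_ALIGN);
    return 0;
}

int main(void)
{
    struct descs d;

    if (alloc_aligned(&d, 4096))
        return 1;
    printf("raw=%p aligned=%p\n", d.alloc, d.aligned);
    free(d.alloc);       /* free the raw pointer, never the aligned one */
    return 0;
}
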
1816 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) in mlx5_free_priv_descs() argument
1818 if (!mr->umem && mr->descs) { in mlx5_free_priv_descs()
1819 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1820 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1823 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1825 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1826 mr->descs = NULL; in mlx5_free_priv_descs()
1831 struct mlx5_ib_mr *mr) in cache_ent_find_and_store() argument
1837 if (mr->mmkey.cache_ent) { in cache_ent_find_and_store()
1838 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1839 mr->mmkey.cache_ent->in_use--; in cache_ent_find_and_store()
1844 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1846 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1851 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1852 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1858 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1863 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1864 xa_lock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
1867 ret = push_mkey_locked(mr->mmkey.cache_ent, false, in cache_ent_find_and_store()
1868 xa_mk_value(mr->mmkey.key)); in cache_ent_find_and_store()
1869 xa_unlock_irq(&mr->mmkey.cache_ent->mkeys); in cache_ent_find_and_store()
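
cache_ent_find_and_store() looks up a cache entry matching mr->mmkey.rb_key, creates one if none exists, and pushes the key under the entry's lock. A toy single-threaded sketch of locked find-or-create, with a linked list and one mutex standing in for the driver's rb-tree and per-entry xarray locks:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* A toy keyed cache: entries found by ndescs, created on demand. */
struct ent {
    unsigned int ndescs;      /* the rb_key in the driver */
    unsigned int stored[8];
    int nstored;
    struct ent *next;
};

static struct ent *ents;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static struct ent *find_or_create(unsigned int ndescs)
{
    struct ent *e;

    pthread_mutex_lock(&cache_lock);
    for (e = ents; e; e = e->next)
        if (e->ndescs == ndescs)          /* exact-match test, as in the driver */
            goto out;
    e = calloc(1, sizeof(*e));            /* mlx5r_cache_create_ent_locked() */
    if (e) {
        e->ndescs = ndescs;
        e->next = ents;
        ents = e;
    }
out:
    pthread_mutex_unlock(&cache_lock);
    return e;
}

static int store_key(unsigned int ndescs, unsigned int key)
{
    struct ent *e = find_or_create(ndescs);

    if (!e || e->nstored == 8)
        return -1;                        /* caller destroys the mkey instead */
    pthread_mutex_lock(&cache_lock);      /* the driver uses the entry's xa lock */
    e->stored[e->nstored++] = key;        /* push_mkey_locked() */
    pthread_mutex_unlock(&cache_lock);
    return 0;
}

int main(void)
{
    printf("%d %d\n", store_key(16, 0x111), store_key(16, 0x222));
    return 0;
}
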
1875 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_dereg_mr() local
1885 refcount_read(&mr->mmkey.usecount) != 0 && in mlx5_ib_dereg_mr()
1886 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in mlx5_ib_dereg_mr()
1887 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in mlx5_ib_dereg_mr()
1890 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_ib_dereg_mr()
1891 mr->sig, NULL, GFP_KERNEL); in mlx5_ib_dereg_mr()
1893 if (mr->mtt_mr) { in mlx5_ib_dereg_mr()
1894 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1897 mr->mtt_mr = NULL; in mlx5_ib_dereg_mr()
1899 if (mr->klm_mr) { in mlx5_ib_dereg_mr()
1900 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1903 mr->klm_mr = NULL; in mlx5_ib_dereg_mr()
1907 mr->sig->psv_memory.psv_idx)) in mlx5_ib_dereg_mr()
1909 mr->sig->psv_memory.psv_idx); in mlx5_ib_dereg_mr()
1910 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_ib_dereg_mr()
1912 mr->sig->psv_wire.psv_idx); in mlx5_ib_dereg_mr()
1913 kfree(mr->sig); in mlx5_ib_dereg_mr()
1914 mr->sig = NULL; in mlx5_ib_dereg_mr()
1918 if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length)) in mlx5_ib_dereg_mr()
1919 if (mlx5r_umr_revoke_mr(mr) || in mlx5_ib_dereg_mr()
1920 cache_ent_find_and_store(dev, mr)) in mlx5_ib_dereg_mr()
1921 mr->mmkey.cache_ent = NULL; in mlx5_ib_dereg_mr()
1923 if (!mr->mmkey.cache_ent) { in mlx5_ib_dereg_mr()
1924 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); in mlx5_ib_dereg_mr()
1929 if (mr->umem) { in mlx5_ib_dereg_mr()
1930 bool is_odp = is_odp_mr(mr); in mlx5_ib_dereg_mr()
1933 atomic_sub(ib_umem_num_pages(mr->umem), in mlx5_ib_dereg_mr()
1935 ib_umem_release(mr->umem); in mlx5_ib_dereg_mr()
1937 mlx5_ib_free_odp_mr(mr); in mlx5_ib_dereg_mr()
1940 if (!mr->mmkey.cache_ent) in mlx5_ib_dereg_mr()
1941 mlx5_free_priv_descs(mr); in mlx5_ib_dereg_mr()
1943 kfree(mr); in mlx5_ib_dereg_mr()
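
The heart of mlx5_ib_dereg_mr() is the recycle-or-destroy decision at 1918-1924: revoking the mkey and storing it back in the cache must both succeed, otherwise the cache_ent pointer is cleared and the mkey is destroyed outright. A sketch of just that decision:

#include <stdio.h>
#include <stdlib.h>

struct mr {
    unsigned int key;
    int cached;                /* like mmkey.cache_ent != NULL */
};

static int revoke(struct mr *mr) { (void)mr; return 0; }        /* mlx5r_umr_revoke_mr() */
static int cache_store(struct mr *mr) { (void)mr; return -1; }  /* cache refuses here */
static void destroy_key(struct mr *mr) { printf("destroy 0x%x\n", mr->key); }

static void dereg_sketch(struct mr *mr)
{
    /* Both steps must succeed to recycle; otherwise fall back to
     * destruction, the exact shape of:
     *   if (mlx5r_umr_revoke_mr(mr) || cache_ent_find_and_store(dev, mr))
     *           mr->mmkey.cache_ent = NULL;
     */
    if (revoke(mr) || cache_store(mr))
        mr->cached = 0;
    if (!mr->cached)
        destroy_key(mr);       /* destroy_mkey() path */
    free(mr);
}

int main(void)
{
    struct mr *mr = calloc(1, sizeof(*mr));

    mr->key = 0x42;
    mr->cached = 1;
    dereg_sketch(mr);          /* prints "destroy 0x42" since the store fails */
    return 0;
}
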
1964 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in _mlx5_alloc_mkey_descs() argument
1971 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
1972 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
1973 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
1975 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
1981 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
1985 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
1986 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1987 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
1992 mlx5_free_priv_descs(mr); in _mlx5_alloc_mkey_descs()
2003 struct mlx5_ib_mr *mr; in mlx5_ib_alloc_pi_mr() local
2007 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_alloc_pi_mr()
2008 if (!mr) in mlx5_ib_alloc_pi_mr()
2011 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2012 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2023 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, in mlx5_ib_alloc_pi_mr()
2028 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2031 return mr; in mlx5_ib_alloc_pi_mr()
2036 kfree(mr); in mlx5_ib_alloc_pi_mr()
2040 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_mem_reg_descs() argument
2043 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), in mlx5_alloc_mem_reg_descs()
2048 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_sg_gaps_descs() argument
2051 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), in mlx5_alloc_sg_gaps_descs()
2055 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_integrity_descs() argument
2064 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2065 if (!mr->sig) in mlx5_alloc_integrity_descs()
2073 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2074 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2076 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2077 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2079 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2080 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2083 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2084 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2087 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2090 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2091 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2100 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, in mlx5_alloc_integrity_descs()
2105 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2106 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2112 destroy_mkey(dev, mr); in mlx5_alloc_integrity_descs()
2113 mlx5_free_priv_descs(mr); in mlx5_alloc_integrity_descs()
2115 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2116 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2118 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2119 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2121 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2123 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2124 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2126 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2128 kfree(mr->sig); in mlx5_alloc_integrity_descs()
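
mlx5_alloc_integrity_descs() acquires several resources in sequence (the sig state, the PSV pair, klm_mr, mtt_mr, then the mkey itself) and unwinds with a staircase of labels, each releasing exactly what was held when the jump was taken. The skeleton, runnable with trivial stand-ins:

#include <stdio.h>

/* Trivial stand-ins so the sketch runs; in the driver these are
 * kzalloc(mr->sig), mlx5_core_create_psv(), mlx5_ib_alloc_pi_mr()
 * twice, and _mlx5_alloc_mkey_descs(). */
static int acquire(int which) { (void)which; return 0; }
static void release(int which) { printf("release %d\n", which); }

static int integrity_sketch(void)
{
    int err;

    if ((err = acquire(0)))            /* mr->sig */
        return err;
    if ((err = acquire(1)))            /* PSV pair */
        goto err_free_sig;
    if ((err = acquire(2)))            /* klm_mr */
        goto err_destroy_psv;
    if ((err = acquire(3)))            /* mtt_mr */
        goto err_free_klm_mr;
    if ((err = acquire(4)))            /* mkey + sig_mrs slot */
        goto err_free_mtt_mr;
    return 0;

    /* Each label undoes only what was held when the jump was taken,
     * in reverse order of acquisition. */
err_free_mtt_mr:
    release(3);
err_free_klm_mr:
    release(2);
err_destroy_psv:
    release(1);
err_free_sig:
    release(0);
    return err;
}

int main(void)
{
    printf("err=%d\n", integrity_sketch());
    return 0;
}
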
2140 struct mlx5_ib_mr *mr; in __mlx5_ib_alloc_mr() local
2144 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in __mlx5_ib_alloc_mr()
2145 if (!mr) in __mlx5_ib_alloc_mr()
2154 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2155 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2159 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2162 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2165 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, in __mlx5_ib_alloc_mr()
2178 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2183 kfree(mr); in __mlx5_ib_alloc_mr()
2339 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_pa_mr_sg_pi() local
2343 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2346 mr->mmkey.ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2349 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2350 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2353 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2358 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2359 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2361 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2368 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, in mlx5_ib_sg_to_klms() argument
2377 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2379 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2382 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2383 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2386 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2391 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2399 mr->mmkey.ndescs = i; in mlx5_ib_sg_to_klms()
2400 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2406 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2413 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2420 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2421 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
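
mlx5_ib_sg_to_klms() turns scatter-gather segments into KLM descriptors (address, byte count, lkey), applying the caller's byte offset only to the first segment and stopping at max_descs. A userspace sketch over plain (addr, len) pairs; the struct layouts here are illustrative, not the device ABI:

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint32_t len; };
struct klm { uint64_t va; uint32_t bcount; uint32_t key; };

/* Returns how many segments were consumed; *total receives the mapped
 * byte count (the driver accumulates this into mr->ibmr.length). */
static unsigned int sg_to_klms(const struct seg *sg, unsigned int nsg,
                               unsigned int *offset, uint32_t lkey,
                               struct klm *klms, unsigned int max_descs,
                               uint64_t *total)
{
    unsigned int i;

    *total = 0;
    for (i = 0; i < nsg && i < max_descs; i++) {
        /* The offset applies only to the first segment, then is
         * zeroed, matching the sg_offset handling in the driver. */
        klms[i].va = sg[i].addr + *offset;
        klms[i].bcount = sg[i].len - *offset;
        klms[i].key = lkey;
        *total += klms[i].bcount;
        *offset = 0;
    }
    return i;
}

int main(void)
{
    struct seg sg[2] = { { 0x1000, 4096 }, { 0x9000, 4096 } };
    struct klm klms[4];
    unsigned int off = 512;
    uint64_t total;
    unsigned int n = sg_to_klms(sg, 2, &off, 0xbeef, klms, 4, &total);

    printf("n=%u total=%llu first_va=0x%llx\n", n,
           (unsigned long long)total, (unsigned long long)klms[0].va);
    return 0;
}
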
2429 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page() local
2432 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) in mlx5_set_page()
2435 descs = mr->descs; in mlx5_set_page()
2436 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2443 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page_pi() local
2446 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2449 descs = mr->descs; in mlx5_set_page_pi()
2450 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
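
mlx5_set_page() and mlx5_set_page_pi() are the per-page callbacks handed to ib_sg_to_pages(): bounds-check against max_descs, OR the read/write enable bits into the page-aligned address, and store it big-endian. A sketch with made-up flag values (the real MLX5_EN_RD/MLX5_EN_WR constants live in the device ABI), using glibc's endian.h:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define EN_RD (1ULL << 2)   /* stand-ins for MLX5_EN_RD / MLX5_EN_WR; */
#define EN_WR (1ULL << 1)   /* not the hardware's actual bit values   */

struct toy_mr {
    uint64_t descs[4];      /* big-endian MTT-style entries */
    unsigned int ndescs;
    unsigned int max_descs;
};

static int set_page(struct toy_mr *mr, uint64_t addr)
{
    if (mr->ndescs == mr->max_descs)
        return -1;          /* mlx5_set_page() fails when the array is full */
    /* Low bits of a page-aligned address are free to carry permission
     * flags, as in cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR). */
    mr->descs[mr->ndescs++] = htobe64(addr | EN_RD | EN_WR);
    return 0;
}

int main(void)
{
    struct toy_mr mr = { .max_descs = 4 };

    set_page(&mr, 0x1000);
    printf("ndescs=%u desc0=0x%016llx\n", mr.ndescs,
           (unsigned long long)be64toh(mr.descs[0]));
    return 0;
}
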
2462 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mtt_mr_sg_pi() local
2463 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2527 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_klm_mr_sg_pi() local
2528 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2560 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg_pi() local
2566 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2567 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2568 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2569 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2570 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2590 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2597 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2607 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2611 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2619 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg() local
2622 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg()
2624 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2625 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2628 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2629 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, in mlx5_ib_map_mr_sg()
2635 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2636 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
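
mlx5_ib_map_mr_sg() brackets the CPU's writes to the descriptor buffer between ib_dma_sync_single_for_cpu() and ib_dma_sync_single_for_device(), so a non-coherent device never observes a half-written page list. A sketch of the ordering only; the sync stubs stand in for the real DMA API:

#include <stdio.h>

/* Stubs standing in for ib_dma_sync_single_for_cpu()/_for_device();
 * on cache-coherent platforms these are no-ops, elsewhere they flush
 * or invalidate CPU caches over the mapped range. */
static void sync_for_cpu(void *buf, unsigned long len)    { (void)buf; (void)len; }
static void sync_for_device(void *buf, unsigned long len) { (void)buf; (void)len; }

static int map_sketch(unsigned long long *descs, unsigned long ndescs)
{
    sync_for_cpu(descs, ndescs * sizeof(*descs));    /* claim the buffer for the CPU */
    for (unsigned long i = 0; i < ndescs; i++)       /* fill the page list, as the
                                                        set_page callbacks do */
        descs[i] = 0x1000ULL * (i + 1);
    sync_for_device(descs, ndescs * sizeof(*descs)); /* hand it back to the device */
    return (int)ndescs;
}

int main(void)
{
    unsigned long long descs[4];

    printf("mapped %d\n", map_sketch(descs, 4));
    return 0;
}
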