mr.c (7e111bbff92620f56609a81353bba5bd1944851b) vs. mr.c (831df88381f73bca0f5624b69ab985cac3d036bc)

Where the two revisions differ, both variants are shown back to back, each keeping its own line numbers; unchanged regions appear only once.
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2020, Intel Corporation. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the

--- 28 unchanged lines hidden ---

37#include <linux/debugfs.h>
38#include <linux/export.h>
39#include <linux/delay.h>
40#include <linux/dma-buf.h>
41#include <linux/dma-resv.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_umem_odp.h>
44#include <rdma/ib_verbs.h>
45#include "dm.h"
45#include "mlx5_ib.h"
46
47/*
48 * We can't use an array for xlt_emergency_page because dma_map_single doesn't
49 * work on kernel modules memory
50 */
51void *xlt_emergency_page;
52static DEFINE_MUTEX(xlt_emergency_page_mutex);

--- 61 unchanged lines hidden ---

114 struct mlx5_async_work *context)
115{
116 MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
117 assign_mkey_variant(dev, mkey, in);
118 return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
119 create_mkey_callback, context);
120}
121
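A note on the xlt_emergency_page declarations above: as the comment says, dma_map_single() cannot be used on a module's static memory, so both revisions keep a bare pointer that is filled from the page allocator and serialize its single user with xlt_emergency_page_mutex. A minimal sketch of that pattern, with an init/accessor shape that is illustrative rather than taken from this diff:

	/* sketch: allocate a DMA-mappable fallback page once, at module init */
	static int init_xlt_emergency_page(void)
	{
		xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
		return xlt_emergency_page ? 0 : -ENOMEM;
	}

	/* sketch: hand the single page out under the mutex, one user at a time */
	static void *get_xlt_emergency_page(void)
	{
		mutex_lock(&xlt_emergency_page_mutex);
		return xlt_emergency_page;
	}

	static void put_xlt_emergency_page(void)
	{
		mutex_unlock(&xlt_emergency_page_mutex);
	}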
123static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
124static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
122static int mr_cache_max_order(struct mlx5_ib_dev *dev);
123static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
124
125static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
126{
127 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
128}
129

--- 453 unchanged lines hidden ---

583 if (IS_ERR(mr))
584 return mr;
585 } else {
586 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
587 list_del(&mr->list);
588 ent->available_mrs--;
589 queue_adjust_cache_locked(ent);
590 spin_unlock_irq(&ent->lock);
591
592 mlx5_clear_mr(mr);
593 }
594 mr->access_flags = access_flags;
595 return mr;
596}
597
598/* Return a MR already available in the cache */
599static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
600{

--- 9 unchanged lines hidden ---

610 spin_lock_irq(&ent->lock);
611 if (!list_empty(&ent->head)) {
612 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
613 list);
614 list_del(&mr->list);
615 ent->available_mrs--;
616 queue_adjust_cache_locked(ent);
617 spin_unlock_irq(&ent->lock);
618 mlx5_clear_mr(mr);
619 return mr;
620 }
621 queue_adjust_cache_locked(ent);
622 spin_unlock_irq(&ent->lock);
623 }
624 req_ent->miss++;
625 return NULL;
626}
627
628static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
629{
630 struct mlx5_cache_ent *ent = mr->cache_ent;
631
632 spin_lock_irq(&ent->lock);
633 list_add_tail(&mr->list, &ent->head);
634 ent->available_mrs++;
635 queue_adjust_cache_locked(ent);
636 spin_unlock_irq(&ent->lock);
637}
638

619 break;
620 }
621 queue_adjust_cache_locked(ent);
622 spin_unlock_irq(&ent->lock);
623 }
624
625 if (!mr)
626 req_ent->miss++;
627
628 return mr;
629}
630
631static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
632{
633 struct mlx5_cache_ent *ent = mr->cache_ent;
634
635 mr->cache_ent = NULL;
636 spin_lock_irq(&ent->lock);
637 ent->total_mrs--;
638 spin_unlock_irq(&ent->lock);
639}
640
641void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
642{
643 struct mlx5_cache_ent *ent = mr->cache_ent;
644
645 if (!ent)
646 return;
647
648 if (mlx5_mr_cache_invalidate(mr)) {
649 detach_mr_from_cache(mr);
650 destroy_mkey(dev, mr);
651 kfree(mr);
652 return;
653 }
654
655 spin_lock_irq(&ent->lock);
656 list_add_tail(&mr->list, &ent->head);
657 ent->available_mrs++;
658 queue_adjust_cache_locked(ent);
659 spin_unlock_irq(&ent->lock);
660}
661
639static void clean_keys(struct mlx5_ib_dev *dev, int c)
640{

--- 325 unchanged lines hidden ---

966 * no reason to try it again.
967 */
968 if (IS_ERR(mr))
969 return mr;
970 }
971
972 mr->ibmr.pd = pd;
973 mr->umem = umem;
997 mr->access_flags = access_flags;
998 mr->desc_size = sizeof(struct mlx5_mtt);
974 mr->mmkey.iova = iova;
975 mr->mmkey.size = umem->length;
976 mr->mmkey.pd = to_mpd(pd)->pdn;
977 mr->page_shift = order_base_2(page_size);
978 set_mr_fields(dev, mr, umem->length, access_flags);
979
980 return mr;
981}
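The tail above records the MR geometry in the mkey; mr->page_shift is just the log2 of the page size chosen for the translation entries, via order_base_2(). A quick worked example with illustrative values:

	/* sketch: a 2 MiB mkey page size gives page_shift == 21,
	 * i.e. page_size == 1UL << page_shift */
	unsigned long page_size = 1UL << 21;
	unsigned int page_shift = order_base_2(page_size);	/* == 21 */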

--- 17 unchanged lines hidden ---

999 static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
1000
1001 /*
1002 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context just that the
1003 * allocation can't trigger any kind of reclaim.
1004 */
1005 might_sleep();
1006
1007 gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
1032 gfp_mask |= __GFP_ZERO;
1008
1009 /*
1010 * If the system already has a suitable high order page then just use
1011 * that, but don't try hard to create one. This max is about 1M, so a
1012 * free x86 huge page will satisfy it.
1013 */
1014 size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
1015 MLX5_MAX_UMR_CHUNK);
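The gfp handling above is the only real difference in this hunk: one revision also ORs in __GFP_NORETRY, so the allocator hands back a high-order chunk only if one is already free and gives up quickly rather than reclaiming or compacting to build one, which is what the "don't try hard to create one" comment asks for. A rough sketch of that opportunistic pattern, using a hypothetical helper rather than the driver's exact fallback logic:

	/* sketch: take a large physically contiguous buffer if one is lying
	 * around, otherwise halve the request rather than forcing reclaim */
	static void *alloc_xlt_buf(size_t *size, gfp_t gfp)
	{
		void *buf;

		gfp |= __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN;
		while (*size >= PAGE_SIZE) {
			buf = (void *)__get_free_pages(gfp, get_order(*size));
			if (buf)
				return buf;
			*size /= 2;
		}
		return NULL;
	}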

--- 460 unchanged lines hidden ---

1476 if (xlt_with_umr) {
1477 /*
1478 * If the MR was created with reg_create then it will be
1479 * configured properly but left disabled. It is safe to go ahead
1480 * and configure it again via UMR while enabling it.
1481 */
1482 err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
1483 if (err) {
1484 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1509 dereg_mr(dev, mr);
1485 return ERR_PTR(err);
1486 }
1487 }
1488 return &mr->ibmr;
1489}
1490
1491static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
1492 u64 iova, int access_flags,
1493 struct ib_udata *udata)
1494{
1495 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1496 struct ib_umem_odp *odp;
1497 struct mlx5_ib_mr *mr;
1498 int err;
1499
1500 if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1501 return ERR_PTR(-EOPNOTSUPP);
1502
1503 err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
1504 if (err)
1505 return ERR_PTR(err);
1506 if (!start && length == U64_MAX) {
1507 if (iova != 0)
1508 return ERR_PTR(-EINVAL);
1509 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1510 return ERR_PTR(-EINVAL);
1511
1512 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
1513 if (IS_ERR(mr))

--- 22 unchanged lines hidden ---

1536 goto err_dereg_mr;
1537
1538 err = mlx5_ib_init_odp_mr(mr);
1539 if (err)
1540 goto err_dereg_mr;
1541 return &mr->ibmr;
1542
1543err_dereg_mr:
1544 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1566 dereg_mr(dev, mr);
1545 return ERR_PTR(err);
1546}
1547
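For context, the start == 0 && length == U64_MAX branch shown above (with iova forced to 0 and the IB_ODP_SUPPORT_IMPLICIT capability check) is the implicit ODP case, where a single MR covers the whole address space; the revision that calls mlx5r_odp_create_eq() at the top of create_user_odp_mr() appears to defer creating the ODP page-fault EQ until an ODP MR is first registered. Roughly how an application asks for an implicit ODP MR through libibverbs (a sketch, error handling trimmed):

	#include <infiniband/verbs.h>
	#include <stdint.h>

	/* sketch: register the whole address space on-demand; the kernel then
	 * sees start == 0 and length == U64_MAX, i.e. the branch above */
	struct ibv_mr *reg_implicit_odp(struct ibv_pd *pd)
	{
		return ibv_reg_mr(pd, NULL, SIZE_MAX,
				  IBV_ACCESS_ON_DEMAND | IBV_ACCESS_LOCAL_WRITE |
				  IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE);
	}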
1548struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1549 u64 iova, int access_flags,
1550 struct ib_udata *udata)
1551{
1552 struct mlx5_ib_dev *dev = to_mdev(pd->device);

--- 80 unchanged lines hidden ---

1633 goto err_dereg_mr;
1634
1635 err = mlx5_ib_init_dmabuf_mr(mr);
1636 if (err)
1637 goto err_dereg_mr;
1638 return &mr->ibmr;
1639
1640err_dereg_mr:
1641 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1663 dereg_mr(dev, mr);
1642 return ERR_PTR(err);
1643}
1644
1645/**
1646 * revoke_mr - Fence all DMA on the MR
1668 * mlx5_mr_cache_invalidate - Fence all DMA on the MR
1647 * @mr: The MR to fence
1648 *
1649 * Upon return the NIC will not be doing any DMA to the pages under the MR,
1650 * and any DMA in progress will be completed. Failure of this function
1672 * and any DMA inprogress will be completed. Failure of this function
1651 * indicates the HW has failed catastrophically.
1652 */
1653static int revoke_mr(struct mlx5_ib_mr *mr)
1675int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
1654{
1655 struct mlx5_umr_wr umrwr = {};
1656
1657 if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1658 return 0;
1659
1660 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1661 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;

--- 77 unchanged lines hidden ---

1739 struct ib_umem *old_umem = mr->umem;
1740 int err;
1741
1742 /*
1743 * To keep everything simple the MR is revoked before we start to mess
1744 * with it. This ensure the change is atomic relative to any use of the
1745 * MR.
1746 */
1747 err = revoke_mr(mr);
1769 err = mlx5_mr_cache_invalidate(mr);
1748 if (err)
1749 return err;
1750
1751 if (flags & IB_MR_REREG_PD) {
1752 mr->ibmr.pd = pd;
1753 mr->mmkey.pd = to_mpd(pd)->pdn;
1754 upd_flags |= MLX5_IB_UPD_XLT_PD;
1755 }

--- 62 unchanged lines hidden ---

1818 /* DM or ODP MR's don't have a normal umem so we can't re-use it */
1819 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1820 goto recreate;
1821
1822 /*
1823 * Only one active MR can refer to a umem at one time, revoke
1824 * the old MR before assigning the umem to the new one.
1825 */
1826 err = revoke_mr(mr);
1848 err = mlx5_mr_cache_invalidate(mr);
1827 if (err)
1828 return ERR_PTR(err);
1829 umem = mr->umem;
1830 mr->umem = NULL;
1831 atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
1832
1833 return create_real_mr(new_pd, umem, mr->mmkey.iova,
1834 new_access_flags);

--- 70 unchanged lines hidden ---

1905 kfree(mr->descs_alloc);
1906
1907 return ret;
1908}
1909
1910static void
1911mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1912{
1913 if (!mr->umem && mr->descs) {
1935 if (mr->descs) {
1914 struct ib_device *device = mr->ibmr.device;
1915 int size = mr->max_descs * mr->desc_size;
1916 struct mlx5_ib_dev *dev = to_mdev(device);
1917
1918 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
1919 DMA_TO_DEVICE);
1920 kfree(mr->descs_alloc);
1921 mr->descs = NULL;
1922 }
1923}
1924
1925int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1926{
1927 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1928 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1929 int rc;
1930
1931 /*
1932 * Any async use of the mr must hold the refcount, once the refcount
1933 * goes to zero no other thread, such as ODP page faults, prefetch, any
1934 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it.
1935 */
1936 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
1937 refcount_read(&mr->mmkey.usecount) != 0 &&
1938 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
1939 mlx5r_deref_wait_odp_mkey(&mr->mmkey);
1940
1941 if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1942 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
1943 NULL, GFP_KERNEL);
1944
1945 if (mr->mtt_mr) {
1946 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
1947 if (rc)
1948 return rc;
1949 mr->mtt_mr = NULL;
1950 }
1951 if (mr->klm_mr) {
1952 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
1953 if (rc)
1954 return rc;
1955 mr->klm_mr = NULL;
1956 }
1957
1958 if (mlx5_core_destroy_psv(dev->mdev,
1959 mr->sig->psv_memory.psv_idx))
1960 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1961 mr->sig->psv_memory.psv_idx);
1962 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1963 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1964 mr->sig->psv_wire.psv_idx);
1965 kfree(mr->sig);
1966 mr->sig = NULL;
1967 }
1968
1969 /* Stop DMA */
1970 if (mr->cache_ent) {
1971 if (revoke_mr(mr)) {
1972 spin_lock_irq(&mr->cache_ent->lock);
1973 mr->cache_ent->total_mrs--;
1974 spin_unlock_irq(&mr->cache_ent->lock);
1975 mr->cache_ent = NULL;
1976 }
1977 }
1978 if (!mr->cache_ent) {
1979 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
1980 if (rc)
1981 return rc;
1982 }
1983
1984 if (mr->umem) {
1985 bool is_odp = is_odp_mr(mr);
1986
1987 if (!is_odp)
1988 atomic_sub(ib_umem_num_pages(mr->umem),
1989 &dev->mdev->priv.reg_pages);
1990 ib_umem_release(mr->umem);
1991 if (is_odp)
1992 mlx5_ib_free_odp_mr(mr);
1993 }
1994
1995 if (mr->cache_ent) {
1996 mlx5_mr_cache_free(dev, mr);
1997 } else {
1998 mlx5_free_priv_descs(mr);
1999 kfree(mr);
2000 }
2001 return 0;
2002}
2003

1947static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1948{
1949 if (mr->sig) {
1950 if (mlx5_core_destroy_psv(dev->mdev,
1951 mr->sig->psv_memory.psv_idx))
1952 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1953 mr->sig->psv_memory.psv_idx);
1954 if (mlx5_core_destroy_psv(dev->mdev,
1955 mr->sig->psv_wire.psv_idx))
1956 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1957 mr->sig->psv_wire.psv_idx);
1958 xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
1959 kfree(mr->sig);
1960 mr->sig = NULL;
1961 }
1962
1963 if (!mr->cache_ent) {
1964 destroy_mkey(dev, mr);
1965 mlx5_free_priv_descs(mr);
1966 }
1967}
1968
1969static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1970{
1971 struct ib_umem *umem = mr->umem;
1972
1973 /* Stop all DMA */
1974 if (is_odp_mr(mr))
1975 mlx5_ib_fence_odp_mr(mr);
1976 else if (is_dmabuf_mr(mr))
1977 mlx5_ib_fence_dmabuf_mr(mr);
1978 else
1979 clean_mr(dev, mr);
1980
1981 if (umem) {
1982 if (!is_odp_mr(mr))
1983 atomic_sub(ib_umem_num_pages(umem),
1984 &dev->mdev->priv.reg_pages);
1985 ib_umem_release(umem);
1986 }
1987
1988 if (mr->cache_ent)
1989 mlx5_mr_cache_free(dev, mr);
1990 else
1991 kfree(mr);
1992}
1993
1994int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1995{
1996 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1997
1998 if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1999 dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
2000 dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
2001 }
2002
2003 if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
2004 mlx5_ib_free_implicit_mr(mmr);
2005 return 0;
2006 }
2007
2008 dereg_mr(to_mdev(ibmr->device), mmr);
2009
2010 return 0;
2011}
2012
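Both layouts above implement the same deregistration entry point, so nothing changes for callers: one revision folds clean_mr()/dereg_mr() into mlx5_ib_dereg_mr() itself, while the other keeps them as helpers that mlx5_ib_dereg_mr() calls. For reference, the userspace round trip that ultimately exercises this path looks like the following (a sketch with a hypothetical helper, error handling trimmed):

	#include <infiniband/verbs.h>
	#include <stdlib.h>

	/* sketch: register and then deregister a plain MR; on an mlx5 device
	 * ibv_dereg_mr() reaches mlx5_ib_dereg_mr() in either revision */
	static int mr_roundtrip(struct ibv_pd *pd, size_t len)
	{
		void *buf = malloc(len);
		struct ibv_mr *mr;
		int ret;

		if (!buf)
			return -1;
		mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
		if (!mr) {
			free(buf);
			return -1;
		}
		ret = ibv_dereg_mr(mr);
		free(buf);
		return ret;
	}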
2004static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
2005 int access_mode, int page_shift)
2006{
2007 void *mkc;
2008

--- 155 unchanged lines hidden ---

2164 if (err)
2165 goto err_free_descs;
2166 return 0;
2167
2168err_free_descs:
2169 destroy_mkey(dev, mr);
2170 mlx5_free_priv_descs(mr);
2171err_free_mtt_mr:
2172 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
2181 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
2173 mr->mtt_mr = NULL;
2174err_free_klm_mr:
2175 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
2184 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
2176 mr->klm_mr = NULL;
2177err_destroy_psv:
2178 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
2179 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
2180 mr->sig->psv_memory.psv_idx);
2181 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
2182 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
2183 mr->sig->psv_wire.psv_idx);

--- 516 unchanged lines hidden ---