mr.c (49780d42dfc9ec0f4090c32ca59688449da1a1cd) -> mr.c (81713d3788d2e6bc005f15ee1c59d0eb06050a6b)
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 455 unchanged lines hidden ---

464
465 ent = &cache->ent[entry];
466 while (1) {
467 spin_lock_irq(&ent->lock);
468 if (list_empty(&ent->head)) {
469 spin_unlock_irq(&ent->lock);
470
471 err = add_keys(dev, entry, 1);
472 if (err)
472 if (err && err != -EAGAIN)
473 return ERR_PTR(err);
474
475 wait_for_completion(&ent->compl);
476 } else {
477 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
478 list);
479 list_del(&mr->list);
480 ent->cur--;

--- 183 unchanged lines hidden ---

664 ent->dev = dev;
665 ent->limit = 0;
666
667 init_completion(&ent->compl);
668 INIT_WORK(&ent->work, cache_work_func);
669 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
670 queue_work(cache->wq, &ent->work);
671
 672 if (i > MAX_UMR_CACHE_ENTRY)
 673 continue;
 672 if (i > MAX_UMR_CACHE_ENTRY) {
 673 mlx5_odp_init_mr_cache_entry(ent);
 674 continue;
 675 }
674
675 if (!use_umr(dev, ent->order))
676 continue;
677
678 ent->page = PAGE_SHIFT;
679 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
680 MLX5_IB_UMR_OCTOWORD;
681 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
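Cache entries above MAX_UMR_CACHE_ENTRY are no longer skipped; they are handed to mlx5_odp_init_mr_cache_entry() (defined in odp.c, not shown in this diff) so that implicit-ODP MRs can later draw their pre-created MKeys from the cache instead of creating them synchronously in the fault path. A sketch of the intended consumer, assuming the MLX5_IMR_MTT_CACHE_ENTRY bucket name from this series:

	/* Fault-path sketch (hypothetical): allocate one pre-created child
	 * MKey for a leaf of the implicit MR tree from its dedicated
	 * cache bucket, rather than blocking on MKey creation. */
	struct mlx5_ib_mr *child;

	child = mlx5_mr_cache_alloc(dev, MLX5_IMR_MTT_CACHE_ENTRY);
	if (IS_ERR(child))
		return child;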

--- 248 unchanged lines hidden ---

930}
931
932static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
933 void *xlt, int page_shift, size_t size,
934 int flags)
935{
936 struct mlx5_ib_dev *dev = mr->dev;
937 struct ib_umem *umem = mr->umem;
940 if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
941 mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
942 return npages;
943 }
938
939 npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
940
941 if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
942 __mlx5_ib_populate_pas(dev, umem, page_shift,
943 idx, npages, xlt,
944 MLX5_IB_MTT_PRESENT);
945 /* Clear padding after the pages

--- 17 unchanged lines hidden (view full) ---

963 struct device *ddev = dev->ib_dev.dma_device;
964 struct mlx5_ib_ucontext *uctx = NULL;
965 int size;
966 void *xlt;
967 dma_addr_t dma;
968 struct mlx5_umr_wr wr;
969 struct ib_sge sg;
970 int err = 0;
971 int desc_size = sizeof(struct mlx5_mtt);
977 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
978 ? sizeof(struct mlx5_klm)
979 : sizeof(struct mlx5_mtt);
972 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
973 const int page_mask = page_align - 1;
974 size_t pages_mapped = 0;
975 size_t pages_to_map = 0;
976 size_t pages_iter = 0;
977 gfp_t gfp;
978
979 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,

--- 201 unchanged lines hidden (view full) ---

1181 int page_shift;
1182 int npages;
1183 int ncont;
1184 int order;
1185 int err;
1186
1187 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1188 start, virt_addr, length, access_flags);
1197
1198#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1199 if (!start && length == U64_MAX) {
1200 if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1201 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1202 return ERR_PTR(-EINVAL);
1203
1204 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
1205 return &mr->ibmr;
1206 }
1207#endif
1208
1189 err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
1190 &page_shift, &ncont, &order);
1191
1192 if (err < 0)
1193 return ERR_PTR(err);
1194
1195 if (use_umr(dev, order)) {
1196 mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,

--- 269 unchanged lines hidden (view full) ---

1466
1467#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1468 if (umem && umem->odp_data) {
1469 /* Prevent new page faults from succeeding */
1470 mr->live = 0;
1471 /* Wait for all running page-fault handlers to finish. */
1472 synchronize_srcu(&dev->mr_srcu);
1473 /* Destroy all page mappings */
1474 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1475 ib_umem_end(umem));
1494 if (umem->odp_data->page_list)
1495 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1496 ib_umem_end(umem));
1497 else
1498 mlx5_ib_free_implicit_mr(mr);
1476 /*
1477 * We kill the umem before the MR for ODP,
1478 * so that there will not be any invalidations in
1479 * flight, looking at the *mr struct.
1480 */
1481 ib_umem_release(umem);
1482 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1483
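Deregistration keeps the ODP ordering (kill the umem before the MKey) but now distinguishes the two ODP flavours: an explicit ODP MR still gets its mappings zapped with mlx5_ib_invalidate_range(), while an implicit MR, whose parent umem carries no page_list, is torn down through mlx5_ib_free_implicit_mr(), which is expected to release the whole tree of child MRs. A hypothetical helper distilling that branch (not part of the driver):

	/* Pick the ODP teardown path for 'mr' before its umem is released. */
	static void odp_mr_teardown(struct mlx5_ib_mr *mr, struct ib_umem *umem)
	{
		if (umem->odp_data->page_list)
			/* explicit ODP MR: drop its HW page mappings */
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			/* implicit MR: free the per-process tree of child MRs */
			mlx5_ib_free_implicit_mr(mr);
	}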

--- 327 unchanged lines hidden ---