Lines Matching refs:pages

163 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)  in iopt_pages_add_npinned()  argument
167 rc = check_add_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_add_npinned()
169 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_add_npinned()
172 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_sub_npinned() argument
176 rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_sub_npinned()
178 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_sub_npinned()
181 static void iopt_pages_err_unpin(struct iopt_pages *pages, in iopt_pages_err_unpin() argument
189 iopt_pages_sub_npinned(pages, npages); in iopt_pages_err_unpin()
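
The three helpers referenced above (iopt_pages_add_npinned, iopt_pages_sub_npinned, iopt_pages_err_unpin) keep the pinned-page counter honest: every adjustment goes through a checked add/sub and warns if the count ever exceeds the number of pages backing the mapping. A minimal userspace analog of that pattern, using the compiler overflow builtins that check_add_overflow()/check_sub_overflow() wrap (struct pin_counter is an illustrative name, not the kernel's):

/*
 * Userspace analog of the add/sub_npinned pattern: every change to the
 * pinned-page counter is a checked add/sub, and the counter must never
 * exceed the total page count backing the mapping.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct pin_counter {
	size_t npages;   /* total pages backing the mapping */
	size_t npinned;  /* how many are currently pinned */
};

static void pin_counter_add(struct pin_counter *c, size_t npages)
{
	int overflow = __builtin_add_overflow(c->npinned, npages, &c->npinned);

	/* Mirrors WARN_ON(rc || pages->npinned > pages->npages) */
	assert(!overflow && c->npinned <= c->npages);
}

static void pin_counter_sub(struct pin_counter *c, size_t npages)
{
	int underflow = __builtin_sub_overflow(c->npinned, npages, &c->npinned);

	assert(!underflow && c->npinned <= c->npages);
}

int main(void)
{
	struct pin_counter c = { .npages = 8, .npinned = 0 };

	pin_counter_add(&c, 8);
	pin_counter_sub(&c, 3);
	printf("pinned %zu of %zu\n", c.npinned, c.npages);
	return 0;
}
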
249 static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages, in iopt_pages_find_domain_area() argument
254 node = interval_tree_iter_first(&pages->domains_itree, index, index); in iopt_pages_find_domain_area()
576 unsigned long last_index, struct page **pages) in pages_to_xarray() argument
578 struct page **end_pages = pages + (last_index - start_index) + 1; in pages_to_xarray()
579 struct page **half_pages = pages + (end_pages - pages) / 2; in pages_to_xarray()
586 while (pages != end_pages) { in pages_to_xarray()
588 if (pages == half_pages && iommufd_should_fail()) { in pages_to_xarray()
596 old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages))); in pages_to_xarray()
600 pages++; in pages_to_xarray()
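
pages_to_xarray() stores each page's PFN as an xarray value and, under the iommufd selftest, deliberately fails once the cursor reaches the halfway pointer so the caller's unwind path gets exercised. A small sketch of just that midpoint fault-injection idea, with a plain array standing in for the xarray and store_run()/should_fail() as hypothetical stand-ins:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool inject_failures;

static bool should_fail(void)
{
	return inject_failures;
}

static int store_run(unsigned long *dst, const unsigned long *src, size_t n)
{
	const unsigned long *half = src + n / 2;
	size_t i;

	for (i = 0; i < n; i++) {
		/* Fail exactly at the midpoint to exercise partial unwind. */
		if (&src[i] == half && should_fail())
			return -EINVAL;	/* caller would unwind dst[0..i-1] */
		dst[i] = src[i];
	}
	return 0;
}

int main(void)
{
	unsigned long src[4] = { 1, 2, 3, 4 }, dst[4] = { 0 };

	inject_failures = true;
	printf("store_run: %d\n", store_run(dst, src, 4));
	return 0;
}
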
615 static void batch_from_pages(struct pfn_batch *batch, struct page **pages, in batch_from_pages() argument
618 struct page **end = pages + npages; in batch_from_pages()
620 for (; pages != end; pages++) in batch_from_pages()
621 if (!batch_add_pfn(batch, page_to_pfn(*pages))) in batch_from_pages()
625 static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, in batch_unpin() argument
643 to_unpin, pages->writable); in batch_unpin()
644 iopt_pages_sub_npinned(pages, to_unpin); in batch_unpin()
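
batch_from_pages() and batch_unpin() operate on a pfn_batch that coalesces contiguous PFNs so pinning and unpinning can work on runs rather than single pages. A userspace sketch of that coalescing idea, assuming a simple (start, count) run array; the layout and BATCH_RUNS size are illustrative only, not the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BATCH_RUNS 4

struct pfn_run {
	unsigned long pfn;
	unsigned long npfns;
};

struct pfn_batch {
	struct pfn_run runs[BATCH_RUNS];
	size_t nruns;
};

static bool batch_add_pfn(struct pfn_batch *b, unsigned long pfn)
{
	struct pfn_run *last = &b->runs[b->nruns ? b->nruns - 1 : 0];

	/* Extend the previous run when the new PFN is contiguous with it. */
	if (b->nruns && last->pfn + last->npfns == pfn) {
		last->npfns++;
		return true;
	}
	if (b->nruns == BATCH_RUNS)
		return false;	/* full: caller flushes/unpins what it has */
	b->runs[b->nruns].pfn = pfn;
	b->runs[b->nruns].npfns = 1;
	b->nruns++;
	return true;
}

int main(void)
{
	static const unsigned long pfns[] = { 10, 11, 12, 40, 41, 99 };
	struct pfn_batch b = { .nruns = 0 };
	size_t i;

	for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
		if (!batch_add_pfn(&b, pfns[i]))
			break;
	for (i = 0; i < b.nruns; i++)
		printf("run %zu: pfn %lu x%lu\n", i, b.runs[i].pfn, b.runs[i].npfns);
	return 0;
}
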
709 struct iopt_pages *pages) in pfn_reader_user_init() argument
717 if (pages->writable) in pfn_reader_user_init()
722 struct iopt_pages *pages) in pfn_reader_user_destroy() argument
726 mmap_read_unlock(pages->source_mm); in pfn_reader_user_destroy()
727 if (pages->source_mm != current->mm) in pfn_reader_user_destroy()
728 mmput(pages->source_mm); in pfn_reader_user_destroy()
737 struct iopt_pages *pages, in pfn_reader_user_pin() argument
741 bool remote_mm = pages->source_mm != current->mm; in pfn_reader_user_pin()
766 if (!mmget_not_zero(pages->source_mm)) in pfn_reader_user_pin()
779 uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); in pfn_reader_user_pin()
785 mmap_read_lock(pages->source_mm); in pfn_reader_user_pin()
788 rc = pin_user_pages_remote(pages->source_mm, uptr, npages, in pfn_reader_user_pin()
797 iopt_pages_add_npinned(pages, rc); in pfn_reader_user_pin()
804 static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) in incr_user_locked_vm() argument
810 lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >> in incr_user_locked_vm()
813 cur_pages = atomic_long_read(&pages->source_user->locked_vm); in incr_user_locked_vm()
817 } while (atomic_long_cmpxchg(&pages->source_user->locked_vm, cur_pages, in incr_user_locked_vm()
822 static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) in decr_user_locked_vm() argument
824 if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages)) in decr_user_locked_vm()
826 atomic_long_sub(npages, &pages->source_user->locked_vm); in decr_user_locked_vm()
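
incr_user_locked_vm() and decr_user_locked_vm() charge pinned pages against the user's locked_vm with a lock-free compare-and-swap loop, so concurrent pinners never push the counter past RLIMIT_MEMLOCK. A userspace analog of that loop using C11 atomics, with limit_pages standing in for the rlimit (names here are illustrative, not the kernel's):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong locked_vm;	/* shared per-user counter */

static int incr_locked_vm(unsigned long npages, unsigned long limit_pages)
{
	unsigned long cur = atomic_load(&locked_vm);

	do {
		if (cur + npages > limit_pages)
			return -ENOMEM;
		/* Retry if another thread changed the counter under us. */
	} while (!atomic_compare_exchange_weak(&locked_vm, &cur, cur + npages));
	return 0;
}

static void decr_locked_vm(unsigned long npages)
{
	if (atomic_load(&locked_vm) < npages)
		return;	/* would underflow; the kernel code WARNs here */
	atomic_fetch_sub(&locked_vm, npages);
}

int main(void)
{
	printf("incr 4/8: %d\n", incr_locked_vm(4, 8));
	printf("incr 8/8: %d\n", incr_locked_vm(8, 8));	/* exceeds limit */
	decr_locked_vm(2);
	printf("locked_vm now %lu\n", atomic_load(&locked_vm));
	return 0;
}
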
830 static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages, in update_mm_locked_vm() argument
837 mmap_read_unlock(pages->source_mm); in update_mm_locked_vm()
841 pages->source_mm != current->mm) { in update_mm_locked_vm()
842 if (!mmget_not_zero(pages->source_mm)) in update_mm_locked_vm()
847 mmap_write_lock(pages->source_mm); in update_mm_locked_vm()
848 rc = __account_locked_vm(pages->source_mm, npages, inc, in update_mm_locked_vm()
849 pages->source_task, false); in update_mm_locked_vm()
850 mmap_write_unlock(pages->source_mm); in update_mm_locked_vm()
853 mmput(pages->source_mm); in update_mm_locked_vm()
857 static int do_update_pinned(struct iopt_pages *pages, unsigned long npages, in do_update_pinned() argument
862 switch (pages->account_mode) { in do_update_pinned()
867 rc = incr_user_locked_vm(pages, npages); in do_update_pinned()
869 decr_user_locked_vm(pages, npages); in do_update_pinned()
872 rc = update_mm_locked_vm(pages, npages, inc, user); in do_update_pinned()
878 pages->last_npinned = pages->npinned; in do_update_pinned()
880 atomic64_add(npages, &pages->source_mm->pinned_vm); in do_update_pinned()
882 atomic64_sub(npages, &pages->source_mm->pinned_vm); in do_update_pinned()
886 static void update_unpinned(struct iopt_pages *pages) in update_unpinned() argument
888 if (WARN_ON(pages->npinned > pages->last_npinned)) in update_unpinned()
890 if (pages->npinned == pages->last_npinned) in update_unpinned()
892 do_update_pinned(pages, pages->last_npinned - pages->npinned, false, in update_unpinned()
904 struct iopt_pages *pages) in pfn_reader_user_update_pinned() argument
909 lockdep_assert_held(&pages->mutex); in pfn_reader_user_update_pinned()
911 if (pages->npinned == pages->last_npinned) in pfn_reader_user_update_pinned()
914 if (pages->npinned < pages->last_npinned) { in pfn_reader_user_update_pinned()
915 npages = pages->last_npinned - pages->npinned; in pfn_reader_user_update_pinned()
920 npages = pages->npinned - pages->last_npinned; in pfn_reader_user_update_pinned()
923 return do_update_pinned(pages, npages, inc, user); in pfn_reader_user_update_pinned()
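
pfn_reader_user_update_pinned() reconciles the accounting baseline (last_npinned) with the live pin count (npinned): only the difference is charged or uncharged, and a failed charge leaves both values untouched. A hedged sketch of that reconciliation, where charge()/uncharge() are hypothetical stand-ins for do_update_pinned() and the 4-page limit is made up for the demo:

#include <errno.h>
#include <stdio.h>

struct pin_state {
	unsigned long npinned;		/* pages currently pinned */
	unsigned long last_npinned;	/* pages already accounted for */
};

static int charge(unsigned long npages)    { return npages > 4 ? -ENOMEM : 0; }
static void uncharge(unsigned long npages) { (void)npages; }

static int update_pinned(struct pin_state *s)
{
	if (s->npinned == s->last_npinned)
		return 0;
	if (s->npinned < s->last_npinned) {
		uncharge(s->last_npinned - s->npinned);
	} else {
		int rc = charge(s->npinned - s->last_npinned);

		if (rc)
			return rc;	/* baseline stays put on failure */
	}
	s->last_npinned = s->npinned;
	return 0;
}

int main(void)
{
	struct pin_state s = { .npinned = 3, .last_npinned = 0 };

	printf("charge 3: %d\n", update_pinned(&s));
	s.npinned = 10;
	printf("charge 7: %d (over the fake limit)\n", update_pinned(&s));
	return 0;
}
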
937 struct iopt_pages *pages; member
949 return pfn_reader_user_update_pinned(&pfns->user, pfns->pages); in pfn_reader_update_pinned()
961 struct iopt_pages *pages = pfns->pages; in pfn_reader_unpin() local
963 lockdep_assert_held(&pages->mutex); in pfn_reader_unpin()
965 interval_tree_for_each_double_span(&span, &pages->access_itree, in pfn_reader_unpin()
966 &pages->domains_itree, start, last) { in pfn_reader_unpin()
970 batch_unpin(&pfns->batch, pages, span.start_hole - start, in pfn_reader_unpin()
988 batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns, in pfn_reader_fill_span()
999 area = iopt_pages_find_domain_area(pfns->pages, start_index); in pfn_reader_fill_span()
1011 rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index, in pfn_reader_fill_span()
1062 static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, in pfn_reader_init() argument
1067 lockdep_assert_held(&pages->mutex); in pfn_reader_init()
1069 pfns->pages = pages; in pfn_reader_init()
1073 pfn_reader_user_init(&pfns->user, pages); in pfn_reader_init()
1077 interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree, in pfn_reader_init()
1078 &pages->domains_itree, start_index, in pfn_reader_init()
1093 struct iopt_pages *pages = pfns->pages; in pfn_reader_release_pins() local
1102 iopt_pages_sub_npinned(pages, npages); in pfn_reader_release_pins()
1113 struct iopt_pages *pages = pfns->pages; in pfn_reader_destroy() local
1116 pfn_reader_user_destroy(&pfns->user, pfns->pages); in pfn_reader_destroy()
1118 WARN_ON(pages->last_npinned != pages->npinned); in pfn_reader_destroy()
1121 static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, in pfn_reader_first() argument
1130 rc = pfn_reader_init(pfns, pages, start_index, last_index); in pfn_reader_first()
1144 struct iopt_pages *pages; in iopt_alloc_pages() local
1157 pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT); in iopt_alloc_pages()
1158 if (!pages) in iopt_alloc_pages()
1161 kref_init(&pages->kref); in iopt_alloc_pages()
1162 xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT); in iopt_alloc_pages()
1163 mutex_init(&pages->mutex); in iopt_alloc_pages()
1164 pages->source_mm = current->mm; in iopt_alloc_pages()
1165 mmgrab(pages->source_mm); in iopt_alloc_pages()
1166 pages->uptr = (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE); in iopt_alloc_pages()
1167 pages->npages = DIV_ROUND_UP(length + (uptr - pages->uptr), PAGE_SIZE); in iopt_alloc_pages()
1168 pages->access_itree = RB_ROOT_CACHED; in iopt_alloc_pages()
1169 pages->domains_itree = RB_ROOT_CACHED; in iopt_alloc_pages()
1170 pages->writable = writable; in iopt_alloc_pages()
1172 pages->account_mode = IOPT_PAGES_ACCOUNT_NONE; in iopt_alloc_pages()
1174 pages->account_mode = IOPT_PAGES_ACCOUNT_USER; in iopt_alloc_pages()
1175 pages->source_task = current->group_leader; in iopt_alloc_pages()
1177 pages->source_user = get_uid(current_user()); in iopt_alloc_pages()
1178 return pages; in iopt_alloc_pages()
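
The uptr and npages setup in iopt_alloc_pages() is page-rounding arithmetic: the user pointer is aligned down to a page boundary and npages counts every page touched by [uptr, uptr + length). A minimal userspace check of that arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uintptr_t uptr = 0x1000f00;	/* unaligned user address */
	unsigned long length = 0x2300;	/* bytes requested */

	uintptr_t aligned = ALIGN_DOWN(uptr, PAGE_SIZE);
	unsigned long npages = DIV_ROUND_UP(length + (uptr - aligned), PAGE_SIZE);

	/* 0xf00 bytes of lead-in slack plus 0x2300 bytes needs 4 pages */
	printf("aligned=%#lx npages=%lu\n", (unsigned long)aligned, npages);
	return 0;
}
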
1183 struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref); in iopt_release_pages() local
1185 WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root)); in iopt_release_pages()
1186 WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root)); in iopt_release_pages()
1187 WARN_ON(pages->npinned); in iopt_release_pages()
1188 WARN_ON(!xa_empty(&pages->pinned_pfns)); in iopt_release_pages()
1189 mmdrop(pages->source_mm); in iopt_release_pages()
1190 mutex_destroy(&pages->mutex); in iopt_release_pages()
1191 put_task_struct(pages->source_task); in iopt_release_pages()
1192 free_uid(pages->source_user); in iopt_release_pages()
1193 kfree(pages); in iopt_release_pages()
1198 struct iopt_pages *pages, struct iommu_domain *domain, in iopt_area_unpin_domain() argument
1248 batch_unpin(batch, pages, 0, in iopt_area_unpin_domain()
1258 struct iopt_pages *pages, in __iopt_area_unfill_domain() argument
1268 lockdep_assert_held(&pages->mutex); in __iopt_area_unfill_domain()
1285 interval_tree_for_each_double_span(&span, &pages->domains_itree, in __iopt_area_unfill_domain()
1286 &pages->access_itree, start_index, in __iopt_area_unfill_domain()
1293 iopt_area_unpin_domain(&batch, area, pages, domain, in __iopt_area_unfill_domain()
1306 update_unpinned(pages); in __iopt_area_unfill_domain()
1310 struct iopt_pages *pages, in iopt_area_unfill_partial_domain() argument
1315 __iopt_area_unfill_domain(area, pages, domain, end_index - 1); in iopt_area_unfill_partial_domain()
1342 void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, in iopt_area_unfill_domain() argument
1345 __iopt_area_unfill_domain(area, pages, domain, in iopt_area_unfill_domain()
1363 lockdep_assert_held(&area->pages->mutex); in iopt_area_fill_domain()
1365 rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area), in iopt_area_fill_domain()
1390 iopt_area_unfill_partial_domain(area, area->pages, domain, in iopt_area_fill_domain()
1408 int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages) in iopt_area_fill_domains() argument
1423 mutex_lock(&pages->mutex); in iopt_area_fill_domains()
1424 rc = pfn_reader_first(&pfns, pages, iopt_area_index(area), in iopt_area_fill_domains()
1449 interval_tree_insert(&area->pages_node, &pages->domains_itree); in iopt_area_fill_domains()
1473 iopt_area_unfill_partial_domain(area, pages, domain, in iopt_area_fill_domains()
1480 mutex_unlock(&pages->mutex); in iopt_area_fill_domains()
1492 void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages) in iopt_area_unfill_domains() argument
1500 mutex_lock(&pages->mutex); in iopt_area_unfill_domains()
1512 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_unfill_domains()
1513 iopt_area_unfill_domain(area, pages, area->storage_domain); in iopt_area_unfill_domains()
1516 mutex_unlock(&pages->mutex); in iopt_area_unfill_domains()
1520 struct iopt_pages *pages, in iopt_pages_unpin_xarray() argument
1525 batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index, in iopt_pages_unpin_xarray()
1527 batch_unpin(batch, pages, 0, batch->total_pfns); in iopt_pages_unpin_xarray()
1542 void iopt_pages_unfill_xarray(struct iopt_pages *pages, in iopt_pages_unfill_xarray() argument
1551 lockdep_assert_held(&pages->mutex); in iopt_pages_unfill_xarray()
1553 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_unfill_xarray()
1554 &pages->domains_itree, start_index, in iopt_pages_unfill_xarray()
1563 iopt_pages_unpin_xarray(&batch, pages, span.start_hole, in iopt_pages_unfill_xarray()
1567 clear_xarray(&pages->pinned_pfns, span.start_used, in iopt_pages_unfill_xarray()
1574 update_unpinned(pages); in iopt_pages_unfill_xarray()
1590 void iopt_pages_fill_from_xarray(struct iopt_pages *pages, in iopt_pages_fill_from_xarray() argument
1595 XA_STATE(xas, &pages->pinned_pfns, start_index); in iopt_pages_fill_from_xarray()
1610 static int iopt_pages_fill_from_domain(struct iopt_pages *pages, in iopt_pages_fill_from_domain() argument
1619 area = iopt_pages_find_domain_area(pages, start_index); in iopt_pages_fill_from_domain()
1632 static int iopt_pages_fill_from_mm(struct iopt_pages *pages, in iopt_pages_fill_from_mm() argument
1643 rc = pfn_reader_user_pin(user, pages, cur_index, last_index); in iopt_pages_fill_from_mm()
1652 iopt_pages_err_unpin(pages, start_index, cur_index - 1, in iopt_pages_fill_from_mm()
1671 int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, in iopt_pages_fill_xarray() argument
1679 lockdep_assert_held(&pages->mutex); in iopt_pages_fill_xarray()
1681 pfn_reader_user_init(&user, pages); in iopt_pages_fill_xarray()
1683 interval_tree_for_each_double_span(&span, &pages->access_itree, in iopt_pages_fill_xarray()
1684 &pages->domains_itree, start_index, in iopt_pages_fill_xarray()
1690 iopt_pages_fill_from_xarray(pages, span.start_used, in iopt_pages_fill_xarray()
1697 iopt_pages_fill_from_domain(pages, span.start_used, in iopt_pages_fill_xarray()
1699 rc = pages_to_xarray(&pages->pinned_pfns, in iopt_pages_fill_xarray()
1710 rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole, in iopt_pages_fill_xarray()
1714 rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, in iopt_pages_fill_xarray()
1717 iopt_pages_err_unpin(pages, span.start_hole, in iopt_pages_fill_xarray()
1723 rc = pfn_reader_user_update_pinned(&user, pages); in iopt_pages_fill_xarray()
1727 pfn_reader_user_destroy(&user, pages); in iopt_pages_fill_xarray()
1732 iopt_pages_unfill_xarray(pages, start_index, xa_end - 1); in iopt_pages_fill_xarray()
1734 pfn_reader_user_destroy(&user, pages); in iopt_pages_fill_xarray()
1743 static int iopt_pages_rw_slow(struct iopt_pages *pages, in iopt_pages_rw_slow() argument
1752 mutex_lock(&pages->mutex); in iopt_pages_rw_slow()
1754 rc = pfn_reader_first(&pfns, pages, start_index, last_index); in iopt_pages_rw_slow()
1776 mutex_unlock(&pages->mutex); in iopt_pages_rw_slow()
1784 static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, in iopt_pages_rw_page() argument
1791 if (!mmget_not_zero(pages->source_mm)) in iopt_pages_rw_page()
1792 return iopt_pages_rw_slow(pages, index, index, offset, data, in iopt_pages_rw_page()
1800 mmap_read_lock(pages->source_mm); in iopt_pages_rw_page()
1802 pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), in iopt_pages_rw_page()
1805 mmap_read_unlock(pages->source_mm); in iopt_pages_rw_page()
1816 mmput(pages->source_mm); in iopt_pages_rw_page()
1831 int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, in iopt_pages_rw_access() argument
1836 bool change_mm = current->mm != pages->source_mm; in iopt_pages_rw_access()
1843 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) in iopt_pages_rw_access()
1848 return iopt_pages_rw_page(pages, start_index, in iopt_pages_rw_access()
1851 return iopt_pages_rw_slow(pages, start_index, last_index, in iopt_pages_rw_access()
1861 if (!mmget_not_zero(pages->source_mm)) in iopt_pages_rw_access()
1862 return iopt_pages_rw_slow(pages, start_index, in iopt_pages_rw_access()
1866 kthread_use_mm(pages->source_mm); in iopt_pages_rw_access()
1870 if (copy_to_user(pages->uptr + start_byte, data, length)) in iopt_pages_rw_access()
1873 if (copy_from_user(data, pages->uptr + start_byte, length)) in iopt_pages_rw_access()
1878 kthread_unuse_mm(pages->source_mm); in iopt_pages_rw_access()
1879 mmput(pages->source_mm); in iopt_pages_rw_access()
1886 iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index, in iopt_pages_get_exact_access() argument
1891 lockdep_assert_held(&pages->mutex); in iopt_pages_get_exact_access()
1894 for (node = interval_tree_iter_first(&pages->access_itree, index, last); in iopt_pages_get_exact_access()
1919 struct iopt_pages *pages = area->pages; in iopt_area_add_access() local
1923 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) in iopt_area_add_access()
1926 mutex_lock(&pages->mutex); in iopt_area_add_access()
1927 access = iopt_pages_get_exact_access(pages, start_index, last_index); in iopt_area_add_access()
1931 iopt_pages_fill_from_xarray(pages, start_index, last_index, in iopt_area_add_access()
1933 mutex_unlock(&pages->mutex); in iopt_area_add_access()
1943 rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages); in iopt_area_add_access()
1951 interval_tree_insert(&access->node, &pages->access_itree); in iopt_area_add_access()
1952 mutex_unlock(&pages->mutex); in iopt_area_add_access()
1958 mutex_unlock(&pages->mutex); in iopt_area_add_access()
1974 struct iopt_pages *pages = area->pages; in iopt_area_remove_access() local
1977 mutex_lock(&pages->mutex); in iopt_area_remove_access()
1978 access = iopt_pages_get_exact_access(pages, start_index, last_index); in iopt_area_remove_access()
1988 interval_tree_remove(&access->node, &pages->access_itree); in iopt_area_remove_access()
1989 iopt_pages_unfill_xarray(pages, start_index, last_index); in iopt_area_remove_access()
1992 mutex_unlock(&pages->mutex); in iopt_area_remove_access()