Lines Matching full:area
24 struct iopt_area *area; member
39 iter->area = iopt_area_iter_first(iopt, iova, iova); in iopt_area_contig_init()
40 if (!iter->area) in iopt_area_contig_init()
42 if (!iter->area->pages) { in iopt_area_contig_init()
43 iter->area = NULL; in iopt_area_contig_init()
46 return iter->area; in iopt_area_contig_init()
53 if (!iter->area) in iopt_area_contig_next()
55 last_iova = iopt_area_last_iova(iter->area); in iopt_area_contig_next()
60 iter->area = iopt_area_iter_next(iter->area, iter->cur_iova, in iopt_area_contig_next()
62 if (!iter->area) in iopt_area_contig_next()
64 if (iter->cur_iova != iopt_area_iova(iter->area) || in iopt_area_contig_next()
65 !iter->area->pages) { in iopt_area_contig_next()
66 iter->area = NULL; in iopt_area_contig_next()
69 return iter->area; in iopt_area_contig_next()
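
The fragments above implement the contiguous-area iterator: walking stops as soon as an area is missing, has no pages, or does not begin exactly at cur_iova (one past the previous area's last iova). A minimal stand-alone sketch of that gap check, using a hypothetical sorted ranges[] table in place of the kernel's interval tree:

	/* Sketch of the contiguity rule used by the iterator above: walking
	 * continues only while each next range starts exactly one past the
	 * previous range's last iova.  ranges[] and its values are
	 * hypothetical; the kernel walks an interval tree instead.
	 */
	#include <stdio.h>

	struct range { unsigned long first, last; };

	int main(void)
	{
		struct range ranges[] = {
			{ 0x1000, 0x1fff }, { 0x2000, 0x2fff }, { 0x4000, 0x4fff },
		};
		unsigned long cur_iova = 0x1000;
		unsigned int i;

		for (i = 0; i < 3; i++) {
			if (ranges[i].first != cur_iova) {
				printf("gap before 0x%lx, stop\n", ranges[i].first);
				break;	/* same test as cur_iova != iopt_area_iova() */
			}
			printf("contiguous range 0x%lx-0x%lx\n",
			       ranges[i].first, ranges[i].last);
			cur_iova = ranges[i].last + 1;
		}
		return 0;
	}
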
195 * The area takes a slice of the pages from start_byte to start_byte + length
197 static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area, in iopt_insert_area() argument
207 area->iommu_prot = iommu_prot; in iopt_insert_area()
208 area->page_offset = start_byte % PAGE_SIZE; in iopt_insert_area()
209 if (area->page_offset & (iopt->iova_alignment - 1)) in iopt_insert_area()
212 area->node.start = iova; in iopt_insert_area()
213 if (check_add_overflow(iova, length - 1, &area->node.last)) in iopt_insert_area()
216 area->pages_node.start = start_byte / PAGE_SIZE; in iopt_insert_area()
217 if (check_add_overflow(start_byte, length - 1, &area->pages_node.last)) in iopt_insert_area()
219 area->pages_node.last = area->pages_node.last / PAGE_SIZE; in iopt_insert_area()
220 if (WARN_ON(area->pages_node.last >= pages->npages)) in iopt_insert_area()
224 * The area is inserted with a NULL pages indicating it is not fully in iopt_insert_area()
227 area->iopt = iopt; in iopt_insert_area()
228 interval_tree_insert(&area->node, &iopt->area_itree); in iopt_insert_area()
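
Lines 207-219 above compute the area's span in iova space and the matching pages_node span in page units before the node is inserted into area_itree. A stand-alone sketch of that arithmetic; PAGE_SIZE, the sample values, and __builtin_add_overflow (standing in for the kernel's check_add_overflow()) are assumptions for illustration:

	/* Sketch of the index math in iopt_insert_area() above.  PAGE_SIZE,
	 * the sample iova/start_byte/length, and __builtin_add_overflow are
	 * illustrative stand-ins only.
	 */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		unsigned long iova = 0x100000, start_byte = 0x2200, length = 0x3000;
		unsigned long node_last, pages_last;
		unsigned long page_offset = start_byte % PAGE_SIZE;   /* 0x200 */

		if (__builtin_add_overflow(iova, length - 1, &node_last))
			return 1;		/* iova span would wrap */
		if (__builtin_add_overflow(start_byte, length - 1, &pages_last))
			return 1;		/* byte span would wrap */

		printf("node:  [0x%lx, 0x%lx]\n", iova, node_last);
		printf("pages: [page %lu, page %lu], page_offset 0x%lx\n",
		       start_byte / PAGE_SIZE, pages_last / PAGE_SIZE, page_offset);
		return 0;
	}
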
234 struct iopt_area *area; in iopt_area_alloc() local
236 area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT); in iopt_area_alloc()
237 if (!area) in iopt_area_alloc()
239 RB_CLEAR_NODE(&area->node.rb); in iopt_area_alloc()
240 RB_CLEAR_NODE(&area->pages_node.rb); in iopt_area_alloc()
241 return area; in iopt_area_alloc()
254 elm->area = iopt_area_alloc(); in iopt_alloc_area_pages()
255 if (!elm->area) in iopt_alloc_area_pages()
291 rc = iopt_insert_area(iopt, elm->area, elm->pages, iova, in iopt_alloc_area_pages()
303 static void iopt_abort_area(struct iopt_area *area) in iopt_abort_area() argument
306 WARN_ON(area->pages); in iopt_abort_area()
307 if (area->iopt) { in iopt_abort_area()
308 down_write(&area->iopt->iova_rwsem); in iopt_abort_area()
309 interval_tree_remove(&area->node, &area->iopt->area_itree); in iopt_abort_area()
310 up_write(&area->iopt->iova_rwsem); in iopt_abort_area()
312 kfree(area); in iopt_abort_area()
321 if (elm->area) in iopt_free_pages_list()
322 iopt_abort_area(elm->area); in iopt_free_pages_list()
337 rc = iopt_area_fill_domains(elm->area, elm->pages); in iopt_fill_domains_pages()
347 iopt_area_unfill_domains(undo_elm->area, undo_elm->pages); in iopt_fill_domains_pages()
372 * area->pages must be set inside the domains_rwsem to ensure in iopt_map_pages()
376 elm->area->pages = elm->pages; in iopt_map_pages()
378 elm->area = NULL; in iopt_map_pages()
426 if (elm.area) in iopt_map_user_pages()
427 iopt_abort_area(elm.area); in iopt_map_user_pages()
440 struct iopt_area *area; in iopt_get_pages() local
449 iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) { in iopt_get_pages()
451 unsigned long last = min(last_iova, iopt_area_last_iova(area)); in iopt_get_pages()
458 elm->start_byte = iopt_area_start_byte(area, iter.cur_iova); in iopt_get_pages()
459 elm->pages = area->pages; in iopt_get_pages()
479 struct iopt_area *area; in iopt_unmap_iova_range() local
485 * The domains_rwsem must be held in read mode any time any area->pages in iopt_unmap_iova_range()
487 * concurrently with cleaning up the area. in iopt_unmap_iova_range()
492 while ((area = iopt_area_iter_first(iopt, start, last))) { in iopt_unmap_iova_range()
493 unsigned long area_last = iopt_area_last_iova(area); in iopt_unmap_iova_range()
494 unsigned long area_first = iopt_area_iova(area); in iopt_unmap_iova_range()
497 /* Userspace should not race map/unmap's of the same area */ in iopt_unmap_iova_range()
498 if (!area->pages) { in iopt_unmap_iova_range()
516 if (area->num_accesses) { in iopt_unmap_iova_range()
517 size_t length = iopt_area_length(area); in iopt_unmap_iova_range()
520 area->prevent_access = true; in iopt_unmap_iova_range()
532 pages = area->pages; in iopt_unmap_iova_range()
533 area->pages = NULL; in iopt_unmap_iova_range()
536 iopt_area_unfill_domains(area, pages); in iopt_unmap_iova_range()
537 iopt_abort_area(area); in iopt_unmap_iova_range()
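
iopt_unmap_iova_range() above walks every area overlapping [start, last]; an area whose pages pointer is still NULL means userspace is racing a map against the unmap, and areas with active accesses are blocked via prevent_access before teardown. A rough stand-alone sketch of the overlap walk and the racing-map check (areas[] and the bail-out path are hypothetical stand-ins; the access-waiting and domain-unfill steps are omitted):

	/* Sketch of the range walk in iopt_unmap_iova_range() above.  The
	 * areas[] table replaces the interval tree and the exact error
	 * handling is simplified.
	 */
	#include <stdio.h>
	#include <stddef.h>

	struct fake_area { unsigned long first, last; void *pages; };

	int main(void)
	{
		struct fake_area areas[] = {
			{ 0x1000, 0x1fff, (void *)1 },
			{ 0x2000, 0x3fff, NULL },	/* mid-map: pages not set yet */
		};
		unsigned long start = 0x0, last = 0x4fff;
		unsigned int i;

		for (i = 0; i < 2; i++) {
			if (areas[i].last < start || areas[i].first > last)
				continue;	/* no overlap with the range */
			if (!areas[i].pages) {
				printf("area 0x%lx racing with map, abort unmap\n",
				       areas[i].first);
				return 1;
			}
			printf("unmap area [0x%lx, 0x%lx]\n",
			       areas[i].first, areas[i].last);
		}
		return 0;
	}
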
700 * This is used when removing a domain from the iopt. Every area in the iopt
707 struct iopt_area *area; in iopt_unfill_domain() local
721 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_unfill_domain()
722 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_unfill_domain()
723 struct iopt_pages *pages = area->pages; in iopt_unfill_domain()
730 WARN_ON(!area->storage_domain); in iopt_unfill_domain()
731 if (area->storage_domain == domain) in iopt_unfill_domain()
732 area->storage_domain = storage_domain; in iopt_unfill_domain()
735 iopt_area_unmap_domain(area, domain); in iopt_unfill_domain()
740 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_unfill_domain()
741 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_unfill_domain()
742 struct iopt_pages *pages = area->pages; in iopt_unfill_domain()
748 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_unfill_domain()
749 WARN_ON(area->storage_domain != domain); in iopt_unfill_domain()
750 area->storage_domain = NULL; in iopt_unfill_domain()
751 iopt_area_unfill_domain(area, pages, domain); in iopt_unfill_domain()
761 * Fill the domain with PFNs from every area in the iopt. On failure the domain
768 struct iopt_area *area; in iopt_fill_domain() local
774 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_fill_domain()
775 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_fill_domain()
776 struct iopt_pages *pages = area->pages; in iopt_fill_domain()
782 rc = iopt_area_fill_domain(area, domain); in iopt_fill_domain()
787 if (!area->storage_domain) { in iopt_fill_domain()
789 area->storage_domain = domain; in iopt_fill_domain()
790 interval_tree_insert(&area->pages_node, in iopt_fill_domain()
798 end_area = area; in iopt_fill_domain()
799 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_fill_domain()
800 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_fill_domain()
801 struct iopt_pages *pages = area->pages; in iopt_fill_domain()
803 if (area == end_area) in iopt_fill_domain()
809 interval_tree_remove(&area->pages_node, in iopt_fill_domain()
811 area->storage_domain = NULL; in iopt_fill_domain()
813 iopt_area_unfill_domain(area, pages, domain); in iopt_fill_domain()
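
iopt_fill_domain() above fills the new domain from every area and, on failure, replays the walk from the beginning up to the failing area (end_area) to undo the partial work. A stand-alone sketch of that fill-then-unwind pattern; fill(), unfill(), and the failure point are hypothetical:

	/* Sketch of the fill-then-unwind pattern shown above: every item is
	 * filled, and on failure the loop is replayed from the start up to
	 * (but not including) the failing item, mirroring end_area.
	 */
	#include <stdio.h>

	static int fill(int i)    { return i == 3 ? -1 : 0; }	/* pretend item 3 fails */
	static void unfill(int i) { printf("unfill %d\n", i); }

	int main(void)
	{
		int i, end = -1, rc = 0;

		for (i = 0; i < 5; i++) {
			rc = fill(i);
			if (rc) {
				end = i;	/* like end_area above */
				break;
			}
			printf("filled %d\n", i);
		}
		if (rc)
			for (i = 0; i < end; i++)	/* undo everything before end */
				unfill(i);
		return rc ? 1 : 0;
	}
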
819 /* All existing areas conform to an increased page size */
824 struct iopt_area *area; in iopt_check_iova_alignment() local
829 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_check_iova_alignment()
830 area = iopt_area_iter_next(area, 0, ULONG_MAX)) in iopt_check_iova_alignment()
831 if ((iopt_area_iova(area) & align_mask) || in iopt_check_iova_alignment()
832 (iopt_area_length(area) & align_mask) || in iopt_check_iova_alignment()
833 (area->page_offset & align_mask)) in iopt_check_iova_alignment()
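
iopt_check_iova_alignment() above accepts a larger page size only if every existing area's iova, length, and page_offset are multiples of it. A stand-alone sketch of the mask test; new_page_size and the sample values are assumptions:

	/* Sketch of the alignment test above: an area conforms to a new page
	 * size only if its iova, length and page_offset are all multiples
	 * of it.  The values here are illustrative.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long new_page_size = 0x10000;		/* e.g. 64K */
		unsigned long align_mask = new_page_size - 1;
		unsigned long iova = 0x200000, length = 0x30000, page_offset = 0;

		if ((iova & align_mask) || (length & align_mask) ||
		    (page_offset & align_mask))
			printf("area would violate the new alignment\n");
		else
			printf("area conforms\n");
		return 0;
	}
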
888 /* No area exists that is outside the allowed domain aperture */ in iopt_table_add_domain()
996 * iopt_area_split - Split an area into two parts at iova
997 * @area: The area to split
998 * @iova: Becomes the last iova of a new area
1000 * This splits an area into two. It is part of the VFIO compatibility to allow
1004 static int iopt_area_split(struct iopt_area *area, unsigned long iova) in iopt_area_split() argument
1006 unsigned long alignment = area->iopt->iova_alignment; in iopt_area_split()
1007 unsigned long last_iova = iopt_area_last_iova(area); in iopt_area_split()
1008 unsigned long start_iova = iopt_area_iova(area); in iopt_area_split()
1010 struct io_pagetable *iopt = area->iopt; in iopt_area_split()
1011 struct iopt_pages *pages = area->pages; in iopt_area_split()
1021 if (!pages || area->prevent_access) in iopt_area_split()
1025 iopt_area_start_byte(area, new_start) & (alignment - 1)) in iopt_area_split()
1043 if (area->num_accesses) { in iopt_area_split()
1052 if (area->storage_domain && !iopt->disable_large_pages) { in iopt_area_split()
1057 interval_tree_remove(&area->node, &iopt->area_itree); in iopt_area_split()
1058 rc = iopt_insert_area(iopt, lhs, area->pages, start_iova, in iopt_area_split()
1059 iopt_area_start_byte(area, start_iova), in iopt_area_split()
1061 area->iommu_prot); in iopt_area_split()
1065 rc = iopt_insert_area(iopt, rhs, area->pages, new_start, in iopt_area_split()
1066 iopt_area_start_byte(area, new_start), in iopt_area_split()
1067 last_iova - new_start + 1, area->iommu_prot); in iopt_area_split()
1072 * If the original area has filled a domain, domains_itree has to be in iopt_area_split()
1075 if (area->storage_domain) { in iopt_area_split()
1076 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_split()
1081 lhs->storage_domain = area->storage_domain; in iopt_area_split()
1082 lhs->pages = area->pages; in iopt_area_split()
1083 rhs->storage_domain = area->storage_domain; in iopt_area_split()
1084 rhs->pages = area->pages; in iopt_area_split()
1086 kfree(area); in iopt_area_split()
1098 interval_tree_insert(&area->node, &iopt->area_itree); in iopt_area_split()
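
iopt_area_split() above cuts one area into a left half ending at iova and a right half starting at iova + 1, re-inserting both through iopt_insert_area() with the original start_byte offsets and iommu_prot. A stand-alone sketch of the split arithmetic; the sample values and the simplified bounds check are assumptions:

	/* Sketch of the split arithmetic in iopt_area_split() above: the cut
	 * point becomes the last iova of the left half and the right half
	 * starts one past it.  Values and bounds handling are illustrative.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long start_iova = 0x10000, last_iova = 0x17fff;
		unsigned long iova = 0x13fff;		/* requested cut point */
		unsigned long new_start = iova + 1;

		if (iova < start_iova || iova >= last_iova) {
			printf("cut point must fall strictly inside the area\n");
			return 1;
		}
		printf("lhs: [0x%lx, 0x%lx] (%lu bytes)\n",
		       start_iova, iova, iova - start_iova + 1);
		printf("rhs: [0x%lx, 0x%lx] (%lu bytes)\n",
		       new_start, last_iova, last_iova - new_start + 1);
		return 0;
	}
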
1115 struct iopt_area *area; in iopt_cut_iova() local
1117 area = iopt_area_iter_first(iopt, iovas[i], iovas[i]); in iopt_cut_iova()
1118 if (!area) in iopt_cut_iova()
1120 rc = iopt_area_split(area, iovas[i]); in iopt_cut_iova()