Lines Matching full:region
199 * range addressed by a single page table into a low and high region
201 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
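For example, with va_bits = 48 the low region is [0, 2^47) and the high region is [-(2^47), -1], i.e. 0xffff800000000000 through 0xffffffffffffffff when the negative bound is viewed as an unsigned 64-bit address.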
336 * maximum page table size for a memory region will be when the in vm_nr_pages_required()
374 * MMIO region would prevent silently clobbering the MMIO region. in __vm_create()
377 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
449 struct userspace_mem_region *region; in kvm_vm_restart() local
455 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
456 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in kvm_vm_restart()
461 ret, errno, region->region.slot, in kvm_vm_restart()
462 region->region.flags, in kvm_vm_restart()
463 region->region.guest_phys_addr, in kvm_vm_restart()
464 region->region.memory_size); in kvm_vm_restart()
552 * Userspace Memory Region Find
562 * Pointer to overlapping region, NULL if no such region.
564 * Searches for a region with any physical memory that overlaps with
568 * region exists.
576 struct userspace_mem_region *region = in userspace_mem_region_find() local
578 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
579 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
580 + region->region.memory_size - 1; in userspace_mem_region_find()
582 return region; in userspace_mem_region_find()
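A minimal usage sketch of this helper, assuming an already set-up struct kvm_vm *vm and an example GPA (probing a single address passes it as both start and end, exactly as addr_gpa2hva() does further down):

    struct userspace_mem_region *region;

    /* Does any memslot back guest physical address 0x10000? */
    region = userspace_mem_region_find(vm, 0x10000, 0x10000);
    if (region)
        printf("GPA 0x10000 lives in slot %u\n", region->region.slot);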
594 * KVM Userspace Memory Region Find
604 * Pointer to overlapping region, NULL if no such region.
613 struct userspace_mem_region *region; in kvm_userspace_memory_region_find() local
615 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
616 if (!region) in kvm_userspace_memory_region_find()
619 return &region->region; in kvm_userspace_memory_region_find()
677 struct userspace_mem_region *region, in __vm_mem_region_delete() argument
683 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
684 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
685 hash_del(&region->slot_node); in __vm_mem_region_delete()
688 region->region.memory_size = 0; in __vm_mem_region_delete()
689 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
691 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
692 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
694 if (region->fd >= 0) { in __vm_mem_region_delete()
696 ret = munmap(region->mmap_alias, region->mmap_size); in __vm_mem_region_delete()
698 close(region->fd); in __vm_mem_region_delete()
701 free(region); in __vm_mem_region_delete()
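The zero-size re-registration on lines 688-689 is how KVM deletes a memslot: KVM_SET_USER_MEMORY_REGION with memory_size == 0 drops the slot. Done by hand against a raw VM fd, the same operation looks roughly like this (vm_fd and slot supplied by the caller):

    struct kvm_userspace_memory_region del = {
        .slot = slot,          /* slot to remove */
        .memory_size = 0,      /* zero size == delete */
    };
    ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &del);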
711 struct userspace_mem_region *region; in kvm_vm_free() local
723 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
724 __vm_mem_region_delete(vmp, region, false); in kvm_vm_free()
827 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
836 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
837 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
840 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
841 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
842 "Duplicate GPA in region tree"); in vm_userspace_mem_region_gpa_insert()
848 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
849 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
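Lines 836-849 are the kernel's stock rb-tree insertion idiom: walk down comparing keys, remember the parent, then link and recolor. A self-contained sketch of the same descent (names mirror the listing; the duplicate-GPA TEST_ASSERT is elided):

    static void gpa_tree_insert(struct rb_root *gpa_tree,
                                struct userspace_mem_region *region)
    {
        struct rb_node **cur = &gpa_tree->rb_node, *parent = NULL;

        while (*cur) {
            struct userspace_mem_region *cregion =
                container_of(*cur, struct userspace_mem_region, gpa_node);

            parent = *cur;
            if (region->region.guest_phys_addr <
                cregion->region.guest_phys_addr)
                cur = &(*cur)->rb_left;
            else
                cur = &(*cur)->rb_right;
        }
        rb_link_node(&region->gpa_node, parent, cur);   /* splice in */
        rb_insert_color(&region->gpa_node, gpa_tree);   /* rebalance */
    }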
853 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
862 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
865 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
867 "Duplicate HVA in region tree"); in vm_userspace_mem_region_hva_insert()
873 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
874 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
881 struct kvm_userspace_memory_region region = { in __vm_set_user_memory_region() local
889 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
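__vm_set_user_memory_region() is a thin wrapper over the raw ioctl; the UAPI structure it populates (from <linux/kvm.h>) is:

    struct kvm_userspace_memory_region {
        __u32 slot;             /* which memslot to create/modify/delete */
        __u32 flags;            /* e.g. KVM_MEM_LOG_DIRTY_PAGES, KVM_MEM_READONLY */
        __u64 guest_phys_addr;  /* start GPA, page aligned */
        __u64 memory_size;      /* bytes to map; 0 deletes the slot */
        __u64 userspace_addr;   /* start of the backing host mapping */
    };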
902 * VM Userspace Memory Region Add
906 * src_type - Storage source for this region.
909 * slot - KVM region slot
911 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
919 * given by guest_paddr. The region is created with a KVM region slot
921 * region is created with the flags given by flags.
929 struct userspace_mem_region *region; in vm_userspace_mem_region_add() local
949 * Confirm a mem region with an overlapping address doesn't in vm_userspace_mem_region_add()
952 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_userspace_mem_region_add()
954 if (region != NULL) in vm_userspace_mem_region_add()
961 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
962 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
964 /* Confirm no region with the requested slot already exists. */ in vm_userspace_mem_region_add()
965 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
967 if (region->region.slot != slot) in vm_userspace_mem_region_add()
970 TEST_FAIL("A mem region with the requested slot " in vm_userspace_mem_region_add()
975 region->region.slot, in vm_userspace_mem_region_add()
976 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
977 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
980 /* Allocate and initialize new mem region structure. */ in vm_userspace_mem_region_add()
981 region = calloc(1, sizeof(*region)); in vm_userspace_mem_region_add()
982 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_userspace_mem_region_add()
983 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
1005 region->mmap_size += alignment; in vm_userspace_mem_region_add()
1007 region->fd = -1; in vm_userspace_mem_region_add()
1009 region->fd = kvm_memfd_alloc(region->mmap_size, in vm_userspace_mem_region_add()
1012 region->mmap_start = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
1015 region->fd, 0); in vm_userspace_mem_region_add()
1016 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_userspace_mem_region_add()
1020 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_userspace_mem_region_add()
1022 region->mmap_start, backing_src_pagesz); in vm_userspace_mem_region_add()
1025 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_userspace_mem_region_add()
1030 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
1033 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
1037 region->backing_src_type = src_type; in vm_userspace_mem_region_add()
1038 region->unused_phy_pages = sparsebit_alloc(); in vm_userspace_mem_region_add()
1039 sparsebit_set_num(region->unused_phy_pages, in vm_userspace_mem_region_add()
1041 region->region.slot = slot; in vm_userspace_mem_region_add()
1042 region->region.flags = flags; in vm_userspace_mem_region_add()
1043 region->region.guest_phys_addr = guest_paddr; in vm_userspace_mem_region_add()
1044 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
1045 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_userspace_mem_region_add()
1046 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
1052 guest_paddr, (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
1055 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
1056 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
1057 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
1060 if (region->fd >= 0) { in vm_userspace_mem_region_add()
1061 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
1064 region->fd, 0); in vm_userspace_mem_region_add()
1065 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_userspace_mem_region_add()
1069 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_userspace_mem_region_add()
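Putting the documented parameters together, a typical selftest call looks like the following (slot, GPA, and page count are example values; the backing source comes from the selftests' vm_mem_backing_src_type enum):

    /* Back 512 guest pages at GPA 0x10000000 with anonymous memory in
     * memslot 1, with dirty logging on. All constants are examples.
     */
    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                0x10000000, 1, 512,
                                KVM_MEM_LOG_DIRTY_PAGES);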
1074 * Memslot to region
1083 * Pointer to memory region structure that describes the memory region in memslot2region()
1085 * on error (e.g. currently no memory region using memslot as a KVM
1091 struct userspace_mem_region *region; in memslot2region() local
1093 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1095 if (region->region.slot == memslot) in memslot2region()
1096 return region; in memslot2region()
1098 fprintf(stderr, "No mem region with the requested slot found,\n" in memslot2region()
1102 TEST_FAIL("Mem region not found"); in memslot2region()
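hash_for_each_possible() only walks the bucket the slot number hashes to, so the lookup is O(bucket length) rather than O(regions). A caller-side sketch (the slot id is an example):

    struct userspace_mem_region *region = memslot2region(vm, 1);

    /* e.g. inspect which physical pages in the slot are still free */
    sparsebit_dump(stdout, region->unused_phy_pages, 0);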
1107 * VM Memory Region Flags Set
1117 * Sets the flags of the memory region specified by the value of slot,
1123 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1125 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1127 region->region.flags = flags; in vm_mem_region_set_flags()
1129 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
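This is the hook dirty-logging tests use to flip logging on and off for an existing slot, e.g.:

    vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES);
    /* ... run the guest, collect the dirty bitmap ... */
    vm_mem_region_set_flags(vm, 1, 0);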
1137 * VM Memory Region Move
1141 * slot - Slot of the memory region to move
1148 * Change the gpa of a memory region.
1152 struct userspace_mem_region *region; in vm_mem_region_move() local
1155 region = memslot2region(vm, slot); in vm_mem_region_move()
1157 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1159 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
1167 * VM Memory Region Delete
1171 * slot - Slot of the memory region to delete
1177 * Delete a memory region.
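Move and delete follow the same memslot2region()-then-ioctl shape as set_flags above; from a test they are one call each (slot and GPA are example values):

    vm_mem_region_move(vm, 1, 0x20000000);  /* rebase slot 1 to a new GPA */
    vm_mem_region_delete(vm, 1);            /* remove the slot entirely */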
1377 * a page. The allocated physical space comes from the TEST_DATA memory region.
1473 * Locates the memory region containing the VM physical address given
1476 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1480 struct userspace_mem_region *region; in addr_gpa2hva() local
1482 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1483 if (!region) { in addr_gpa2hva()
1488 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1489 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
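Once the containing region is found, the translation is pure offset arithmetic, which makes host-side access to guest memory a one-liner (example GPA):

    /* Fill one guest page from the host side. */
    memset(addr_gpa2hva(vm, 0x10000), 0xaa, vm->page_size);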
1504 * Locates the memory region containing the host virtual address given
1507 * region containing hva exists.
1514 struct userspace_mem_region *region = in addr_hva2gpa() local
1517 if (hva >= region->host_mem) { in addr_hva2gpa()
1518 if (hva <= (region->host_mem in addr_hva2gpa()
1519 + region->region.memory_size - 1)) in addr_hva2gpa()
1521 region->region.guest_phys_addr in addr_hva2gpa()
1522 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
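Since addr_hva2gpa() inverts addr_gpa2hva(), a cheap sanity check one can write in a test is a round trip:

    TEST_ASSERT(addr_hva2gpa(vm, addr_gpa2hva(vm, gpa)) == gpa,
                "GPA -> HVA -> GPA round trip mismatch");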
1554 struct userspace_mem_region *region; in addr_gpa2alias() local
1557 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1558 if (!region) in addr_gpa2alias()
1561 if (!region->host_alias) in addr_gpa2alias()
1564 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1565 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
1813 struct userspace_mem_region *region; in vm_dump() local
1820 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1823 (uint64_t) region->region.guest_phys_addr, in vm_dump()
1824 (uint64_t) region->region.memory_size, in vm_dump()
1825 region->host_mem); in vm_dump()
1827 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
1928 * memslot - Memory region to allocate page from
1943 struct userspace_mem_region *region; in vm_phy_pages_alloc() local
1953 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
1958 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in vm_phy_pages_alloc()
1959 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
1975 sparsebit_clear(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
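The allocator scans the slot's unused_phy_pages sparsebit for num consecutive free pages at or above paddr_min and clears the bits it hands out. A hedged usage sketch (all constants are examples):

    /* Grab 4 contiguous physical pages from memslot 0, at or above
     * GPA 0x1000; returns the base GPA of the run.
     */
    vm_paddr_t base = vm_phy_pages_alloc(vm, 4, 0x1000, 0);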