/openbmc/linux/arch/powerpc/kvm/

book3s_64_mmu_radix.c
    144  u64 pte, base, gpa;   [in kvmppc_mmu_walk_radix_tree(), local]
    199  gpa = pte & 0x01fffffffffff000ul;   [in kvmppc_mmu_walk_radix_tree()]
    200  if (gpa & ((1ul << offset) - 1))   [in kvmppc_mmu_walk_radix_tree()]
    202  gpa |= eaddr & ((1ul << offset) - 1);   [in kvmppc_mmu_walk_radix_tree()]
    210  gpte->raddr = gpa;   [in kvmppc_mmu_walk_radix_tree()]
    418  void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,   [in kvmppc_unmap_pte(), argument]
    425  unsigned long gfn = gpa >> PAGE_SHIFT;   [in kvmppc_unmap_pte()]
    429  old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);   [in kvmppc_unmap_pte()]
    430  kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);   [in kvmppc_unmap_pte()]
    449  gpa &= ~(page_size - 1);   [in kvmppc_unmap_pte()]
    [all …]

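The pattern these hits share is the split of a guest physical address into frame bits taken from the leaf PTE and offset bits taken from the effective address, plus the gpa-to-gfn shift used on the unmap side. A minimal standalone sketch of that arithmetic (the constants mirror the walk above; the helper names are illustrative, not the kernel's):

    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Compose a gpa the way the radix walk above does: the real page
     * number comes from the PTE, the low bits (up to the page-size
     * shift "offset" of the level where the walk stopped) come from
     * the guest effective address. */
    static uint64_t compose_gpa(uint64_t pte, uint64_t eaddr, unsigned int offset)
    {
        uint64_t gpa = pte & 0x01fffffffffff000ULL;    /* frame bits from the PTE */

        gpa |= eaddr & ((1ULL << offset) - 1);         /* in-page offset from eaddr */
        return gpa;
    }

    /* The unmap side works on whole frames: drop the in-page bits and
     * shift down to a guest frame number. */
    static uint64_t gpa_to_gfn(uint64_t gpa)
    {
        return gpa >> PAGE_SHIFT;
    }
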
book3s_hv_uvmem.c
    234  unsigned long gpa;   [member]
    516  struct kvm *kvm, unsigned long gpa, struct page *fault_page)   [in __kvmppc_svm_page_out(), argument]
    536  if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))   [in __kvmppc_svm_page_out()]
    569  gpa, 0, page_shift);   [in __kvmppc_svm_page_out()]
    589  struct kvm *kvm, unsigned long gpa,   [in kvmppc_svm_page_out(), argument]
    595  ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,   [in kvmppc_svm_page_out()]
    645  PAGE_SHIFT, kvm, pvt->gpa, NULL))   [in kvmppc_uvmem_drop_pages()]
    647  pvt->gpa, addr);   [in kvmppc_uvmem_drop_pages()]
    695  static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)   [in kvmppc_uvmem_get_page(), argument]
    719  kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);   [in kvmppc_uvmem_get_page()]
    [all …]

/openbmc/linux/tools/testing/selftests/kvm/

max_guest_memory_test.c
    23   uint64_t gpa;   [in guest_code(), local]
    25   for (gpa = start_gpa; gpa < end_gpa; gpa += stride)   [in guest_code()]
    26   *((volatile uint64_t *)gpa) = gpa;   [in guest_code()]
    96   uint64_t gpa, nr_bytes;   [in spawn_workers(), local]
    110  for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {   [in spawn_workers()]
    112  info[i].start_gpa = gpa;   [in spawn_workers()]
    113  info[i].end_gpa = gpa + nr_bytes;   [in spawn_workers()]
    170  uint64_t max_gpa, gpa, slot_size, max_mem, i;   [in main(), local]
    232  gpa = 0;   [in main()]
    234  gpa = start_gpa + ((slot - first_slot) * slot_size);   [in main()]
    [all …]

memslot_perf_test.c
    185  static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)   [in vm_gpa2hva(), argument]
    192  TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");   [in vm_gpa2hva()]
    193  TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,   [in vm_gpa2hva()]
    195  gpa -= MEM_GPA;   [in vm_gpa2hva()]
    197  gpage = gpa / guest_page_size;   [in vm_gpa2hva()]
    198  pgoffs = gpa % guest_page_size;   [in vm_gpa2hva()]
    331  uint64_t gpa;   [in prepare_vm(), local]
    337  gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);   [in prepare_vm()]
    338  TEST_ASSERT(gpa == guest_addr,   [in prepare_vm()]
    632  uint64_t gpa, ctr;   [in test_memslot_do_unmap(), local]
    [all …]

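vm_gpa2hva() reduces a gpa to a (guest page index, in-page offset) pair relative to the base of the test region before looking up the host mapping. A hedged sketch of just that bounds check and division, with the region parameters passed in explicitly and the memslot lookup left out:

    #include <assert.h>
    #include <stdint.h>

    struct gpa_split {
        uint64_t gpage;    /* guest page index within the test region */
        uint64_t pgoffs;   /* byte offset within that guest page */
    };

    /* mem_gpa, npages and guest_page_size stand in for the test's
     * MEM_GPA, data->npages and guest_page_size. */
    static struct gpa_split split_gpa(uint64_t gpa, uint64_t mem_gpa,
                                      uint64_t npages, uint64_t guest_page_size)
    {
        struct gpa_split s;

        assert(gpa >= mem_gpa);                             /* "Too low gpa to translate" */
        assert(gpa < mem_gpa + npages * guest_page_size);   /* must land inside the region */

        gpa -= mem_gpa;
        s.gpage = gpa / guest_page_size;
        s.pgoffs = gpa % guest_page_size;
        return s;
    }
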
memslot_modification_stress_test.c
    69   uint64_t gpa;   [in add_remove_memslot(), local]
    76   gpa = memstress_args.gpa - pages * vm->page_size;   [in add_remove_memslot()]
    80   vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,   [in add_remove_memslot()]

/openbmc/qemu/target/i386/hvf/

x86_mmu.c
    45   uint64_t gpa;   [member]
    83   uint64_t gpa = pt->pte[level] & page_mask;   [in get_pt_entry(), local]
    86   gpa = pt->pte[level];   [in get_pt_entry()]
    90   address_space_read(&address_space_memory, gpa + index * pte_size(pae),   [in get_pt_entry()]
    209  pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);   [in walk_gpt()]
    211  pt->gpa = large_page_gpa(pt, pae, largeness);   [in walk_gpt()]
    218  bool mmu_gva_to_gpa(CPUState *cpu, target_ulong gva, uint64_t *gpa)   [in mmu_gva_to_gpa(), argument]
    225  *gpa = gva;   [in mmu_gva_to_gpa()]
    231  *gpa = pt.gpa;   [in mmu_gva_to_gpa()]
    240  uint64_t gpa;   [in vmx_write_mem(), local]
    [all …]

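The walker's last step is again frame-plus-offset: the leaf PTE supplies the frame bits, the low 12 bits of the gva supply the offset for a 4 KiB mapping (large pages keep more gva bits), and with paging disabled the gva is returned as the gpa unchanged. A simplified sketch of that final step only, with the table walk itself omitted and page_mask passed in as a parameter:

    #include <stdbool.h>
    #include <stdint.h>

    /* 4 KiB-mapping case only; page_mask selects the frame bits of a
     * leaf PTE for the active paging mode. */
    static uint64_t leaf_pte_to_gpa(uint64_t leaf_pte, uint64_t gva,
                                    uint64_t page_mask, bool paging_enabled)
    {
        if (!paging_enabled)
            return gva;                                  /* identity: gpa == gva */

        return (leaf_pte & page_mask) | (gva & 0xfff);   /* frame | page offset */
    }
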
/openbmc/linux/arch/s390/kvm/

gaccess.h
    152  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);   [in write_guest_lc(), local]
    154  return kvm_write_guest(vcpu->kvm, gpa, data, len);   [in write_guest_lc()]
    178  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);   [in read_guest_lc(), local]
    180  return kvm_read_guest(vcpu->kvm, gpa, data, len);   [in read_guest_lc()]
    190  unsigned long *gpa, enum gacc_mode mode,
    196  int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
    199  int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
    209  int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len, __uint128_t *old,
    371  int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,   [in write_guest_abs(), argument]
    374  return kvm_write_guest(vcpu->kvm, gpa, data, len);   [in write_guest_abs()]
    [all …]

vsie.c
    658  static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)   [in pin_guest_page(), argument]
    662  page = gfn_to_page(kvm, gpa_to_gfn(gpa));   [in pin_guest_page()]
    665  *hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);   [in pin_guest_page()]
    670  static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)   [in unpin_guest_page(), argument]
    674  mark_page_dirty(kvm, gpa_to_gfn(gpa));   [in unpin_guest_page()]
    739  gpa_t gpa;   [in pin_blocks(), local]
    742  gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;   [in pin_blocks()]
    744  gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;   [in pin_blocks()]
    745  if (gpa) {   [in pin_blocks()]
    746  if (gpa < 2 * PAGE_SIZE)   [in pin_blocks()]
    [all …]

gaccess.c
    606  static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)   [in deref_table(), argument]
    608  return kvm_read_guest(kvm, gpa, val, sizeof(*val));   [in deref_table()]
    633  unsigned long *gpa, const union asce asce,   [in guest_translate(), argument]
    792  *gpa = raddr.addr;   [in guest_translate()]
    816  enum gacc_mode mode, gpa_t gpa)   [in vm_check_access_key(), argument]
    826  hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));   [in vm_check_access_key()]
    879  enum gacc_mode mode, union asce asce, gpa_t gpa,   [in vcpu_check_access_key(), argument]
    893  hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));   [in vcpu_check_access_key()]
    962  unsigned long gpa;   [in guest_range_to_gpas(), local]
    972  rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);   [in guest_range_to_gpas()]
    [all …]

/openbmc/qemu/hw/i386/kvm/

xen_overlay.c
    52   void xen_overlay_do_map_page(MemoryRegion *page, uint64_t gpa)   [in xen_overlay_do_map_page(), argument]
    62   if (gpa == INVALID_GPA) {   [in xen_overlay_do_map_page()]
    66   memory_region_set_address(page, gpa);   [in xen_overlay_do_map_page()]
    68   } else if (gpa != INVALID_GPA) {   [in xen_overlay_do_map_page()]
    69   memory_region_add_subregion_overlap(get_system_memory(), gpa, page, 0);   [in xen_overlay_do_map_page()]
    188  int xen_overlay_map_shinfo_page(uint64_t gpa)   [in type_init()]
    207  xen_overlay_do_map_page(&s->shinfo_mem, gpa);   [in type_init()]
    208  if (gpa != INVALID_GPA) {   [in type_init()]
    209  ret = xen_overlay_set_be_shinfo(gpa >> XEN_PAGE_SHIFT);   [in type_init()]
    214  s->shinfo_gpa = gpa;   [in type_init()]

/openbmc/linux/tools/testing/selftests/kvm/lib/

memstress.c
    109  vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);   [in memstress_setup_vcpus()]
    114  vcpu_args->gpa = args->gpa;   [in memstress_setup_vcpus()]
    120  i, vcpu_args->gpa, vcpu_args->gpa +   [in memstress_setup_vcpus()]
    197  args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;   [in memstress_create_vm()]
    198  args->gpa = align_down(args->gpa, backing_src_pagesz);   [in memstress_create_vm()]
    201  args->gpa = align_down(args->gpa, 1 << 20);   [in memstress_create_vm()]
    205  args->gpa, args->gpa + args->size);   [in memstress_create_vm()]
    210  vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;   [in memstress_create_vm()]
    218  virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);   [in memstress_create_vm()]

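memstress_create_vm() places the test region just below the top of guest physical memory and then rounds the base down, first to the backing source's page size and, where needed, to 1 MiB; memstress_setup_vcpus() then hands each vCPU a contiguous slice. A sketch of that rounding and slicing arithmetic, assuming power-of-two alignments (this align_down() is an illustrative stand-in, not necessarily the selftest library's own definition):

    #include <stdint.h>

    /* Round v down to a power-of-two alignment, as applied to args->gpa above. */
    static uint64_t align_down(uint64_t v, uint64_t align)
    {
        return v & ~(align - 1);
    }

    /* With per-vCPU memory, vCPU i's slice starts this far into the region. */
    static uint64_t vcpu_slice_gpa(uint64_t region_gpa, uint64_t vcpu_memory_bytes,
                                   uint64_t i)
    {
        return region_gpa + i * vcpu_memory_bytes;
    }
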
/openbmc/linux/arch/x86/include/asm/uv/

uv_hub.h
    461  uv_gpa_in_mmr_space(unsigned long gpa)   [in uv_gpa_in_mmr_space(), argument]
    463  return (gpa >> 62) == 0x3UL;   [in uv_gpa_in_mmr_space()]
    467  static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)   [in uv_gpa_to_soc_phys_ram(), argument]
    475  gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |   [in uv_gpa_to_soc_phys_ram()]
    476  ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);   [in uv_gpa_to_soc_phys_ram()]
    478  paddr = gpa & uv_hub_info->gpa_mask;   [in uv_gpa_to_soc_phys_ram()]
    485  static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)   [in uv_gpa_to_gnode(), argument]
    490  return gpa >> n_lshift;   [in uv_gpa_to_gnode()]
    492  return uv_gam_range(gpa)->nasid >> 1;   [in uv_gpa_to_gnode()]
    496  static inline int uv_gpa_to_pnode(unsigned long gpa)   [in uv_gpa_to_pnode(), argument]
    [all …]

/openbmc/linux/virt/kvm/

pfncache.c
    86   if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)   [in kvm_gpc_check()]
    222  gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);   [in hva_to_pfn_retry()]
    239  static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,   [in __kvm_gpc_refresh(), argument]
    243  unsigned long page_offset = gpa & ~PAGE_MASK;   [in __kvm_gpc_refresh()]
    276  if (gpc->gpa != gpa || gpc->generation != slots->generation ||   [in __kvm_gpc_refresh()]
    278  gfn_t gfn = gpa_to_gfn(gpa);   [in __kvm_gpc_refresh()]
    280  gpc->gpa = gpa;   [in __kvm_gpc_refresh()]
    336  return __kvm_gpc_refresh(gpc, gpc->gpa, len);   [in kvm_gpc_refresh()]
    357  int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)   [in kvm_gpc_activate(), argument]
    378  return __kvm_gpc_refresh(gpc, gpa, len);   [in kvm_gpc_activate()]

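Two bits of offset arithmetic recur in this cache: a check that the cached gpa range does not cross a page boundary, and reconstruction of the usable kernel pointer by adding the gpa's in-page offset to the mapped page. A standalone sketch of both (PAGE_SIZE fixed at 4 KiB here for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* A cached mapping of @len bytes at @gpa is only usable if the whole
     * range sits within one guest page. */
    static bool gpc_range_in_one_page(uint64_t gpa, unsigned long len)
    {
        return (gpa & ~PAGE_MASK) + len <= PAGE_SIZE;
    }

    /* Given the kernel mapping of the backing page, the pointer for @gpa
     * is that mapping plus the gpa's offset within the page. */
    static void *gpc_khva(void *page_khva, uint64_t gpa)
    {
        return (char *)page_khva + (gpa & ~PAGE_MASK);
    }
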
/openbmc/qemu/target/i386/kvm/

xen-emu.c
    55   static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,   [in kvm_gva_to_gpa(), argument]
    70   *gpa = tr.physical_address;   [in kvm_gva_to_gpa()]
    78   uint64_t gpa;   [in kvm_gva_rw(), local]
    82   if (!kvm_gva_to_gpa(cs, gva, &gpa, &len, is_write)) {   [in kvm_gva_rw()]
    89   cpu_physical_memory_rw(gpa, buf, len, is_write);   [in kvm_gva_rw()]
    284  static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)   [in kvm_xen_set_vcpu_attr(), argument]
    289  xhsi.u.gpa = gpa;   [in kvm_xen_set_vcpu_attr()]
    291  trace_kvm_xen_set_vcpu_attr(cs->cpu_index, type, gpa);   [in kvm_xen_set_vcpu_attr()]
    321  static int set_vcpu_info(CPUState *cs, uint64_t gpa)   [in set_vcpu_info(), argument]
    329  ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, gpa);   [in set_vcpu_info()]
    [all …]

trace-events
    8    kvm_hc_map_gpa_range(uint64_t gpa, uint64_t size, uint64_t attributes, uint64_t flags) "gpa 0x%" PR…
    14   kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64

/openbmc/linux/arch/x86/kvm/mmu/

page_track.h
    30   void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
    41   static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,   [in __kvm_page_track_write(), argument]
    50   static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,   [in kvm_page_track_write(), argument]
    53   __kvm_page_track_write(vcpu->kvm, gpa, new, bytes);   [in kvm_page_track_write()]
    55   kvm_mmu_track_write(vcpu, gpa, new, bytes);   [in kvm_page_track_write()]

/openbmc/linux/arch/riscv/kvm/

tlb.c
    21   gpa_t gpa, gpa_t gpsz,   [in kvm_riscv_local_hfence_gvma_vmid_gpa(), argument]
    33   for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))   [in kvm_riscv_local_hfence_gvma_vmid_gpa()]
    38   for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))   [in kvm_riscv_local_hfence_gvma_vmid_gpa()]
    49   void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,   [in kvm_riscv_local_hfence_gvma_gpa(), argument]
    61   for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))   [in kvm_riscv_local_hfence_gvma_gpa()]
    66   for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))   [in kvm_riscv_local_hfence_gvma_gpa()]
    334  gpa_t gpa, gpa_t gpsz,   [in kvm_riscv_hfence_gvma_vmid_gpa(), argument]
    341  data.addr = gpa;   [in kvm_riscv_hfence_gvma_vmid_gpa()]

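Both local HFENCE helpers walk [gpa, gpa + gpsz) in 2^order-sized steps and issue one fence per step. A sketch of that loop shape with the fence instruction abstracted behind a callback (the callback and its name are illustrative, not the kernel's):

    #include <stdint.h>

    #define BIT(n) (1ULL << (n))

    typedef uint64_t gpa_t;

    /* One flush per 2^order-sized block of the guest-physical range,
     * mirroring the loops in the helpers above. */
    static void hfence_gpa_range(gpa_t gpa, gpa_t gpsz, unsigned long order,
                                 void (*flush_one)(gpa_t pos))
    {
        gpa_t pos;

        for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
            flush_one(pos);
    }
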
mmu.c
    179  gpa_t gpa, phys_addr_t hpa,   [in gstage_map_page(), argument]
    219  return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);   [in gstage_map_page()]
    346  int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,   [in kvm_riscv_gstage_ioremap(), argument]
    359  end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;   [in kvm_riscv_gstage_ioremap()]
    362  for (addr = gpa; addr < end; addr += PAGE_SIZE) {   [in kvm_riscv_gstage_ioremap()]
    386  void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)   [in kvm_riscv_gstage_iounmap(), argument]
    389  gstage_unmap_range(kvm, gpa, size, false);   [in kvm_riscv_gstage_iounmap()]
    425  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;   [in kvm_arch_flush_shadow_memslot(), local]
    429  gstage_unmap_range(kvm, gpa, size, false);   [in kvm_arch_flush_shadow_memslot()]
    510  gpa_t gpa = base_gpa + (vm_start - hva);   [in kvm_arch_prepare_memory_region(), local]
    [all …]

/openbmc/linux/arch/x86/kvm/

cpuid.h
    46   static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)   [in kvm_vcpu_is_legal_gpa(), argument]
    48   return !(gpa & vcpu->arch.reserved_gpa_bits);   [in kvm_vcpu_is_legal_gpa()]
    51   static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)   [in kvm_vcpu_is_illegal_gpa(), argument]
    53   return !kvm_vcpu_is_legal_gpa(vcpu, gpa);   [in kvm_vcpu_is_illegal_gpa()]
    57   gpa_t gpa, gpa_t alignment)   [in kvm_vcpu_is_legal_aligned_gpa(), argument]
    59   return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);   [in kvm_vcpu_is_legal_aligned_gpa()]
    62   static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)   [in page_address_valid(), argument]
    64   return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);   [in page_address_valid()]

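These helpers reduce gpa validation to two bit tests: no reserved bits set (the mask of bits a guest physical address may not use, typically everything above the guest's MAXPHYADDR) and alignment to the required boundary. A standalone sketch with the reserved-bit mask passed explicitly instead of read from vcpu->arch:

    #include <stdbool.h>
    #include <stdint.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    static bool gpa_is_legal(uint64_t gpa, uint64_t reserved_gpa_bits)
    {
        return !(gpa & reserved_gpa_bits);
    }

    /* page_address_valid() above is this check with alignment == PAGE_SIZE. */
    static bool gpa_is_legal_aligned(uint64_t gpa, uint64_t reserved_gpa_bits,
                                     uint64_t alignment)
    {
        return IS_ALIGNED(gpa, alignment) && gpa_is_legal(gpa, reserved_gpa_bits);
    }

For example, with a 48-bit guest physical address width the mask would be ~((1ULL << 48) - 1), so any gpa at or above 1ULL << 48 fails the legality test.
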
/openbmc/linux/drivers/gpu/drm/i915/gvt/

page_track.c
    159  int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,   [in intel_vgpu_page_track_handler(), argument]
    165  page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);   [in intel_vgpu_page_track_handler()]
    171  intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);   [in intel_vgpu_page_track_handler()]
    173  ret = page_track->handler(page_track, gpa, data, bytes);   [in intel_vgpu_page_track_handler()]
    175  gvt_err("guest page write error, gpa %llx\n", gpa);   [in intel_vgpu_page_track_handler()]

/openbmc/qemu/hw/i386/

pc_sysfw.c
    158  hwaddr gpa;   [in pc_system_flash_map(), local]
    189  gpa = 0x100000000ULL - total_size; /* where the flash is mapped */   [in pc_system_flash_map()]
    193  sysbus_mmio_map(SYS_BUS_DEVICE(system_flash), 0, gpa);   [in pc_system_flash_map()]
    208  x86_firmware_configure(gpa, flash_ptr, flash_size);   [in pc_system_flash_map()]
    261  void x86_firmware_configure(hwaddr gpa, void *ptr, int size)   [in x86_firmware_configure(), argument]
    282  sev_encrypt_flash(gpa, ptr, size, &error_fatal);   [in x86_firmware_configure()]

/openbmc/qemu/hw/vfio/

spapr.c
    45   static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)   [in vfio_prereg_gpa_to_vaddr(), argument]
    49   (gpa - section->offset_within_address_space);   [in vfio_prereg_gpa_to_vaddr()]
    59   const hwaddr gpa = section->offset_within_address_space;   [in vfio_prereg_listener_region_add(), local]
    84   if (gpa >= end) {   [in vfio_prereg_listener_region_add()]
    90   reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);   [in vfio_prereg_listener_region_add()]
    91   reg.size = end - gpa;   [in vfio_prereg_listener_region_add()]
    118  const hwaddr gpa = section->offset_within_address_space;   [in vfio_prereg_listener_region_del(), local]
    143  if (gpa >= end) {   [in vfio_prereg_listener_region_del()]
    147  reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);   [in vfio_prereg_listener_region_del()]
    148  reg.size = end - gpa;   [in vfio_prereg_listener_region_del()]

/openbmc/linux/arch/mips/kvm/

mmu.c
    449  gpa_t gpa = range->start << PAGE_SHIFT;   [in kvm_set_spte_gfn(), local]
    451  pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);   [in kvm_set_spte_gfn()]
    484  gpa_t gpa = range->start << PAGE_SHIFT;   [in kvm_test_age_gfn(), local]
    485  pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);   [in kvm_test_age_gfn()]
    510  static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,   [in _kvm_mips_map_page_fast(), argument]
    515  gfn_t gfn = gpa >> PAGE_SHIFT;   [in _kvm_mips_map_page_fast()]
    524  ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);   [in _kvm_mips_map_page_fast()]
    586  static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,   [in kvm_mips_map_page(), argument]
    592  gfn_t gfn = gpa >> PAGE_SHIFT;   [in kvm_mips_map_page()]
    602  err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,   [in kvm_mips_map_page()]
    [all …]

/openbmc/linux/arch/riscv/include/asm/

kvm_host.h
    258  gpa_t gpa, gpa_t gpsz,
    261  void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
    287  gpa_t gpa, gpa_t gpsz,
    305  int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
    308  void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
    312  gpa_t gpa, unsigned long hva, bool is_write);

/openbmc/linux/tools/testing/selftests/kvm/x86_64/

smaller_maxphyaddr_emulation_test.c
    55   uint64_t gpa;   [in main(), local]
    75   gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,   [in main()]
    77   TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");   [in main()]