
Searched refs:gfn (Results 1 – 25 of 97) sorted by relevance

/openbmc/linux/arch/x86/kvm/mmu/
mmutrace.h
13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
224 __entry->gfn = gfn;
246 __entry->gfn = gfn;
339 __field(u64, gfn)
350 __entry->gfn = gfn;
375 __field(u64, gfn)
381 __entry->gfn = fault->gfn;
397 __field(u64, gfn)
407 __entry->gfn = gfn;
[all …]
page_track.c
67 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track()
78 gfn_t gfn) in __kvm_write_track_add_gfn() argument
88 update_gfn_write_track(slot, gfn, 1); in __kvm_write_track_add_gfn()
94 kvm_mmu_gfn_disallow_lpage(slot, gfn); in __kvm_write_track_add_gfn()
101 struct kvm_memory_slot *slot, gfn_t gfn) in __kvm_write_track_remove_gfn() argument
111 update_gfn_write_track(slot, gfn, -1); in __kvm_write_track_remove_gfn()
117 kvm_mmu_gfn_allow_lpage(slot, gfn); in __kvm_write_track_remove_gfn()
262 slot = gfn_to_memslot(kvm, gfn); in kvm_write_track_add_gfn()
269 __kvm_write_track_add_gfn(kvm, slot, gfn); in kvm_write_track_add_gfn()
292 slot = gfn_to_memslot(kvm, gfn); in kvm_write_track_remove_gfn()
[all …]
mmu_internal.h
80 gfn_t gfn; member
160 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level() argument
162 return gfn & -KVM_PAGES_PER_HPAGE(level); in gfn_round_for_level()
166 gfn_t gfn, bool can_unsync, bool prefetch);
169 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
171 struct kvm_memory_slot *slot, u64 gfn,
177 kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), in kvm_flush_remote_tlbs_gfn()
231 gfn_t gfn; member
303 fault.gfn = fault.addr >> PAGE_SHIFT; in kvm_mmu_do_page_fault()
304 fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn); in kvm_mmu_do_page_fault()
[all …]
tdp_mmu.c
199 sp->gfn = gfn; in tdp_mmu_init_sp()
324 gfn_t base_gfn = sp->gfn; in handle_removed_pt()
629 iter->gfn, iter->level); in tdp_mmu_iter_set_spte()
1055 tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) { in kvm_tdp_mmu_map()
1601 gfn + BITS_PER_LONG) { in clear_dirty_pt_masked()
1609 !(mask & (1UL << (iter.gfn - gfn)))) in clear_dirty_pt_masked()
1612 mask &= ~(1UL << (iter.gfn - gfn)); in clear_dirty_pt_masked()
1683 if (iter.gfn < start || iter.gfn >= end) in zap_collapsible_spte_range()
1730 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { in write_protect_gfn()
1785 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_get_walk()
[all …]
tdp_iter.c
15 SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level); in tdp_iter_refresh_sptep()
29 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in tdp_iter_restart()
97 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in try_step_down()
116 if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) == in try_step_side()
120 iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); in try_step_side()
121 iter->next_last_level_gfn = iter->gfn; in try_step_side()
139 iter->gfn = gfn_round_for_level(iter->gfn, iter->level); in try_step_up()
paging_tmpl.h
91 gfn_t gfn; member
322 gfn_t gfn; in FNAME() local
445 gfn += pse36_gfn_delta(pte); in FNAME()
538 gfn_t gfn; in FNAME() local
544 gfn = gpte_to_gfn(gpte); in FNAME()
634 gfn_t base_gfn = fault->gfn; in FNAME()
636 WARN_ON_ONCE(gw->gfn != base_gfn); in FNAME()
802 fault->gfn = walker.gfn; in FNAME()
884 gpa = gfn_to_gpa(walker.gfn); in FNAME()
912 gfn_t gfn; in FNAME() local
[all …]
mmu.c
825 gfn_t gfn; in account_shadowed() local
828 gfn = sp->gfn; in account_shadowed()
873 gfn_t gfn; in unaccount_shadowed() local
876 gfn = sp->gfn; in unaccount_shadowed()
1094 gfn_t gfn; in rmap_remove() local
1497 gfn_t gfn; member
2157 if (sp->gfn != gfn) { in kvm_mmu_find_shadow_page()
2256 sp->gfn = gfn; in kvm_mmu_alloc_shadow_page()
2979 gfn_t gfn; in direct_pte_prefetch_many() local
4227 arch.gfn = gfn; in kvm_arch_setup_async_pf()
[all …]
page_track.h
19 gfn_t gfn);
21 struct kvm_memory_slot *slot, gfn_t gfn);
24 const struct kvm_memory_slot *slot, gfn_t gfn);
/openbmc/linux/drivers/gpu/drm/i915/gvt/
page_track.c
35 struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_find_page_track() argument
37 return radix_tree_lookup(&vgpu->page_track_tree, gfn); in intel_vgpu_find_page_track()
56 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_register_page_track()
67 ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); in intel_vgpu_register_page_track()
83 unsigned long gfn) in intel_vgpu_unregister_page_track() argument
87 track = radix_tree_delete(&vgpu->page_track_tree, gfn); in intel_vgpu_unregister_page_track()
90 intel_gvt_page_track_remove(vgpu, gfn); in intel_vgpu_unregister_page_track()
108 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_enable_page_track()
115 ret = intel_gvt_page_track_add(vgpu, gfn); in intel_vgpu_enable_page_track()
135 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_disable_page_track()
[all …]
page_track.h
45 struct intel_vgpu *vgpu, unsigned long gfn);
48 unsigned long gfn, gvt_page_track_handler_t handler,
51 unsigned long gfn);
53 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
54 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
kvmgt.c
92 gfn_t gfn; member
100 gfn_t gfn; member
206 gvt_unpin_guest_page(vgpu, gfn, size); in gvt_dma_unmap_page()
236 if (gfn < itr->gfn) in __gvt_cache_find_gfn()
238 else if (gfn > itr->gfn) in __gvt_cache_find_gfn()
257 new->gfn = gfn; in __gvt_cache_add()
268 if (gfn < itr->gfn) in __gvt_cache_add()
356 if (gfn == p->gfn) { in __kvmgt_protect_table_find()
381 if (WARN(!p, "gfn: 0x%llx\n", gfn)) in kvmgt_protect_table_add()
384 p->gfn = gfn; in kvmgt_protect_table_add()
[all …]
/openbmc/linux/arch/powerpc/kvm/
book3s_hv_uvmem.c
295 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn()
339 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_gfn_is_uvmem_pfn()
369 if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { in kvmppc_next_nontransitioned_gfn()
383 *gfn = i; in kvmppc_next_nontransitioned_gfn()
617 unsigned long uvmem_pfn, gfn; in kvmppc_uvmem_drop_pages() local
624 gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
904 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_share_page()
920 kvmppc_gfn_shared(gfn, kvm); in kvmppc_share_page()
962 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_in()
1067 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_out()
[all …]
book3s_64_mmu_hv.c
578 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_hv_page_fault()
859 gfn_t gfn; in kvm_unmap_gfn_range_hv() local
862 for (gfn = range->start; gfn < range->end; gfn++) in kvm_unmap_gfn_range_hv()
865 for (gfn = range->start; gfn < range->end; gfn++) in kvm_unmap_gfn_range_hv()
875 unsigned long gfn; in kvmppc_core_flush_memslot_hv() local
900 unsigned long gfn) in kvm_age_rmapp() argument
956 gfn_t gfn; in kvm_age_gfn_hv() local
960 for (gfn = range->start; gfn < range->end; gfn++) in kvm_age_gfn_hv()
963 for (gfn = range->start; gfn < range->end; gfn++) in kvm_age_gfn_hv()
1119 unsigned long gfn; in kvmppc_harvest_vpa_dirty() local
[all …]
e500_mmu_host.c
353 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
354 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map()
381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
409 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
412 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
414 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
416 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
449 pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
453 __func__, (long)gfn); in kvmppc_e500_shadow_map()
488 __func__, (long)gfn, pfn); in kvmppc_e500_shadow_map()
[all …]
book3s_hv_rm_mmu.c
104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
113 unsigned long gfn; in kvmppc_set_dirty_from_hpte() local
117 gfn = hpte_rpn(hpte_gr, psize); in kvmppc_set_dirty_from_hpte()
131 unsigned long gfn; in revmap_for_hpte() local
138 *gfnp = gfn; in revmap_for_hpte()
156 unsigned long gfn; in remove_revmap_chain() local
179 kvmppc_update_dirty_map(memslot, gfn, in remove_revmap_chain()
188 unsigned long i, pa, gpa, gfn, psize; in kvmppc_do_h_enter() local
227 gfn = gpa >> PAGE_SHIFT; in kvmppc_do_h_enter()
242 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
[all …]
/openbmc/linux/include/xen/
xen-ops.h
66 xen_pfn_t *gfn, int nr,
79 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
116 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
122 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
130 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array()
176 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
183 return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false); in xen_remap_domain_gfn_range()
/openbmc/linux/include/linux/
kvm_host.h
295 kvm_pfn_t gfn; member
1244 mark_page_dirty(kvm, gfn); \
1514 gfn_t gfn, u64 nr_pages) in kvm_arch_flush_remote_tlbs_range() argument
1653 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) in try_get_memslot()
1676 if (gfn >= slot->base_gfn) { in search_memslots()
1677 if (gfn < slot->base_gfn + slot->npages) in search_memslots()
1693 slot = try_get_memslot(slot, gfn); in ____gfn_to_memslot()
1697 slot = search_memslots(slots, gfn, approx); in ____gfn_to_memslot()
1733 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
1744 static inline gpa_t gfn_to_gpa(gfn_t gfn) in gfn_to_gpa() argument
[all …]
/openbmc/linux/drivers/xen/
xlate_mmu.c
45 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
84 static void setup_hparams(unsigned long gfn, void *data) in setup_hparams() argument
89 info->h_gpfns[info->h_iter] = gfn; in setup_hparams()
145 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
158 data.fgfn = gfn; in xen_xlate_remap_gfn_array()
174 static void unmap_gfn(unsigned long gfn, void *data) in unmap_gfn() argument
179 xrp.gpfn = gfn; in unmap_gfn()
197 static void setup_balloon_gfn(unsigned long gfn, void *data) in setup_balloon_gfn() argument
201 info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
/openbmc/linux/virt/kvm/
kvm_main.c
2451 gfn_t gfn) in gfn_to_hva_memslot() argument
2459 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
2808 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn()
2814 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); in kvm_vcpu_gfn_to_pfn()
2895 map->gfn = gfn; in kvm_vcpu_map()
3075 ++gfn; in kvm_read_guest()
3095 ++gfn; in kvm_vcpu_read_guest()
3179 ++gfn; in kvm_write_guest()
3200 ++gfn; in kvm_vcpu_write_guest()
3344 ++gfn; in kvm_clear_guest()
[all …]
dirty_ring.c
89 static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_invalid() argument
91 smp_store_release(&gfn->flags, 0); in kvm_dirty_gfn_set_invalid()
94 static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_dirtied() argument
96 gfn->flags = KVM_DIRTY_GFN_F_DIRTY; in kvm_dirty_gfn_set_dirtied()
99 static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_harvested() argument
101 return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET; in kvm_dirty_gfn_harvested()
/openbmc/linux/arch/x86/include/asm/
kvm_page_track.h
43 void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages,
52 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
53 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
sev-common.h
86 #define GHCB_MSR_PSC_REQ_GFN(gfn, op) \ argument
90 ((u64)((gfn) & GENMASK_ULL(39, 0)) << 12) | \
125 gfn : 40, member
/openbmc/qemu/hw/s390x/
s390-skeys.c
124 uint64_t pages, gfn; in qmp_dump_skeys() local
164 gfn = block->target_start / TARGET_PAGE_SIZE; in qmp_dump_skeys()
170 ret = skeyclass->get_skeys(ss, gfn, cur_pages, buf); in qmp_dump_skeys()
177 write_keys(f, buf, gfn, cur_pages, &lerr); in qmp_dump_skeys()
182 gfn += cur_pages; in qmp_dump_skeys()
306 uint64_t pages, gfn; in s390_storage_keys_save() local
328 gfn = block->target_start / TARGET_PAGE_SIZE; in s390_storage_keys_save()
337 error = skeyclass->get_skeys(ss, gfn, cur_pages, buf); in s390_storage_keys_save()
349 gfn += cur_pages; in s390_storage_keys_save()
/openbmc/linux/include/trace/events/
kvm.h
261 TP_PROTO(u64 gva, u64 gfn),
263 TP_ARGS(gva, gfn),
267 __field(u64, gfn)
272 __entry->gfn = gfn;
275 TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
280 TP_PROTO(u64 gva, u64 gfn),
282 TP_ARGS(gva, gfn)
287 TP_PROTO(u64 gva, u64 gfn),
289 TP_ARGS(gva, gfn)
/openbmc/linux/arch/riscv/kvm/
vcpu_exit.c
19 gfn_t gfn; in gstage_page_fault() local
23 gfn = fault_addr >> PAGE_SHIFT; in gstage_page_fault()
24 memslot = gfn_to_memslot(vcpu->kvm, gfn); in gstage_page_fault()
25 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in gstage_page_fault()
