
1 // SPDX-License-Identifier: GPL-2.0
10 * A pseries guest can be run as a secure guest on Ultravisor-enabled
15 * The page-in or page-out requests from UV will come to HV as hcalls and
31 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
32 * page-in and page-out requests for the same GPA. Concurrent accesses
37 * migrate_vma routines and page-in/out routines.
39 * Per-guest mutex comes with a cost though. Mainly it serializes the
40 * fault path as page-out can occur when HV faults on accessing secure
41 * guest pages. Currently UV issues page-in requests for all the guest
43 * not a cause for concern. Also currently the number of page-outs caused
45 * overcommitting, then we might see concurrent guest driven page-outs.
49 * 1. kvm->srcu - Protects KVM memslots
50 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
51 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
52 * as sync-points for page-in/out
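The same nesting shows up later in the page-in and page-out paths (srcu_read_lock, then mmap_read_lock, then mutex_lock on uvmem_lock). Below is a minimal userspace sketch of that ordering, with pthread locks standing in for srcu, the mmap lock and the uvmem mutex; the names and the body of the "migration" step are illustrative only, not the kernel's code.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t srcu_lock  = PTHREAD_RWLOCK_INITIALIZER; /* 1. kvm->srcu            */
static pthread_rwlock_t mmap_lock  = PTHREAD_RWLOCK_INITIALIZER; /* 2. kvm->mm->mmap_lock   */
static pthread_mutex_t  uvmem_lock = PTHREAD_MUTEX_INITIALIZER;  /* 3. kvm->arch.uvmem_lock */

static void page_in_path_model(unsigned long gpa)
{
	pthread_rwlock_rdlock(&srcu_lock);   /* memslot lookup stays stable        */
	pthread_rwlock_rdlock(&mmap_lock);   /* find_vma / migrate_vma helpers     */
	pthread_mutex_lock(&uvmem_lock);     /* serialize page-in/out for this GPA */

	printf("migrate gpa 0x%lx\n", gpa);  /* stands in for the actual migration */

	pthread_mutex_unlock(&uvmem_lock);   /* release in reverse order */
	pthread_rwlock_unlock(&mmap_lock);
	pthread_rwlock_unlock(&srcu_lock);
}

int main(void)
{
	page_in_path_model(0x1000000UL);
	return 0;
}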
61 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
74 * page size. Using 64K page size is correct here because any non-secure
76 * and page-out ensures this.
81 * into 64k mappings and would have done page-outs earlier.
84 * 64K page size and in fact fails any page-in/page-out requests of
85 * non-64K size upfront. If and when UV starts supporting multiple
86 * page-sizes, we will need to revisit this assumption.
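As a concrete illustration of the 64K-only assumption, the sketch below rejects any request whose page shift is not 16 and derives the tracked GFN from the GPA. The constant and helper names are invented for this example and are not the kernel's.

#include <stdio.h>

#define SECURE_PAGE_SHIFT 16UL   /* 64K pages, as assumed above */

static int check_and_convert(unsigned long gpa, unsigned long page_shift,
			     unsigned long *gfn)
{
	if (page_shift != SECURE_PAGE_SHIFT)
		return -1;            /* non-64K page-in/out requests are failed upfront */
	*gfn = gpa >> page_shift;     /* GFN index used by the per-slot pfn[] tracking   */
	return 0;
}

int main(void)
{
	unsigned long gfn;

	if (!check_and_convert(0x12340000UL, 16, &gfn))
		printf("gpa 0x12340000 -> gfn 0x%lx\n", gfn);   /* prints gfn 0x1234 */
	return 0;
}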
106 * ---------------
109 * (a) Secure - The GFN is secure. The GFN is associated with
111 * to the Hypervisor. This GFN can be backed by a secure-PFN,
112 * or can be backed by a normal-PFN with contents encrypted.
113 * The former is true when the GFN is paged into the
114 * ultravisor. The latter is true when the GFN is paged-out
117 * (b) Shared - The GFN is shared. The GFN is associated with a
119 * Hypervisor. This GFN is backed by a normal-PFN and its
120 * content is un-encrypted.
122 * (c) Normal - The GFN is normal. The GFN is associated with
127 * ---------------
130 * the hypervisor. All its GFNs are normal-GFNs.
134 * either Shared-GFNs or Secure-GFNs.
140 * in any of the three states; i.e. Secure-GFN, Shared-GFN,
141 * and Normal-GFN. The VM never executes in this state
142 * in supervisor-mode.
145 * -----------------------------
150 * --------------------
159 * secure-state. At this point any left-over normal-GFNs are
160 * transitioned to Secure-GFNs.
163 * All its GFNs are moved to Normal-GFNs.
165 * UV_TERMINATE transitions the secure-VM back to normal-VM. All
166 * the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
167 * Note: The contents of the normal-GFNs are undefined at this point.
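The transitions above can be summarized as a small state machine. The sketch below is only a model of this prose (Normal to Transient on H_SVM_INIT_START, Transient to Secure on H_SVM_INIT_DONE, back to Normal on abort or UV_TERMINATE); it is not the kernel's representation, which records progress in the kvm->arch.secure_guest flags seen further down.

enum vm_state  { VM_NORMAL, VM_TRANSIENT, VM_SECURE };
enum svm_event { EV_INIT_START, EV_INIT_DONE, EV_INIT_ABORT, EV_TERMINATE };

static enum vm_state vm_next_state(enum vm_state cur, enum svm_event ev)
{
	switch (ev) {
	case EV_INIT_START:              /* H_SVM_INIT_START                                      */
		return cur == VM_NORMAL ? VM_TRANSIENT : cur;
	case EV_INIT_DONE:               /* H_SVM_INIT_DONE: left-over normal GFNs become secure  */
		return cur == VM_TRANSIENT ? VM_SECURE : cur;
	case EV_INIT_ABORT:              /* all GFNs move back to Normal-GFNs                     */
	case EV_TERMINATE:               /* secure/shared GFNs become normal, contents undefined  */
		return VM_NORMAL;
	}
	return cur;
}

int main(void)
{
	enum vm_state s = VM_NORMAL;

	s = vm_next_state(s, EV_INIT_START);   /* -> VM_TRANSIENT */
	s = vm_next_state(s, EV_INIT_DONE);    /* -> VM_SECURE    */
	return s == VM_SECURE ? 0 : 1;
}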
170 * -------------------------
172 * Secure GFN is associated with a secure-PFN; also called uvmem_pfn,
173 * when the GFN is paged-in. Its pfn[] has KVMPPC_GFN_UVMEM_PFN flag
174 * set, and contains the value of the secure-PFN.
175 * It is associated with a normal-PFN; also called mem_pfn, when
177 * The value of the normal-PFN is not tracked.
179 * Shared GFN is associated with a normal-PFN. Its pfn[] has
180 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
183 * Normal GFN is associated with normal-PFN. Its pfn[] has
184 * no flag set. The value of the normal-PFN is not tracked.
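A hypothetical model of that pfn[] encoding is sketched below: a flag in the top bits records the GFN state, and for the uvmem case the low bits carry the device PFN. The bit positions and mask values here are assumptions made for the example; the authoritative definitions (KVMPPC_GFN_UVMEM_PFN, KVMPPC_GFN_MEM_PFN, KVMPPC_GFN_SHARED, KVMPPC_GFN_FLAG_MASK) live in the source file itself.

#include <stdint.h>
#include <stdio.h>

#define GFN_UVMEM_PFN  (1ULL << 63)   /* secure GFN backed by a device (uvmem) PFN */
#define GFN_MEM_PFN    (1ULL << 62)   /* secure GFN paged out to an ordinary PFN   */
#define GFN_SHARED     (1ULL << 61)   /* shared GFN                                */
#define GFN_FLAG_MASK  (GFN_UVMEM_PFN | GFN_MEM_PFN | GFN_SHARED)
#define GFN_PFN_MASK   (~GFN_FLAG_MASK)

int main(void)
{
	uint64_t entry = GFN_UVMEM_PFN | 0x1000;   /* secure GFN, device pfn 0x1000 */

	if (entry & GFN_UVMEM_PFN)
		printf("secure GFN, uvmem pfn 0x%llx\n",
		       (unsigned long long)(entry & GFN_PFN_MASK));
	else if (!(entry & GFN_FLAG_MASK))
		printf("normal GFN, backing pfn not tracked\n");
	return 0;
}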
187 * --------------------
189 * [table occupying source lines 189-200; rows omitted from this listing]
203 * --------------------
205 * [table occupying source lines 205-216; rows omitted from this listing]
234 unsigned long gpa;
254 return -ENOMEM; in kvmppc_uvmem_slot_init()
255 p->pfns = vcalloc(slot->npages, sizeof(*p->pfns)); in kvmppc_uvmem_slot_init()
256 if (!p->pfns) { in kvmppc_uvmem_slot_init()
258 return -ENOMEM; in kvmppc_uvmem_slot_init()
260 p->nr_pfns = slot->npages; in kvmppc_uvmem_slot_init()
261 p->base_pfn = slot->base_gfn; in kvmppc_uvmem_slot_init()
263 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
264 list_add(&p->list, &kvm->arch.uvmem_pfns); in kvmppc_uvmem_slot_init()
265 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
267 return 0; in kvmppc_uvmem_slot_init()
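kvmppc_uvmem_slot_init() above allocates one tracking word per guest page of the memslot and hangs the result off kvm->arch.uvmem_pfns under uvmem_lock. The fragment below is a userspace model of that structure, with calloc() standing in for vcalloc() and the list handling and locking elided; the names are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct uvmem_slot {
	unsigned long base_pfn;   /* first GFN covered by this slot  */
	unsigned long nr_pfns;    /* number of GFNs that are tracked */
	uint64_t *pfns;           /* one state/pfn word per GFN      */
};

static struct uvmem_slot *uvmem_slot_init(unsigned long base_gfn, unsigned long npages)
{
	struct uvmem_slot *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->pfns = calloc(npages, sizeof(*p->pfns));
	if (!p->pfns) {
		free(p);
		return NULL;
	}
	p->base_pfn = base_gfn;
	p->nr_pfns = npages;
	return p;
}

/* Return the tracking word for @gfn, or NULL if it is outside the slot. */
static uint64_t *uvmem_slot_entry(struct uvmem_slot *p, unsigned long gfn)
{
	if (gfn < p->base_pfn || gfn >= p->base_pfn + p->nr_pfns)
		return NULL;
	return &p->pfns[gfn - p->base_pfn];
}

int main(void)
{
	struct uvmem_slot *p = uvmem_slot_init(0x100, 64);

	if (p && uvmem_slot_entry(p, 0x110))
		printf("gfn 0x110 tracked at index %lu\n", 0x110UL - p->base_pfn);
	return 0;
}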
277 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
278 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { in kvmppc_uvmem_slot_free()
279 if (p->base_pfn == slot->base_gfn) { in kvmppc_uvmem_slot_free()
280 vfree(p->pfns); in kvmppc_uvmem_slot_free()
281 list_del(&p->list); in kvmppc_uvmem_slot_free()
286 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
294 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_mark_gfn()
295 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn()
296 unsigned long index = gfn - p->base_pfn; in kvmppc_mark_gfn()
299 p->pfns[index] = uvmem_pfn | flag; in kvmppc_mark_gfn()
301 p->pfns[index] = flag; in kvmppc_mark_gfn()
307 /* mark the GFN as secure-GFN associated with the @uvmem_pfn device-PFN. */
314 /* mark the GFN as secure-GFN associated with a memory-PFN. */
317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
323 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); in kvmppc_gfn_shared()
326 /* mark the GFN as a non-existent GFN. */
329 kvmppc_mark_gfn(gfn, kvm, 0, 0); in kvmppc_gfn_remove()
332 /* return true if the GFN is a secure-GFN backed by a secure-PFN */
338 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_gfn_is_uvmem_pfn()
339 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_gfn_is_uvmem_pfn()
340 unsigned long index = gfn - p->base_pfn; in kvmppc_gfn_is_uvmem_pfn()
342 if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) { in kvmppc_gfn_is_uvmem_pfn()
344 *uvmem_pfn = p->pfns[index] & in kvmppc_gfn_is_uvmem_pfn()
359 * Must be called with kvm->arch.uvmem_lock held.
368 list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) in kvmppc_next_nontransitioned_gfn()
369 if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { in kvmppc_next_nontransitioned_gfn()
379 for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) { in kvmppc_next_nontransitioned_gfn()
380 unsigned long index = i - p->base_pfn; in kvmppc_next_nontransitioned_gfn()
382 if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) { in kvmppc_next_nontransitioned_gfn()
394 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
397 int ret = 0; in kvmppc_memslot_page_merge()
404 end = start + (memslot->npages << PAGE_SHIFT); in kvmppc_memslot_page_merge()
406 mmap_write_lock(kvm->mm); in kvmppc_memslot_page_merge()
408 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_memslot_page_merge()
415 vm_flags = vma->vm_flags; in kvmppc_memslot_page_merge()
416 ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, in kvmppc_memslot_page_merge()
423 start = vma->vm_end; in kvmppc_memslot_page_merge()
424 } while (end > vma->vm_end); in kvmppc_memslot_page_merge()
426 mmap_write_unlock(kvm->mm); in kvmppc_memslot_page_merge()
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
449 ret = uv_register_mem_slot(kvm->arch.lpid, in __kvmppc_uvmem_memslot_create()
450 memslot->base_gfn << PAGE_SHIFT, in __kvmppc_uvmem_memslot_create()
451 memslot->npages * PAGE_SIZE, in __kvmppc_uvmem_memslot_create()
452 0, memslot->id); in __kvmppc_uvmem_memslot_create()
453 if (ret < 0) { in __kvmppc_uvmem_memslot_create()
457 return 0; in __kvmppc_uvmem_memslot_create()
472 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; in kvmppc_h_svm_init_start()
482 if (!kvm->arch.svm_enabled) in kvmppc_h_svm_init_start()
485 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_start()
504 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_start()
511 * Caller must hold kvm->arch.uvmem_lock.
516 struct kvm *kvm, unsigned long gpa, struct page *fault_page) in __kvmppc_svm_page_out()
518 unsigned long src_pfn, dst_pfn = 0; in __kvmppc_svm_page_out()
519 struct migrate_vma mig = { 0 }; in __kvmppc_svm_page_out()
525 memset(&mig, 0, sizeof(mig)); in __kvmppc_svm_page_out()
535 /* The requested page is already paged-out, nothing to do */ in __kvmppc_svm_page_out()
536 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) in __kvmppc_svm_page_out()
541 return -1; in __kvmppc_svm_page_out()
552 ret = -1; in __kvmppc_svm_page_out()
557 pvt = spage->zone_device_data; in __kvmppc_svm_page_out()
562 * - When HV touches a secure page, for which we do UV_PAGE_OUT in __kvmppc_svm_page_out()
563 * - When a secure page is converted to shared page, we *get* in __kvmppc_svm_page_out()
565 * case we skip page-out. in __kvmppc_svm_page_out()
567 if (!pvt->skip_page_out) in __kvmppc_svm_page_out()
568 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, in __kvmppc_svm_page_out()
569 gpa, 0, page_shift); in __kvmppc_svm_page_out()
589 struct kvm *kvm, unsigned long gpa, in kvmppc_svm_page_out()
594 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
595 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, in kvmppc_svm_page_out()
597 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
620 mmap_read_lock(kvm->mm); in kvmppc_uvmem_drop_pages()
622 addr = slot->userspace_addr; in kvmppc_uvmem_drop_pages()
624 gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
625 for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) { in kvmppc_uvmem_drop_pages()
628 if (!vma || addr >= vma->vm_end) { in kvmppc_uvmem_drop_pages()
629 vma = vma_lookup(kvm->mm, addr); in kvmppc_uvmem_drop_pages()
631 pr_err("Can't find VMA for gfn:0x%lx\n", gfn); in kvmppc_uvmem_drop_pages()
636 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
640 pvt = uvmem_page->zone_device_data; in kvmppc_uvmem_drop_pages()
641 pvt->skip_page_out = skip_page_out; in kvmppc_uvmem_drop_pages()
642 pvt->remove_gfn = true; in kvmppc_uvmem_drop_pages()
645 PAGE_SHIFT, kvm, pvt->gpa, NULL)) in kvmppc_uvmem_drop_pages()
646 pr_err("Can't page out gpa:0x%lx addr:0x%lx\n", in kvmppc_uvmem_drop_pages()
647 pvt->gpa, addr); in kvmppc_uvmem_drop_pages()
653 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
656 mmap_read_unlock(kvm->mm); in kvmppc_uvmem_drop_pages()
668 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_abort()
671 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_h_svm_init_abort()
674 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_abort()
679 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_abort()
681 kvm->arch.secure_guest = 0; in kvmppc_h_svm_init_abort()
682 uv_svm_terminate(kvm->arch.lpid); in kvmppc_h_svm_init_abort()
693 * Called with kvm->arch.uvmem_lock held
695 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) in kvmppc_uvmem_get_page()
708 pfn_last - pfn_first); in kvmppc_uvmem_get_page()
709 if (bit >= (pfn_last - pfn_first)) in kvmppc_uvmem_get_page()
719 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); in kvmppc_uvmem_get_page()
721 pvt->gpa = gpa; in kvmppc_uvmem_get_page()
722 pvt->kvm = kvm; in kvmppc_uvmem_get_page()
725 dpage->zone_device_data = pvt; in kvmppc_uvmem_get_page()
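kvmppc_uvmem_get_page() above hands out a device PFN by finding a clear bit in a bitmap of pfn_last - pfn_first entries and failing if none is free. Below is a small userspace model of that allocation, with a plain byte array instead of the kernel bitmap API; the PFN range is made up for the example.

#include <stdbool.h>
#include <stdio.h>

#define PFN_FIRST 0x100000UL
#define PFN_LAST  0x100400UL                 /* 1024 device PFNs in this model */

static bool pfn_used[PFN_LAST - PFN_FIRST];

/* Return a free device PFN, or -1 if the secure-memory pool is exhausted. */
static long alloc_device_pfn(void)
{
	unsigned long bit;

	for (bit = 0; bit < PFN_LAST - PFN_FIRST; bit++) {
		if (!pfn_used[bit]) {
			pfn_used[bit] = true;
			return (long)(PFN_FIRST + bit);
		}
	}
	return -1;
}

int main(void)
{
	long pfn = alloc_device_pfn();

	if (pfn >= 0)
		printf("allocated device pfn 0x%lx\n", (unsigned long)pfn);
	return 0;
}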
742 unsigned long end, unsigned long gpa, struct kvm *kvm, in kvmppc_svm_page_in()
746 unsigned long src_pfn, dst_pfn = 0; in kvmppc_svm_page_in()
747 struct migrate_vma mig = { 0 }; in kvmppc_svm_page_in()
751 int ret = 0; in kvmppc_svm_page_in()
753 memset(&mig, 0, sizeof(mig)); in kvmppc_svm_page_in()
766 ret = -1; in kvmppc_svm_page_in()
770 dpage = kvmppc_uvmem_get_page(gpa, kvm); in kvmppc_svm_page_in()
772 ret = -1; in kvmppc_svm_page_in()
780 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, in kvmppc_svm_page_in()
781 gpa, 0, page_shift); in kvmppc_svm_page_in()
797 unsigned long gfn = memslot->base_gfn; in kvmppc_uv_migrate_mem_slot()
800 int ret = 0; in kvmppc_uv_migrate_mem_slot()
802 mmap_read_lock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
803 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
811 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_uv_migrate_mem_slot()
812 if (!vma || vma->vm_start > start || vma->vm_end < end) in kvmppc_uv_migrate_mem_slot()
825 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
826 mmap_read_unlock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
837 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_done()
841 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_done()
860 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; in kvmppc_h_svm_init_done()
861 pr_info("LPID %d went secure\n", kvm->arch.lpid); in kvmppc_h_svm_init_done()
864 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_done()
871 * - If the page is already secure, then provision a new page and share
872 * - If the page is a normal page, share the existing page
877 static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa, in kvmppc_share_page()
885 unsigned long gfn = gpa >> page_shift; in kvmppc_share_page()
889 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_share_page()
890 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
893 pvt = uvmem_page->zone_device_data; in kvmppc_share_page()
894 pvt->skip_page_out = true; in kvmppc_share_page()
899 pvt->remove_gfn = false; in kvmppc_share_page()
903 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
908 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
911 pvt = uvmem_page->zone_device_data; in kvmppc_share_page()
912 pvt->skip_page_out = true; in kvmppc_share_page()
913 pvt->remove_gfn = false; /* it continues to be a valid GFN */ in kvmppc_share_page()
918 if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, in kvmppc_share_page()
924 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
926 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_share_page()
936 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_in()
943 unsigned long gfn = gpa >> page_shift; in kvmppc_h_svm_page_in()
946 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_in()
956 return kvmppc_share_page(kvm, gpa, page_shift); in kvmppc_h_svm_page_in()
959 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_in()
960 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_in()
966 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
967 /* Fail the page-in request of an already paged-in page */ in kvmppc_h_svm_page_in()
972 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_in()
973 if (!vma || vma->vm_start > start || vma->vm_end < end) in kvmppc_h_svm_page_in()
976 if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, in kvmppc_h_svm_page_in()
983 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
985 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_in()
986 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_in()
1001 struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data; in kvmppc_uvmem_migrate_to_ram()
1003 if (kvmppc_svm_page_out(vmf->vma, vmf->address, in kvmppc_uvmem_migrate_to_ram()
1004 vmf->address + PAGE_SIZE, PAGE_SHIFT, in kvmppc_uvmem_migrate_to_ram()
1005 pvt->kvm, pvt->gpa, vmf->page)) in kvmppc_uvmem_migrate_to_ram()
1008 return 0; in kvmppc_uvmem_migrate_to_ram()
1014 * Gets called when secure GFN transitions from a secure-PFN
1016 * Gets called with kvm->arch.uvmem_lock held.
1020 unsigned long pfn = page_to_pfn(page) - in kvmppc_uvmem_page_free()
1028 pvt = page->zone_device_data; in kvmppc_uvmem_page_free()
1029 page->zone_device_data = NULL; in kvmppc_uvmem_page_free()
1030 if (pvt->remove_gfn) in kvmppc_uvmem_page_free()
1031 kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1033 kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1046 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_out()
1049 unsigned long gfn = gpa >> page_shift; in kvmppc_h_svm_page_out()
1055 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_out()
1065 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_out()
1066 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_out()
1072 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_out()
1073 if (!vma || vma->vm_start > start || vma->vm_end < end) in kvmppc_h_svm_page_out()
1076 if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL)) in kvmppc_h_svm_page_out()
1079 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_out()
1080 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_out()
1091 return -EFAULT; in kvmppc_send_page_to_uv()
1093 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1097 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, in kvmppc_send_page_to_uv()
1098 0, PAGE_SHIFT); in kvmppc_send_page_to_uv()
1101 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1102 return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT; in kvmppc_send_page_to_uv()
1125 u64 size = 0; in kvmppc_get_secmem_size()
1128 * First try the new ibm,secure-memory nodes which supersede the in kvmppc_get_secmem_size()
1129 * secure-memory-ranges property. in kvmppc_get_secmem_size()
1132 for_each_compatible_node(np, NULL, "ibm,secure-memory") { in kvmppc_get_secmem_size()
1141 np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); in kvmppc_get_secmem_size()
1145 prop = of_get_property(np, "secure-memory-ranges", &len); in kvmppc_get_secmem_size()
1149 for (i = 0; i < len / (sizeof(*prop) * 4); i++) in kvmppc_get_secmem_size()
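In the loop above each secure-memory range occupies four 32-bit cells of the property: two for the 64-bit start address and two for the 64-bit size, which is why the bound is len / (sizeof(*prop) * 4). The sketch below models that summation in userspace with plain endian conversion; the cell layout is an assumption inferred from the loop bound, and the helpers are not devicetree API.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Combine two big-endian 32-bit cells into one 64-bit value. */
static uint64_t read_cells(const uint32_t *cells)
{
	return ((uint64_t)be32toh(cells[0]) << 32) | be32toh(cells[1]);
}

int main(void)
{
	/* One example range: start 0x1'0000'0000, size 0x4000'0000 (1 GB). */
	uint32_t prop[] = {
		htobe32(0x1), htobe32(0x0),          /* start address cells */
		htobe32(0x0), htobe32(0x40000000),   /* size cells          */
	};
	int len = sizeof(prop);
	uint64_t size = 0;
	unsigned int i;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += read_cells(&prop[i * 4 + 2]);   /* size cells of range i */

	printf("secure memory size: 0x%llx\n", (unsigned long long)size);
	return 0;
}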
1160 int ret = 0; in kvmppc_uvmem_init()
1169 * Don't fail the initialization of kvm-hv module if in kvmppc_uvmem_init()
1170 * the platform doesn't export ibm,uv-firmware node. in kvmppc_uvmem_init()
1171 * Let normal guests run on such PEF-disabled platform. in kvmppc_uvmem_init()
1173 pr_info("KVMPPC-UVMEM: No support for secure guests\n"); in kvmppc_uvmem_init()
1184 kvmppc_uvmem_pgmap.range.start = res->start; in kvmppc_uvmem_init()
1185 kvmppc_uvmem_pgmap.range.end = res->end; in kvmppc_uvmem_init()
1196 pfn_first = res->start >> PAGE_SHIFT; in kvmppc_uvmem_init()
1198 kvmppc_uvmem_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL); in kvmppc_uvmem_init()
1200 ret = -ENOMEM; in kvmppc_uvmem_init()
1204 pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size); in kvmppc_uvmem_init()
1209 release_mem_region(res->start, size); in kvmppc_uvmem_init()