// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver will be used to manage
 * the movement of guest pages between the normal memory managed by
 * the hypervisor (HV) and secure memory managed by the Ultravisor (UV).
 *
 * The page-in or page-out requests from UV will come to HV as hcalls and
 * HV will call back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
 * shared between UV and HV. However such pages aren't represented by
 * device private memory and mappings to shared memory exist in both
 * UV and HV page tables.
 */

/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and the guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * the migrate_vma routines and the page-in/out routines.
 *
 * The per-guest mutex comes with a cost though. Mainly it serializes the
 * fault path, as page-out can occur when HV faults on accessing secure
 * guest pages. Currently UV issues page-in requests for all the guest
 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also, currently the number of page-outs caused
 * by HV touching secure pages is very low. If and when UV supports
 * overcommitting, we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
 *			     as sync-points for page-in/out
 */

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls at PAGE_SIZE (64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever the guest shares a secure page, UV will split and
 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
 *
 * HV invalidating a page: When a regular page belonging to a secure
 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles a page fault of a page belonging
 * to a secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page-sizes, we need to break this assumption.
 */
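
/*
 * Illustrative sketch (not a compiled helper) of the lock nesting the hcall
 * handlers below follow; see kvmppc_h_svm_page_in()/kvmppc_h_svm_page_out()
 * for the real thing:
 *
 *	srcu_idx = srcu_read_lock(&kvm->srcu);		// 1. memslots
 *	mmap_read_lock(kvm->mm);			// 2. find_vma/migrate_vma
 *	mutex_lock(&kvm->arch.uvmem_lock);		// 3. page-in/out sync point
 *	... migrate_vma_setup() / uv_page_in() / uv_page_out() ...
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 *	mmap_read_unlock(kvm->mm);
 *	srcu_read_unlock(&kvm->srcu, srcu_idx);
 */
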
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <linux/of.h>
#include <linux/memremap.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

/*
 * States of a GFN
 * ---------------
 * The GFN can be in one of the following states.
 *
 * (a) Secure - The GFN is secure. The GFN is associated with
 *	a Secure VM, the contents of the GFN are not accessible
 *	to the Hypervisor. This GFN can be backed by a secure-PFN,
 *	or can be backed by a normal-PFN with contents encrypted.
 *	The former is true when the GFN is paged-in to the
 *	ultravisor. The latter is true when the GFN is paged-out
 *	of the ultravisor.
 *
 * (b) Shared - The GFN is shared. The GFN is associated with a
 *	secure VM. The contents of the GFN are accessible to the
 *	Hypervisor. This GFN is backed by a normal-PFN and its
 *	content is un-encrypted.
 *
 * (c) Normal - The GFN is normal. The GFN is associated with
 *	a normal VM. The contents of the GFN are accessible to
 *	the Hypervisor. Its content is never encrypted.
 *
 * States of a VM
 * --------------
 *
 * Normal VM: A VM whose contents are always accessible to
 *	the hypervisor. All its GFNs are normal-GFNs.
 *
 * Secure VM: A VM whose contents are not accessible to the
 *	hypervisor without the VM's consent. Its GFNs are
 *	either Shared-GFNs or Secure-GFNs.
 *
 * Transient VM: A Normal VM that is transitioning to a secure VM.
 *	The transition starts on successful return of
 *	H_SVM_INIT_START, and ends on successful return
 *	of H_SVM_INIT_DONE. This transient VM can have GFNs
 *	in any of the three states, i.e. Secure-GFN, Shared-GFN,
 *	and Normal-GFN. The VM never executes in this state
 *	in supervisor-mode.
 *
 * Memory slot State
 * -----------------
 *	The state of a memory slot mirrors the state of the
 *	VM the memory slot is associated with.
 *
 * VM State transition
 * -------------------
 *
 *  A VM always starts in Normal Mode.
 *
 *  H_SVM_INIT_START moves the VM into the transient state. During this
 *  time the Ultravisor may request some of its GFNs to be shared or
 *  secured. So its GFNs can be in one of the three GFN states.
 *
 *  H_SVM_INIT_DONE moves the VM entirely from the transient state to
 *  the secure state. At this point any left-over normal-GFNs are
 *  transitioned to Secure-GFNs.
 *
 *  H_SVM_INIT_ABORT moves the transient VM back to a normal VM.
 *  All its GFNs are moved to Normal-GFNs.
 *
 *  UV_TERMINATE transitions the secure-VM back to a normal-VM. All
 *  the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
 *  Note: the contents of the normal-GFNs are undefined at this point.
 *
 * GFN state implementation
 * ------------------------
 *
 * A Secure GFN is associated with a secure-PFN, also called uvmem_pfn,
 * when the GFN is paged-in. Its pfns[] entry has the KVMPPC_GFN_UVMEM_PFN
 * flag set, and contains the value of the secure-PFN.
 * It is associated with a normal-PFN, also called mem_pfn, when
 * the GFN is paged-out. Its pfns[] entry has the KVMPPC_GFN_MEM_PFN flag
 * set. The value of the normal-PFN is not tracked.
 *
 * A Shared GFN is associated with a normal-PFN. Its pfns[] entry has
 * the KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
 * is not tracked.
 *
 * A Normal GFN is associated with a normal-PFN. Its pfns[] entry has
 * no flag set. The value of the normal-PFN is not tracked.
 *
 * Life cycle of a GFN
 * -------------------
 *
 * --------------------------------------------------------------
 * |        |  Share    |  Unshare  |   SVM     |H_SVM_INIT_DONE|
 * |        | operation | operation |  abort/   |               |
 * |        |           |           | terminate |               |
 * --------------------------------------------------------------
 * |        |           |           |           |               |
 * | Secure |  Shared   |  Secure   |  Normal   |    Secure     |
 * |        |           |           |           |               |
 * | Shared |  Shared   |  Secure   |  Normal   |    Shared     |
 * |        |           |           |           |               |
 * | Normal |  Shared   |  Secure   |  Normal   |    Secure     |
 * --------------------------------------------------------------
 *
 * Life cycle of a VM
 * ------------------
 *
 * ------------------------------------------------------------------
 * |         |  start   |  H_SVM_  | H_SVM_  |  H_SVM_   | UV_SVM_  |
 * |         |   VM     |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE |
 * |         |          |          |         |           |          |
 * ------------------------------------------------------------------
 * |         |          |          |         |           |          |
 * | Normal  | Normal   | Transient|  Error  |   Error   |  Normal  |
 * |         |          |          |         |           |          |
 * | Secure  |  Error   |  Error   |  Error  |   Error   |  Normal  |
 * |         |          |          |         |           |          |
 * |Transient|   N/A    |  Error   | Secure  |  Normal   |  Normal  |
 * ------------------------------------------------------------------
 */
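
/*
 * Reading the tables above, one example (illustrative only): a Normal GFN
 * that the guest shares via H_SVM_PAGE_IN with H_PAGE_IN_SHARED becomes
 * Shared and stays Shared across H_SVM_INIT_DONE; if the guest later
 * unshares it, it becomes Secure; an SVM abort or terminate returns it
 * to Normal.
 */
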
#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
#define KVMPPC_GFN_SHARED	(1UL << 61)
#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK	(~KVMPPC_GFN_FLAG_MASK)

struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
	bool remove_gfn;
};
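
/*
 * Example (illustrative only) of how a GFN's state is encoded in the pfns[]
 * array of its kvmppc_uvmem_slot: a secure GFN currently backed by device
 * PFN 0x1234 is recorded as
 *
 *	p->pfns[gfn - p->base_pfn] = KVMPPC_GFN_UVMEM_PFN | 0x1234;
 *
 * and decoded with
 *
 *	uvmem_pfn = p->pfns[index] & KVMPPC_GFN_PFN_MASK;
 *
 * For the other states only the flag bits are stored; the backing PFN value
 * is not tracked.
 */
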
bool kvmppc_uvmem_available(void)
{
	/*
	 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
	 * and our data structures have been initialized successfully.
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
			    unsigned long flag, unsigned long uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (flag == KVMPPC_GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;
			return;
		}
	}
}

/* mark the GFN as a secure-GFN associated with the @uvmem_pfn device-PFN. */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
		unsigned long uvmem_pfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as a secure-GFN associated with a memory-PFN. */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as a shared GFN. */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* mark the GFN as a non-existent GFN. */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/* return true if the GFN is a secure-GFN backed by a secure-PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
				    unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     KVMPPC_GFN_PFN_MASK;
				return true;
			} else
				return false;
		}
	}
	return false;
}
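
/*
 * Typical use of the helpers above (a sketch; the real callers are the
 * page-in/page-out paths below and must hold kvm->arch.uvmem_lock):
 *
 *	mutex_lock(&kvm->arch.uvmem_lock);
 *	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn))
 *		dpage = pfn_to_page(uvmem_pfn);	// GFN is paged-in to UV
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 */
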
/*
 * Starting from *gfn, search for the next available GFN that is not yet
 * transitioned to a secure GFN. Return the value of that GFN in *gfn. If a
 * GFN is found, return true, else return false.
 *
 * Must be called with kvm->arch.uvmem_lock held.
 */
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
		struct kvm *kvm, unsigned long *gfn)
{
	struct kvmppc_uvmem_slot *p = NULL, *iter;
	bool ret = false;
	unsigned long i;

	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
			p = iter;
			break;
		}
	if (!p)
		return ret;
	/*
	 * The code below assumes a one-to-one correspondence between
	 * kvmppc_uvmem_slot and memslot.
	 */
	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
		unsigned long index = i - p->base_pfn;

		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
			*gfn = i;
			ret = true;
			break;
		}
	}
	return ret;
}

static int kvmppc_memslot_page_merge(struct kvm *kvm,
		const struct kvm_memory_slot *memslot, bool merge)
{
	unsigned long gfn = memslot->base_gfn;
	unsigned long end, start = gfn_to_hva(kvm, gfn);
	int ret = 0;
	struct vm_area_struct *vma;
	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

	if (kvm_is_error_hva(start))
		return H_STATE;

	end = start + (memslot->npages << PAGE_SHIFT);

	mmap_write_lock(kvm->mm);
	do {
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma) {
			ret = H_STATE;
			break;
		}
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  merge_flag, &vma->vm_flags);
		if (ret) {
			ret = H_STATE;
			break;
		}
		start = vma->vm_end;
	} while (end > vma->vm_end);

	mmap_write_unlock(kvm->mm);
	return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
	kvmppc_uvmem_slot_free(kvm, memslot);
	kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	int ret = H_PARAMETER;

	if (kvmppc_memslot_page_merge(kvm, memslot, false))
		return ret;

	if (kvmppc_uvmem_slot_init(kvm, memslot))
		goto out1;

	ret = uv_register_mem_slot(kvm->arch.lpid,
				   memslot->base_gfn << PAGE_SHIFT,
				   memslot->npages * PAGE_SIZE,
				   0, memslot->id);
	if (ret < 0) {
		ret = H_PARAMETER;
		goto out;
	}
	return 0;
out:
	kvmppc_uvmem_slot_free(kvm, memslot);
out1:
	kvmppc_memslot_page_merge(kvm, memslot, true);
	return ret;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot, *m;
	int ret = H_SUCCESS;
	int srcu_idx, bkt;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* register the memslots */
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
		if (ret)
			break;
	}

	if (ret) {
		/* undo the memslots that were created before the failure */
		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(m, bkt, slots) {
			if (m == memslot)
				break;
			__kvmppc_uvmem_memslot_delete(kvm, m);
		}
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}
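
/*
 * A sketch of the overall transition to secure mode, as described in the
 * comments at the top of this file (the UV-driven step is the Ultravisor's
 * behaviour, not something enforced here):
 *
 *	H_SVM_INIT_START  -> register memslots with UV, disable KSM merging
 *	UV_ESM (in UV)    -> UV issues H_SVM_PAGE_IN for the guest pages
 *	H_SVM_INIT_DONE   -> migrate any left-over normal GFNs, then set
 *			     KVMPPC_SECURE_INIT_DONE
 */
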
/*
 * Provision a new page on the HV side and copy over the contents
 * from secure memory using the UV_PAGE_OUT uvcall.
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return -1;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to a shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn);
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end,
				      unsigned long page_shift,
				      struct kvm *kvm, unsigned long gpa)
{
	int ret;

	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return ret;
}
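
/*
 * The page-out path above follows the usual migrate_vma protocol (sketched
 * here for reference):
 *
 *	migrate_vma_setup(&mig);	// isolate and unmap the source PFN
 *	// allocate a destination page, uv_page_out() the contents
 *	*mig.dst = migrate_pfn(pfn);	// publish the destination
 *	migrate_vma_pages(&mig);	// switch the PTE over
 *	migrate_vma_finalize(&mig);	// commit, or restore on failure
 */
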
/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV-side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device PTEs
 * in the QEMU page table with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
			     struct kvm *kvm, bool skip_page_out)
{
	int i;
	struct kvmppc_uvmem_page_pvt *pvt;
	struct page *uvmem_page;
	struct vm_area_struct *vma = NULL;
	unsigned long uvmem_pfn, gfn;
	unsigned long addr;

	mmap_read_lock(kvm->mm);

	addr = slot->userspace_addr;

	gfn = slot->base_gfn;
	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

		/* Fetch the VMA if addr is not in the latest fetched one */
		if (!vma || addr >= vma->vm_end) {
			vma = vma_lookup(kvm->mm, addr);
			if (!vma) {
				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
				break;
			}
		}

		mutex_lock(&kvm->arch.uvmem_lock);

		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			uvmem_page = pfn_to_page(uvmem_pfn);
			pvt = uvmem_page->zone_device_data;
			pvt->skip_page_out = skip_page_out;
			pvt->remove_gfn = true;

			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
						  PAGE_SHIFT, kvm, pvt->gpa))
				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
				       pvt->gpa, addr);
		} else {
			/* Remove the shared flag if any */
			kvmppc_gfn_remove(gfn, kvm);
		}

		mutex_unlock(&kvm->arch.uvmem_lock);
	}

	mmap_read_unlock(kvm->mm);
}
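
/*
 * Note on @skip_page_out above: when true, the device PFN is dropped
 * without copying its contents back (__kvmppc_svm_page_out() skips
 * UV_PAGE_OUT), which is appropriate when the page contents no longer
 * matter. When false, as in kvmppc_h_svm_init_abort() below, the contents
 * are paged out to the freshly allocated normal page first.
 */
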
unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx, bkt;
	struct kvm_memory_slot *memslot;

	/*
	 * Expect to be called only after INIT_START and before INIT_DONE.
	 * If INIT_DONE was completed, use the normal VM termination sequence.
	 */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return H_STATE;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

	return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). The
 * device PFN will be used to keep track of the secure page on the HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	lock_page(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}

/*
 * Allocate a PFN from the private device memory pool. If @pagein is true,
 * copy the page from normal memory to secure memory using the UV_PAGE_IN
 * uvcall.
 */
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long gpa, struct kvm *kvm,
		unsigned long page_shift,
		bool pagein)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	if (pagein) {
		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
		spage = migrate_pfn_to_page(*mig.src);
		if (spage) {
			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
					 gpa, 0, page_shift);
			if (ret)
				goto out_finalize;
		}
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage));
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}
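
/*
 * kvmppc_svm_page_in() above is used in two modes: with @pagein true
 * (H_SVM_PAGE_IN) the source page contents are copied into secure memory
 * via UV_PAGE_IN before the PTE is switched to the device PFN; with
 * @pagein false (the H_SVM_INIT_DONE bulk migration below) the PTE is
 * switched over without issuing the copy.
 */
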
static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	unsigned long gfn = memslot->base_gfn;
	struct vm_area_struct *vma;
	unsigned long start, end;
	int ret = 0;

	mmap_read_lock(kvm->mm);
	mutex_lock(&kvm->arch.uvmem_lock);
	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
		ret = H_STATE;
		start = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(start))
			break;

		end = start + (1UL << PAGE_SHIFT);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end)
			break;

		ret = kvmppc_svm_page_in(vma, start, end,
				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
		if (ret) {
			ret = H_STATE;
			break;
		}

		/* relinquish the cpu if needed */
		cond_resched();
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);
	return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;
	long ret = H_SUCCESS;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	/* migrate any unmoved normal pfns to device pfns */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
		if (ret) {
			/*
			 * The pages will remain transitioned.
			 * It's the caller's responsibility to
			 * terminate the VM, which will undo
			 * all state of the VM. Till then
			 * this VM is in an erroneous state.
			 * Its KVMPPC_SECURE_INIT_DONE will
			 * remain unset.
			 */
			ret = H_STATE;
			goto out;
		}
	}

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);

out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, uses the dev_pagemap_ops.migrate_to_ram handler
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
		unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		/*
		 * do not drop the GFN. It is a valid GFN
		 * that is transitioned to a shared GFN.
		 */
		pvt->remove_gfn = false;
	}

retry:
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		pvt->remove_gfn = false; /* it continues to be a valid GFN */
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
			page_shift)) {
		kvmppc_gfn_shared(gfn, kvm);
		ret = H_SUCCESS;
	}
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}
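
/*
 * Note on the retry loop in kvmppc_share_page() above: gfn_to_pfn() is
 * called with kvm->arch.uvmem_lock dropped, so by the time the lock is
 * re-taken the GFN may have been paged in to a device PFN again (for
 * example by a racing H_SVM_PAGE_IN). In that case skip_page_out is set
 * on the new device page and the lookup is retried.
 */
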
/*
 * H_SVM_PAGE_IN: Move a page from normal memory to secure memory.
 *
 * The H_PAGE_IN_SHARED flag makes the page shared, which means that the
 * same memory is visible to both UV and HV.
 */
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		unsigned long flags,
		unsigned long page_shift)
{
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
			       true))
		goto out_unlock;

	ret = H_SUCCESS;

out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing the UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN and the newly
 * provisioned page/PFN gets populated in QEMU's page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when a secure GFN transitions from a secure-PFN
 * to a normal PFN during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	if (pvt->remove_gfn)
		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	else
		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};
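
/*
 * Putting the two callbacks above together, the HV-side fault path looks
 * roughly like this (a sketch):
 *
 *	HV touches a secure page -> fault on the device-private PTE
 *	  -> kvmppc_uvmem_migrate_to_ram()
 *	    -> kvmppc_svm_page_out(): UV_PAGE_OUT into a fresh normal page
 *	  -> the device page is freed -> kvmppc_uvmem_page_free()
 *	    -> device PFN returned to the pool, GFN marked mem_pfn or removed
 */
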
/*
 * H_SVM_PAGE_OUT: Move a page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
		ret = H_SUCCESS;
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
	unsigned long pfn;
	int ret = U_SUCCESS;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			 0, PAGE_SHIFT);
out:
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
	int ret = __kvmppc_uvmem_memslot_create(kvm, new);

	if (!ret)
		ret = kvmppc_uv_migrate_mem_slot(kvm, new);

	return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
	__kvmppc_uvmem_memslot_delete(kvm, old);
}

static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	/*
	 * First try the new ibm,secure-memory nodes which supersede the
	 * secure-memory-ranges property.
	 * If we found some, no need to read the deprecated ones.
	 */
	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
		prop = of_get_property(np, "reg", &len);
		if (!prop)
			continue;
		size += of_read_number(prop + 2, 2);
	}
	if (size)
		return size;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}
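
/*
 * Illustrative device tree layout parsed by kvmppc_get_secmem_size() above,
 * assuming 2 address cells and 2 size cells (the size, i.e. cells 2-3 of
 * "reg", is what gets summed):
 *
 *	secure-memory@100000000 {
 *		compatible = "ibm,secure-memory";
 *		reg = <0x1 0x00000000 0x0 0x40000000>;
 *	};
 */
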
int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of the kvm-hv module if
		 * the platform doesn't export the ibm,uv-firmware node.
		 * Let normal guests run on such a PEF-disabled platform.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.range.start = res->start;
	kvmppc_uvmem_pgmap.range.end = res->end;
	kvmppc_uvmem_pgmap.nr_range = 1;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	/* just one global instance: */
	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.range.start,
			   range_len(&kvmppc_uvmem_pgmap.range));
	kfree(kvmppc_uvmem_bitmap);
}