// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

static struct hyp_pool host_s2_pool;

const u8 pkvm_hyp_id = 1;

static void host_lock_component(void)
{
	hyp_spin_lock(&host_kvm.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_kvm.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}

static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
	 * so there should be no need to free any of the tail pages to make the
	 * allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_kvm.lock);
	mmu->arch = &host_kvm.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
	mmu->pgt = &host_kvm.pgt;
	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
	WRITE_ONCE(mmu->vmid.vmid, 0);

	return 0;
}

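/*
 * Illustrative note (the caller lives outside this file): the host is
 * assumed to invoke this once per CPU through the pKVM hypercall interface,
 * e.g. roughly kvm_call_hyp_nvhe(__pkvm_prot_finalize) on each CPU, after
 * which that CPU runs with HCR_EL2.VM set and the host stage-2 installed.
 * A second attempt on the same CPU fails with -EPERM below.
 */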
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return true;
		}
	}

	return false;
}

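/*
 * Worked example for find_mem_range() above (hypothetical addresses, for
 * illustration only): with a single memblock region covering
 * [0x80000000, 0xc0000000), looking up 0x90000000 returns true with
 * *range = { 0x80000000, 0xc0000000 }, while looking up 0x10000000 returns
 * false with *range = { 0, 0x80000000 }, i.e. the gap around the address
 * that contains no memory.
 */
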
bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
				      prot, &host_s2_pool);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)					\
	({								\
		int __ret;						\
		hyp_assert_lock_held(&host_kvm.lock);			\
		__ret = fn(__VA_ARGS__);				\
		if (__ret == -ENOMEM) {					\
			__ret = host_stage2_unmap_dev_all();		\
			if (!__ret)					\
				__ret = fn(__VA_ARGS__);		\
		}							\
		__ret;							\
	 })

static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u32 level;
	int ret;

	hyp_assert_lock_held(&host_kvm.lock);
	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte)
		return -EPERM;

	do {
		u64 granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while ((level < KVM_PGTABLE_MAX_LEVELS) &&
			!(kvm_level_supports_block_mapping(level) &&
			  range_included(&cur, range)));

	*range = cur;

	return 0;
}

int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	hyp_assert_lock_held(&host_kvm.lock);

	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	hyp_assert_lock_held(&host_kvm.lock);

	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
			       addr, size, &host_s2_pool, owner_id);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, hence avoiding losing the state because of side-effects in
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}

void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	BUG_ON(!__get_fault_info(esr, &fault));

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}

/* This corresponds to locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
};

struct pkvm_mem_transition {
	u64				nr_pages;

	struct {
		enum pkvm_component_id	id;
		/* Address in the initiator's address space */
		u64			addr;

		union {
			struct {
				/* Address in the completer's address space */
				u64	completer_addr;
			} host;
		};
	} initiator;

	struct {
		enum pkvm_component_id	id;
	} completer;
};

struct pkvm_mem_share {
	const struct pkvm_mem_transition	tx;
	const enum kvm_pgtable_prot		completer_prot;
};

struct check_walk_data {
	enum pkvm_page_state	desired;
	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte);
};

static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      enum kvm_pgtable_walk_flags flag,
				      void * const arg)
{
	struct check_walk_data *d = arg;
	kvm_pte_t pte = *ptep;

	if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
		return -EINVAL;

	return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __check_page_state_visitor,
		.arg	= data,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

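/*
 * Background note (the actual encoding lives in the pKVM headers, not in
 * this file): pkvm_mkstate()/pkvm_getstate() are understood to stash the
 * pkvm_page_state of a mapping in the software-reserved bits of the
 * page-table prot, so the ownership/sharing state below is recovered
 * directly from the PTE.
 */
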
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte) && pte)
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired	= state,
		.get_page_state	= host_get_page_state,
	};

	hyp_assert_lock_held(&host_kvm.lock);
	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);

	return host_stage2_idmap_locked(addr, size, prot);
}

static int host_request_owned_transition(u64 *completer_addr,
					 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int host_request_unshare(u64 *completer_addr,
				const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_share(u64 *completer_addr,
			       const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_unshare(u64 *completer_addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	/* The hyp page-table is a stage-1 table, so use the hyp prot decoder */
	return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
}

static int __hyp_check_page_state_range(u64 addr, u64 size,
					enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired	= state,
		.get_page_state	= hyp_get_page_state,
	};

	hyp_assert_lock_held(&pkvm_pgd_lock);
	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}

static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
		 tx->initiator.id != PKVM_ID_HOST);
}

static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
			 enum kvm_pgtable_prot perms)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (perms != PAGE_HYP)
		return -EPERM;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}

static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size,
					    PKVM_PAGE_SHARED_BORROWED);
}

static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
			      enum kvm_pgtable_prot perms)
{
	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
	enum kvm_pgtable_prot prot;

	prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
	return pkvm_create_mappings_locked(start, end, prot);
}

static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);

	return (ret != size) ? -EFAULT : 0;
}

static int check_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_owned_transition(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_share(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_share():
 *
 * The page owner grants access to another component with a given set
 * of permissions.
 *
 * Initiator: OWNED	=> SHARED_OWNED
 * Completer: NOPAGE	=> SHARED_BORROWED
 */
static int do_share(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_share(share);
	if (ret)
		return ret;

	return WARN_ON(__do_share(share));
}

static int check_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_unshare():
 *
 * The page owner revokes access from another component for a range of
 * pages which were previously shared using do_share().
 *
 * Initiator: SHARED_OWNED	=> OWNED
 * Completer: SHARED_BORROWED	=> NOPAGE
 */
static int do_unshare(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_unshare(share);
	if (ret)
		return ret;

	return WARN_ON(__do_unshare(share));
}

int __pkvm_host_share_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx	= {
			.nr_pages	= 1,
			.initiator	= {
				.id	= PKVM_ID_HOST,
				.addr	= host_addr,
				.host	= {
					.completer_addr = hyp_addr,
				},
			},
			.completer	= {
				.id	= PKVM_ID_HYP,
			},
		},
		.completer_prot	= PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_share(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx	= {
			.nr_pages	= 1,
			.initiator	= {
				.id	= PKVM_ID_HOST,
				.addr	= host_addr,
				.host	= {
					.completer_addr = hyp_addr,
				},
			},
			.completer	= {
				.id	= PKVM_ID_HYP,
			},
		},
		.completer_prot	= PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_unshare(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
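
/*
 * Illustrative usage, not defined in this file: the host kernel is assumed
 * to reach the two helpers above through the pKVM hypercall interface,
 * roughly along the lines of:
 *
 *	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn);
 *	...
 *	WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn));
 *
 * with each call operating on a single page at a time (nr_pages == 1 above).
 */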