// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

static struct hyp_pool host_s2_pool;

const u8 pkvm_hyp_id = 1;

static void host_lock_component(void)
{
	hyp_spin_lock(&host_kvm.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_kvm.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}

static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power-of-two multiple of
	 * PAGE_SIZE, so there should be no need to free any of the tail pages
	 * to make the allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_kvm.lock);
	mmu->arch = &host_kvm.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
	mmu->pgt = &host_kvm.pgt;
	atomic64_set(&mmu->vmid.id, 0);

	return 0;
}

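/*
 * Enable the host stage-2 translation on the calling CPU: program the VTTBR
 * and VTCR saved in this CPU's init params from the host MMU, set HCR_EL2.VM
 * in both the saved params and the live register, then invalidate any stale
 * TLB entries. Returns -EPERM if stage-2 is already enabled for this CPU.
 */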
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

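/*
 * Binary search the sorted hyp_memory memblock array for @addr. On a hit,
 * @range is set to the enclosing memory region and true is returned. On a
 * miss, false is returned and @range is set to the hole between the
 * neighbouring regions that contains @addr, so callers can still map the
 * surrounding non-memory (MMIO) area.
 */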
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return true;
		}
	}

	return false;
}

bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
				      prot, &host_s2_pool);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)					\
	({								\
		int __ret;						\
		hyp_assert_lock_held(&host_kvm.lock);			\
		__ret = fn(__VA_ARGS__);				\
		if (__ret == -ENOMEM) {					\
			__ret = host_stage2_unmap_dev_all();		\
			if (!__ret)					\
				__ret = fn(__VA_ARGS__);		\
		}							\
		__ret;							\
	})

static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u32 level;
	int ret;

	hyp_assert_lock_held(&host_kvm.lock);
	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte)
		return -EPERM;

	do {
		u64 granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while ((level < KVM_PGTABLE_MAX_LEVELS) &&
			!(kvm_level_supports_block_mapping(level) &&
			  range_included(&cur, range)));

	*range = cur;

	return 0;
}

int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	hyp_assert_lock_held(&host_kvm.lock);

	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	hyp_assert_lock_held(&host_kvm.lock);

	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
			       addr, size, &host_s2_pool, owner_id);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, hence avoiding the loss of state due to side-effects in
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

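/*
 * Lazily build an identity mapping for a faulting address: look up the memory
 * region (or the MMIO hole between regions) containing @addr, let
 * host_stage2_adjust_range() pick the largest block granule that the existing
 * page-table and the range allow, and install the mapping with the default
 * host protections. -EAGAIN is returned if a valid mapping already exists for
 * @addr, which handle_host_mem_abort() tolerates.
 */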
static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}

void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	BUG_ON(!__get_fault_info(esr, &fault));

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}

/* This corresponds to locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
};

struct pkvm_mem_transition {
	u64 nr_pages;

	struct {
		enum pkvm_component_id id;
		/* Address in the initiator's address space */
		u64 addr;

		union {
			struct {
				/* Address in the completer's address space */
				u64 completer_addr;
			} host;
		};
	} initiator;

	struct {
		enum pkvm_component_id id;
	} completer;
};

struct pkvm_mem_share {
	const struct pkvm_mem_transition tx;
	const enum kvm_pgtable_prot completer_prot;
};

struct check_walk_data {
	enum pkvm_page_state desired;
	enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
};

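/*
 * Leaf walker used by check_page_state_range(): every valid PTE must map
 * memory (not MMIO), and the software-annotated pkvm_page_state of each PTE,
 * as decoded by the component-specific get_page_state() callback, must match
 * the desired state.
 */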
static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      enum kvm_pgtable_walk_flags flag,
				      void * const arg)
{
	struct check_walk_data *d = arg;
	kvm_pte_t pte = *ptep;

	if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
		return -EINVAL;

	return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb = __check_page_state_visitor,
		.arg = data,
		.flags = KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte) && pte)
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired = state,
		.get_page_state = host_get_page_state,
	};

	hyp_assert_lock_held(&host_kvm.lock);
	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);

	return host_stage2_idmap_locked(addr, size, prot);
}

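/*
 * Initiator-side helpers for the host: check or update the page state of the
 * range in the host stage-2 and hand back the corresponding address in the
 * completer's address space. The "request" helpers only validate the current
 * state, while the "initiate" helpers update it.
 */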
static int host_request_owned_transition(u64 *completer_addr,
					 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int host_request_unshare(u64 *completer_addr,
				const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_share(u64 *completer_addr,
			       const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_unshare(u64 *completer_addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __hyp_check_page_state_range(u64 addr, u64 size,
					enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired = state,
		.get_page_state = hyp_get_page_state,
	};

	hyp_assert_lock_held(&pkvm_pgd_lock);
	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}

static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
		 tx->initiator.id != PKVM_ID_HOST);
}

static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
			 enum kvm_pgtable_prot perms)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (perms != PAGE_HYP)
		return -EPERM;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}

static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size,
					    PKVM_PAGE_SHARED_BORROWED);
}

static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
			      enum kvm_pgtable_prot perms)
{
	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
	enum kvm_pgtable_prot prot;

	prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
	return pkvm_create_mappings_locked(start, end, prot);
}

static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);

	return (ret != size) ? -EFAULT : 0;
}

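/*
 * A transition is carried out in two phases: check_share()/check_unshare()
 * validate the current page state on both the initiator's and the completer's
 * side without modifying anything, and only then do __do_share() and
 * __do_unshare() update the page-tables. do_share() and do_unshare() WARN if
 * the commit phase fails after the checks have succeeded.
 */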
static int check_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_owned_transition(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_share(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_share():
 *
 * The page owner grants access to another component with a given set
 * of permissions.
 *
 * Initiator: OWNED => SHARED_OWNED
 * Completer: NOPAGE => SHARED_BORROWED
 */
static int do_share(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_share(share);
	if (ret)
		return ret;

	return WARN_ON(__do_share(share));
}

static int check_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_unshare():
 *
 * The page owner revokes access from another component for a range of
 * pages which were previously shared using do_share().
 *
 * Initiator: SHARED_OWNED => OWNED
 * Completer: SHARED_BORROWED => NOPAGE
 */
static int do_unshare(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_unshare(share);
	if (ret)
		return ret;

	return WARN_ON(__do_unshare(share));
}

int __pkvm_host_share_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx = {
			.nr_pages = 1,
			.initiator = {
				.id = PKVM_ID_HOST,
				.addr = host_addr,
				.host = {
					.completer_addr = hyp_addr,
				},
			},
			.completer = {
				.id = PKVM_ID_HYP,
			},
		},
		.completer_prot = PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_share(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx = {
			.nr_pages = 1,
			.initiator = {
				.id = PKVM_ID_HOST,
				.addr = host_addr,
				.host = {
					.completer_addr = hyp_addr,
				},
			},
			.completer = {
				.id = PKVM_ID_HYP,
			},
		},
		.completer_prot = PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_unshare(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}