// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
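
/*
 * Reserve memory for the Ultravisor base storage and hand it over via
 * the UV init call. On any failure, support for protected virtualization
 * is disabled (see the fail: path below).
 */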
void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
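
/*
 * Note: arch_make_page_accessible() further down tries uv_pin_shared()
 * first and only falls back to a full export if the pin fails.
 */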

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}
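
/*
 * The conversions below are the counterpart to the destroy above: an
 * export preserves the page content (encrypted and integrity protected),
 * while a destroy clears it.
 */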

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page cannot be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
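
/*
 * Try to make the page mapped at @ptep secure on behalf of the UV call in
 * @uvcb. The PTE must be present and valid, the page must match @exp_page
 * and must not be under writeback; the refcount is frozen at its expected
 * value so that no new references can show up while the UVC runs.
 */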
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, cc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding
	 * a lot of locks, so we can't easily sleep and reschedule. We try
	 * only once, and if the UVC returned busy or partial completion, we
	 * return -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/*
	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * No export is needed either when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it could belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later on,
	 * this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (should_export_before_import(uvcb, gmap->mm))
		uv_convert_from_secure(page_to_phys(page));
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
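
/*
 * Convenience wrapper around gmap_make_secure(): build the Convert To
 * Secure Storage control block for the guest page at @gaddr and import it.
 */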
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif
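
/*
 * Everything below implements the sysfs interface under /sys/firmware/uv
 * (e.g. /sys/firmware/uv/query/max_guests), which exposes the Ultravisor
 * query information and the prot_virt state to userspace.
 */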

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			 uv_info.inst_calls_list[0],
			 uv_info.inst_calls_list[1],
			 uv_info.inst_calls_list[2],
			 uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);
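
/*
 * The dump_* attributes below report the lengths of the buffers involved
 * in dumping a protected guest.
 */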
static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
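
/*
 * The supp_att_* attributes below describe the supported attestation
 * request interface.
 */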
static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};
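
/*
 * The prot_virt_guest and prot_virt_host attributes report whether this
 * system runs as a protected virtualization guest and whether it can host
 * protected guests; they read as 0 when the respective support is not
 * configured in.
 */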
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *page)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif