// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
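
/*
 * Reserve memory for the Ultravisor base storage and hand it over with the
 * init UVC. If either the allocation or the init UVC fails, support for
 * protected virtualization on the host is disabled.
 */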
void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
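 *
 * Unlike the destroy UVC, which clears the page, the export keeps the page
 * content in encrypted form, so that it can be imported again later.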
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page cannot be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
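
/*
 * Try to convert the page mapped at @ptep with the UV call described by
 * @uvcb. The page reference count is frozen to the expected value first,
 * to make sure no new references appear while the Ultravisor operates on
 * the page.
 */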
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, cc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/*
	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
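 *
 * Returns 0 on success, -EAGAIN if the caller should retry, -EFAULT if the
 * guest address cannot be resolved or is mapped by a huge page, and
 * -EINVAL if the Ultravisor call fails for another reason.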
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
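
/*
 * Convert a guest page to secure by issuing the convert-to-secure UVC for
 * the guest address @gaddr of the configuration described by @gmap.
 */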
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
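 *
 * Return: 0 on success (also if the page could not be looked up), -EFAULT
 *	   if the guest address cannot be translated or is mapped by a huge
 *	   page, otherwise the error returned by the destroy or export UVC.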
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif
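
/*
 * The attributes below expose the Ultravisor query information to
 * userspace through sysfs, under /sys/firmware/uv/.
 */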
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			 uv_info.inst_calls_list[0],
			 uv_info.inst_calls_list[1],
			 uv_info.inst_calls_list[2],
			 uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *page)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
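
/*
 * Set up the /sys/firmware/uv hierarchy: the prot_virt_guest and
 * prot_virt_host indication files and the query subdirectory. This is only
 * done when facility 158 indicates that the Ultravisor is available.
 */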
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif