// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from the ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used within modules, be it the KVM module or any
 * PV guest module.
 *
 * The kernel itself writes these values once in uv_query_info() and
 * then makes some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

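/*
 * Donate the memory area at @stor_base to the Ultravisor and initialize
 * it by issuing the UVC_CMD_INIT_UV Ultravisor call.
 *
 * Returns 0 on success, -1 on failure (the rc/rrc pair is logged).
 */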
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

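/*
 * Allocate the storage area the Ultravisor needs for itself and donate
 * it via uv_init(). If allocation or donation fails, host support for
 * protected virtualization is switched off.
 */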
void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting it.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio cannot be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}

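/*
 * Freeze the folio refcount at its expected safe value, flag the folio
 * as possibly secure via PG_arch_1 and issue the import UV call given
 * in @uvcb. Expects the folio to be locked by the caller.
 */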
static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_writeback(folio))
		return -EAGAIN;
	expected = expected_folio_refs(folio);
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also not needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct folio *folio;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later
	 * on, this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		folio = page_folio(pte_page(*ptep));
		rc = -EINVAL;
		if (folio_test_large(folio))
			goto unlock;
		rc = -EAGAIN;
		if (folio_trylock(folio)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
			rc = make_folio_secure(folio, uvcb);
			folio_unlock(folio);
		}

		/*
		 * Once we drop the PTL, the folio may get unmapped and
		 * freed immediately. We need a temporary reference.
		 */
		if (rc == -EAGAIN)
			folio_get(folio);
	}
unlock:
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		folio_wait_writeback(folio);
		folio_put(folio);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the folio refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the folio refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

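/*
 * gmap_convert_to_secure() - Import (encrypt) a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address of the page to import
 *
 * Thin wrapper around gmap_make_secure() using a UVC_CMD_CONV_TO_SEC_STOR
 * control block. As with gmap_make_secure(), a typical caller is expected
 * to retry on -EAGAIN, e.g.:
 *
 *	do {
 *		rc = gmap_convert_to_secure(gmap, gaddr);
 *	} while (rc == -EAGAIN);
 */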
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel make_page_accessible calls is fine, as the UV calls will become
 * a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
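/*
 * The uv_query_* attributes below expose the uv_info fields, filled in
 * once by uv_query_info() during early boot, read-only to user space.
 */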
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

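/* Registered as the "query" group under /sys/firmware/uv/, see uv_info_init() */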
static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

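/*
 * The two attributes below report whether this kernel runs as a
 * protected virtualization guest and whether it can host protected
 * virtualization guests, respectively.
 */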
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return sysfs_emit(buf, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

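/*
 * Create /sys/firmware/uv and its attributes, but only if the
 * Ultravisor-call facility (stfle bit 158) is present.
 */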
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif