xref: /openbmc/linux/arch/s390/kernel/uv.c (revision 99448016)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int prot_virt_host;
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

static int __init prot_virt_setup(char *val)
{
	bool enabled;
	int rc;

	rc = kstrtobool(val, &enabled);
	if (!rc && enabled)
		prot_virt_host = 1;

	if (is_prot_virt_guest() && prot_virt_host) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.\n");
	}

	if (prot_virt_host && !test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.\n");
	}

	return rc;
}
early_param("prot_virt", prot_virt_setup);
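
/*
 * Usage sketch (added note, not part of the original file): host-side
 * protected virtualization is requested by booting with "prot_virt=1"
 * on the kernel command line; it only stays enabled when facility 158
 * is present and we are not ourselves running as a protected guest.
 */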

static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

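	/*
	 * Added note: uv_call() executes the Ultravisor Call; a nonzero
	 * return means the command was rejected, with the return and
	 * reason codes reported in the header's rc/rrc fields logged below.
	 */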
	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

void __init setup_uv(void)
{
	unsigned long uv_stor_base;

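	/*
	 * Explanatory note (an addition, summarizing the call below): the
	 * base storage donated to the Ultravisor is allocated 1 MB aligned
	 * and, as the SZ_2G upper limit suggests, must lie below 2 GB.
	 */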
	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

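/*
 * Added note: clamp the maximum usable address so that nothing is
 * mapped above the highest secure storage address the Ultravisor
 * supports (uv_info.max_sec_stor_addr).
 */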
void adjust_to_uv_max(unsigned long *vmax)
{
	*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications: we know, for example,
 * that a secure page cannot be a huge page.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
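
/*
 * Worked example (added for illustration): an anonymous page mapped by a
 * single PTE and not in the swap cache holds one reference per mapping
 * and nothing else, so the expected "safe" refcount computed above is 1.
 */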

static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
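	/*
	 * Added note: with the refcount frozen, no new references can show
	 * up while the Ultravisor converts the page. PG_arch_1 is set
	 * first so the page is flagged as potentially secure even if the
	 * conversion fails (see arch_make_page_accessible()).
	 */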
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks and maps huge pages later on, this
	 * will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

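	/*
	 * Added note: take the page lock and the PTE lock so that neither
	 * the page state nor the mapping can change underneath the
	 * conversion performed by make_secure_pte().
	 */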
	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

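/*
 * Added note: gmap_convert_to_secure() below is a thin wrapper that
 * builds the convert-to-secure UV call for @gaddr and feeds it to
 * gmap_make_secure(), which handles the locking and retry protocol.
 */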
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Two
 * parallel calls to make_page_accessible are fine, as the UV calls will
 * become a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
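/*
 * Added note: the attributes below end up in sysfs under
 * /sys/firmware/uv/query/ (the "uv" kobject is created under the
 * firmware kobject with a "query" kset inside it; see uv_info_init()).
 */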
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			uv_info.inst_calls_list[0],
			uv_info.inst_calls_list[1],
			uv_info.inst_calls_list[2],
			uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

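	/*
	 * Added note: facility 158 indicates that the Ultravisor call
	 * facility is installed; without it there is no UV information
	 * to expose, so registering the sysfs files is skipped.
	 */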
	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif
416