xref: /openbmc/linux/arch/s390/kernel/uv.c (revision 80d0624d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used by modules, namely the KVM module and any
 * PV guest module.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

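/*
 * uv_init() - donate memory to the Ultravisor for its base storage.
 * @stor_base: physical address of the donated storage area
 * @stor_len: length of the donated storage area in bytes
 *
 * Issues the Init Ultravisor UVC so that the firmware can take ownership
 * of the given memory region. Returns 0 on success and -1 if the UV call
 * fails (rc/rrc are logged in that case).
 */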
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

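/*
 * Reserve the memory the Ultravisor needs for its base storage and hand it
 * over via uv_init(). The region is taken from memblock below 2 GB with
 * 1 MB alignment; if either the allocation or the UV call fails, protected
 * virtualization host support is disabled.
 */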
void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio can not be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}

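/*
 * Try to make a single folio secure by freezing its refcount at the
 * expected value and issuing the caller-provided UVC exactly once.
 * Returns -EAGAIN if the folio is under writeback, -EBUSY if the refcount
 * could not be frozen, 0 on success, -EAGAIN for a busy or partially
 * completed UVC, -ENXIO if the page was not mapped (rc 0x10a), and
 * -EINVAL for any other error.
 */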
static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_writeback(folio))
		return -EAGAIN;
	expected = expected_folio_refs(folio);
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also not needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it could belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct folio *folio;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		folio = page_folio(pte_page(*ptep));
		rc = -EINVAL;
		if (folio_test_large(folio))
			goto unlock;
		rc = -EAGAIN;
		if (folio_trylock(folio)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
			rc = make_folio_secure(folio, uvcb);
			folio_unlock(folio);
		}

		/*
		 * Once we drop the PTL, the folio may get unmapped and
		 * freed immediately. We need a temporary reference.
		 */
		if (rc == -EAGAIN)
			folio_get(folio);
	}
unlock:
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		folio_wait_writeback(folio);
		folio_put(folio);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the folio refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the folio refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

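/*
 * Convert the page at the given guest address to secure storage by
 * building a Convert-to-Secure-Storage UVC for this guest and letting
 * gmap_make_secure() do the actual work.
 */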
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls will
 * become a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
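/*
 * The attributes below are exported to userspace through sysfs. The query
 * group ends up under /sys/firmware/uv/query/ (for example
 * /sys/firmware/uv/query/max_cpus), while prot_virt_guest and
 * prot_virt_host are created directly under /sys/firmware/uv/.
 */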
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return sysfs_emit(buf, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

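/*
 * Create the /sys/firmware/uv hierarchy described above. Nothing is
 * created if facility 158 (the Ultravisor-call facility) is not installed;
 * on any other failure the partially created objects are torn down again.
 */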
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif