// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from the respective variables in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

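/*
 * Issue the Init Ultravisor call, handing over the memory area at
 * @stor_base (@stor_len bytes) as Ultravisor base storage.
 * Returns 0 on success, -1 if the call failed.
 */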
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

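/*
 * Allocate the base storage requested by the Ultravisor (1 MB aligned,
 * below 2 GB) and hand it over via uv_init(). On any failure, support
 * for protected virtualization is disabled by clearing prot_virt_host.
 */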
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	/*
	 * Keep these conditions in line with the KASAN init code in
	 * has_uv_sec_stor_limit().
	 */
	if (!is_prot_virt_host())
		return;

	if (is_prot_virt_guest()) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.");
		return;
	}

	if (!test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.");
		return;
	}

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization");
	prot_virt_host = 0;
}

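/*
 * Cap the given maximum address at the highest secure storage address
 * the Ultravisor reported, if it reported one.
 */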
void adjust_to_uv_max(unsigned long *vmax)
{
	if (uv_info.max_sec_stor_addr)
		*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

116 
117 /*
118  * Requests the Ultravisor to destroy a guest page and make it
119  * accessible to the host. The destroy clears the page instead of
120  * exporting.
121  *
122  * @paddr: Absolute host address of page to be destroyed
123  */
124 int uv_destroy_page(unsigned long paddr)
125 {
126 	struct uv_cb_cfs uvcb = {
127 		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
128 		.header.len = sizeof(uvcb),
129 		.paddr = paddr
130 	};
131 
132 	if (uv_call(0, (u64)&uvcb))
133 		return -EINVAL;
134 	return 0;
135 }
136 
137 /*
138  * Requests the Ultravisor to encrypt a guest page and make it
139  * accessible to the host for paging (export).
140  *
141  * @paddr: Absolute host address of page to be exported
142  */
143 int uv_convert_from_secure(unsigned long paddr)
144 {
145 	struct uv_cb_cfs uvcb = {
146 		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
147 		.header.len = sizeof(uvcb),
148 		.paddr = paddr
149 	};
150 
151 	if (uv_call(0, (u64)&uvcb))
152 		return -EINVAL;
153 	return 0;
154 }
155 
/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page cannot be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}

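/*
 * Freeze the page refcount at the expected safe value, mark the page
 * as potentially secure (PG_arch_1) and issue the Ultravisor call
 * described by @uvcb. Returns -EAGAIN while the page is under
 * writeback, -EBUSY if the refcount cannot be frozen, -ENXIO if the
 * page is not mapped (or does not match @exp_page) and -EINVAL for
 * any other Ultravisor error.
 */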
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If the page is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later
	 * on, this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

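/*
 * Convenience wrapper around gmap_make_secure() that issues the
 * Convert To Secure Storage UVC for the guest page at guest address
 * @gaddr of the protected guest owning @gmap.
 */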
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls will
 * become a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
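/*
 * The following read-only attributes expose the Ultravisor query
 * information under /sys/firmware/uv/query/. The facilities attribute
 * lists the installed UV call facility bits as four hex words.
 */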
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			uv_info.inst_calls_list[0],
			uv_info.inst_calls_list[1],
			uv_info.inst_calls_list[2],
			uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

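/* Maximum number of virtual CPUs per secure guest. */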
static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

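/* Maximum number of secure guests (secure configurations). */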
static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

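/* Highest address the Ultravisor supports for secure storage. */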
static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

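/*
 * Register the query attributes in sysfs, resulting in:
 *   /sys/firmware/uv/query/facilities
 *   /sys/firmware/uv/query/max_cpus
 *   /sys/firmware/uv/query/max_guests
 *   /sys/firmware/uv/query/max_address
 * Nothing is registered if the Ultravisor facility (158) is not available.
 */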
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif