// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int prot_virt_host;
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

static int __init prot_virt_setup(char *val)
{
	bool enabled;
	int rc;

	rc = kstrtobool(val, &enabled);
	if (!rc && enabled)
		prot_virt_host = 1;

	if (is_prot_virt_guest() && prot_virt_host) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.\n");
	}

	if (prot_virt_host && !test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.\n");
	}

	return rc;
}
early_param("prot_virt", prot_virt_setup);
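
/*
 * Illustrative only: host-side protected virtualization is opt-in via
 * the kernel command line, e.g.:
 *
 *	prot_virt=1
 *
 * kstrtobool() also accepts "y" and "on"; everything else leaves
 * prot_virt_host at 0.
 */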

static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}
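
/*
 * On success, setup_uv() logs a line like the following (the 8MB size
 * is purely illustrative; the real value comes from
 * uv_info.uv_base_stor_len):
 *
 *	prot_virt: Reserving 8MB as ultravisor base storage
 */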

void adjust_to_uv_max(unsigned long *vmax)
{
	*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
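
/*
 * Example (illustrative values): with uv_info.max_sec_stor_addr at
 * 0x10000000000 and *vmax at 0x20000000000, adjust_to_uv_max() clamps
 * *vmax down to 0x10000000000, so no memory is mapped above what the
 * Ultravisor can handle as secure storage.
 */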

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
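
/*
 * Note: arch_make_page_accessible() below tries uv_pin_shared() first;
 * for a page that is not secure, pinning it shared is enough and avoids
 * a full export via uv_convert_from_secure().
 */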

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page cannot be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
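
/*
 * Example: an anonymous page mapped into exactly one process that also
 * sits in the swap cache is expected to hold two references, one from
 * the mapping and one from the swap cache; a page-cache page with
 * private data (buffers) accounts for one more.
 */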

static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
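	/*
	 * Freeze the page refcount at its expected value so that nobody
	 * can gain a new reference while the page transitions to secure;
	 * if unexpected extra references exist, back off and let the
	 * caller drain the pagevecs.
	 */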
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
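	/*
	 * Flag the page as potentially secure before the Ultravisor call;
	 * see the PG_arch_1 discussion in arch_make_page_accessible().
	 */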
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If the page is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	down_read(&gmap->mm->mmap_sem);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway, this will result in an -EFAULT
	 * for the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks by mapping huge pages later on, this
	 * will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	up_read(&gmap->mm->mmap_sem);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
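
/*
 * Illustrative sketch of a hypothetical caller: since -EAGAIN is a
 * transient result (writeback, pagevecs, or a fault that has been
 * fixed up), such a caller would typically just retry, e.g.:
 *
 *	do {
 *		rc = gmap_convert_to_secure(gmap, gaddr);
 *	} while (rc == -EAGAIN);
 */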

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls will
 * become a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepages cannot be protected, so there is nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can coexist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
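
/*
 * Note: the generic mm code is expected to call arch_make_page_accessible()
 * before the host itself touches page content (e.g. on the writeback and
 * get_user_pages() paths), so the host never accesses a page while it is
 * still secure.
 */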

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			uv_info.inst_calls_list[0],
			uv_info.inst_calls_list[1],
			uv_info.inst_calls_list[2],
			uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n",
			uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};
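
/*
 * The attributes above are registered under the "uv" kobject and the
 * "query" kset created in uv_info_init() below, so they end up as:
 *
 *	/sys/firmware/uv/query/facilities
 *	/sys/firmware/uv/query/max_cpus
 *	/sys/firmware/uv/query/max_guests
 *	/sys/firmware/uv/query/max_address
 */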

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif