// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

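/*
 * Carve the host-donated memory region into the hypervisor's private
 * pools: the vmemmap, the VM table, the hyp stage-1 page-table and the
 * host stage-2 page-table. All allocations are served by the early
 * allocator, as the buddy allocator is not up yet.
 */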
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	return 0;
}

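/*
 * Recreate the hyp stage-1 page-table with the early allocator and map
 * everything the hypervisor needs to run on its own: the idmap, the
 * vectors, the vmemmap backing, the hyp text/rodata/bss sections, the
 * donated memory region, and the per-CPU areas and stacks.
 */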
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
		unsigned long hyp_addr;

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		/*
		 * Allocate a contiguous HYP private VA range for the stack
		 * and guard page. The allocation is also aligned based on
		 * the order of its size.
		 */
		ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
		if (ret)
			return ret;

		/*
		 * Since the stack grows downwards, map the stack to the page
		 * at the higher address and leave the lower guard page
		 * unbacked.
		 *
		 * Any valid stack address now has the PAGE_SHIFT bit as 1
		 * and addresses corresponding to the guard page have the
		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
		 */
		hyp_spin_lock(&pkvm_pgd_lock);
		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
					PAGE_SIZE, params->stack_pa, PAGE_HYP);
		hyp_spin_unlock(&pkvm_pgd_lock);
		if (ret)
			return ret;

		/* Update stack_hyp_va to end of the stack's private VA range */
		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
	}

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see fix_host_ownership()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(&kvm_vgic_global_state,
				   &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

	return 0;
}

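/*
 * Advertise the new hyp PGD to all CPUs and clean the updated params to
 * the PoC so they can be read before the MMU and caches are enabled.
 */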
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				    (unsigned long)params + sizeof(*params));
	}
}

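/* Thin wrappers around the hyp buddy allocator, used as mm_ops callbacks. */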
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

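/*
 * Mirror the hyp stage-1 ownership state of each mapped memory page
 * into the host stage-2: pages owned by the hypervisor are taken away
 * from the host, and the SHARED_OWNED/SHARED_BORROWED states are
 * inverted, as the host sits on the other side of the share.
 */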
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

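/*
 * Walk the hyp stage-1 over each of the memory ranges in hyp_memory[]
 * and apply fix_host_ownership_walker() to bring the host stage-2 in
 * line with what was mapped at EL2 during setup.
 */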
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

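/*
 * Walk the whole hyp VA space to take a reference on every live
 * page-table page, as the early allocator could not update the
 * hyp_vmemmap refcounts when those pages were allocated.
 */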
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

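/*
 * Second, and final, stage of the pKVM init: runs once the new hyp
 * stage-1 page-table is in place. Switches to the buddy allocator,
 * prepares the host stage-2, fixes up page ownership and refcounts,
 * and reports the result back to the host through the saved context.
 */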
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

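/*
 * First stage of the pKVM init: carve up the memory donated by the
 * host, rebuild the hyp mappings, then jump through the idmap to
 * install the new page-table. On success this does not return here:
 * execution resumes in __pkvm_init_finalise(), which hands the final
 * status back to the host.
 */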
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}