xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/setup.c (revision c4c3c32d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

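/*
 * Size of the hyp per-CPU region, as bounded by the linker-provided
 * section symbols.
 */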
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

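/*
 * Carve the memory region donated by the host into the fixed-size
 * allocations needed before the hyp buddy allocator is up: the
 * hyp_vmemmap, the VM table, the hyp stage-1 and host stage-2
 * page-tables, and the FF-A proxy buffers.
 */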
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}

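/*
 * Recreate the hyp stage-1 page-table with the early page allocator
 * and populate it: the idmap, the vectors, the backing for the
 * hyp_vmemmap, the hyp text/rodata/bss sections, the donated memory
 * region, each CPU's per-CPU area and private stack, and the vGIC
 * global state.
 */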
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
		unsigned long hyp_addr;

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		/*
		 * Allocate a contiguous HYP private VA range for the stack
		 * and guard page. The allocation is also aligned based on
		 * the order of its size.
		 */
		ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
		if (ret)
			return ret;

		/*
		 * Since the stack grows downwards, map the stack to the page
		 * at the higher address and leave the lower guard page
		 * unbacked.
		 *
		 * Any valid stack address now has the PAGE_SHIFT bit as 1
		 * and addresses corresponding to the guard page have the
		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
		 */
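		/*
		 * A sketch of the resulting overflow check (the actual test
		 * lives in the hyp exception entry path, not in this file):
		 *
		 *	if (!(sp & BIT(PAGE_SHIFT)))
		 *		... stack overflow, panic ...
		 */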
		hyp_spin_lock(&pkvm_pgd_lock);
		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
					  PAGE_SIZE, params->stack_pa, PAGE_HYP);
		hyp_spin_unlock(&pkvm_pgd_lock);
		if (ret)
			return ret;

		/* Update stack_hyp_va to the end of the stack's private VA range */
		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
	}

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see fix_host_ownership()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(&kvm_vgic_global_state,
				   &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

	return 0;
}

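/*
 * Point each CPU's init parameters at the new hyp page-table, cleaning
 * them to the PoC as they may be read with the MMU and caches off.
 */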
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}

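/* kvm_pgtable_mm_ops callbacks backed by the hyp buddy allocator. */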
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

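/*
 * Mirror the pKVM ownership state recorded in the hyp stage-1 into the
 * host stage-2: pages owned by the hypervisor are removed from the host,
 * and shared pages are annotated with the complementary SHARED_* state.
 */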
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

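/*
 * Walk the hyp stage-1 mappings of each hyp memblock region and fix up
 * the matching host stage-2 annotations (see fix_host_ownership_walker()).
 */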
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

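/*
 * Take a reference on every valid page-table page in the hyp stage-1,
 * now that the hyp_vmemmap is up (see fix_hyp_pgtable_refcnt_walker()).
 */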
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

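/*
 * Second stage of initialisation, entered on the new page-tables:
 * install the buddy allocator over the early allocator, prepare the
 * host stage-2, fix up ownership and page-table refcounts, and set up
 * the per-CPU fixmap, the FF-A proxy and the VM table.
 */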
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

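/*
 * Entry point of hyp initialisation: carve up the memory donated by the
 * host, recreate the hyp mappings, then jump via the idmap page to
 * switch onto the new page-table and tail-call __pkvm_init_finalise().
 */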
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}