xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/setup.c (revision f156a7d1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

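/*
 * Carve the memory pool donated by the host into the contiguous
 * allocations that must exist before the buddy allocator is up: the hyp
 * vmemmap, the VM table, the hyp stage-1 page-table, the host stage-2
 * page-table and the FF-A proxy buffers.
 */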
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}

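/*
 * Rebuild the hyp stage-1 page-table with the early allocator and map
 * everything EL2 needs to run standalone: the idmap, the vectors, the
 * vmemmap, the hyp text/rodata/bss sections, the donated memory pool,
 * the per-cpu areas and stacks, and a read-only view of the vGIC global
 * state.
 */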
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
		if (ret)
			return ret;
	}

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see fix_host_ownership()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(&kvm_vgic_global_state,
				   &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

	return 0;
}

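/*
 * Point each CPU's init parameters at the new hyp PGD and clean them to
 * the PoC, as they are consumed with the MMU off on the next hyp entry.
 */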
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				    (unsigned long)params + sizeof(*params));
	}
}

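/* Thin adapters binding the generic kvm_pgtable_mm_ops to &hpool. */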
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

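/*
 * Leaf walker run over the new hyp stage-1: derive each page's
 * ownership state from its mapping attributes and mirror it into the
 * host stage-2. Pages owned by the hypervisor are taken away from the
 * host entirely; shared pages are re-mapped in the host with the
 * complementary SHARED_{OWNED,BORROWED} state.
 */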
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}

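/*
 * Walker visiting both leaves and tables post-order; each valid entry
 * contributes one reference to the page-table page that contains it.
 */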
static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

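/*
 * Apply fix_host_ownership_walker() to the hyp mappings of every host
 * memblock, so the host stage-2 becomes consistent with the ownership
 * encoded in the hyp stage-1.
 */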
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

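/*
 * Walk the entire hyp VA space (leaves and tables) to bring the
 * page-table page refcounts in line with the number of valid entries
 * they hold.
 */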
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

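/*
 * Second half of pKVM initialisation, entered on the new page-table:
 * bring up the buddy allocator over the hyp stage-1 pool, prepare the
 * host stage-2, switch pkvm_pgtable over to buddy-backed mm_ops, fix up
 * ownership and refcounts, then initialise the per-cpu fixmap, the FF-A
 * proxy and the VM table. Never returns: the result is propagated to
 * the host via the saved context.
 */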
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

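/*
 * First half of pKVM initialisation, reached from the host's
 * __pkvm_init hypercall: carve up the donated memory pool, rebuild the
 * hyp mappings, then jump through the idmap into
 * __pkvm_init_switch_pgd() to install the new page-table and tail-call
 * __pkvm_init_finalise().
 */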
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}