xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/mm.c (revision f320bc742bc23c1d43567712fe2814bf04b19ebc)
1*f320bc74SQuentin Perret // SPDX-License-Identifier: GPL-2.0-only
2*f320bc74SQuentin Perret /*
3*f320bc74SQuentin Perret  * Copyright (C) 2020 Google LLC
4*f320bc74SQuentin Perret  * Author: Quentin Perret <qperret@google.com>
5*f320bc74SQuentin Perret  */
6*f320bc74SQuentin Perret 
7*f320bc74SQuentin Perret #include <linux/kvm_host.h>
8*f320bc74SQuentin Perret #include <asm/kvm_hyp.h>
9*f320bc74SQuentin Perret #include <asm/kvm_mmu.h>
10*f320bc74SQuentin Perret #include <asm/kvm_pgtable.h>
11*f320bc74SQuentin Perret #include <asm/spectre.h>
12*f320bc74SQuentin Perret 
13*f320bc74SQuentin Perret #include <nvhe/early_alloc.h>
14*f320bc74SQuentin Perret #include <nvhe/gfp.h>
15*f320bc74SQuentin Perret #include <nvhe/memory.h>
16*f320bc74SQuentin Perret #include <nvhe/mm.h>
17*f320bc74SQuentin Perret #include <nvhe/spinlock.h>
18*f320bc74SQuentin Perret 
19*f320bc74SQuentin Perret struct kvm_pgtable pkvm_pgtable;
20*f320bc74SQuentin Perret hyp_spinlock_t pkvm_pgd_lock;
21*f320bc74SQuentin Perret u64 __io_map_base;
22*f320bc74SQuentin Perret 
23*f320bc74SQuentin Perret struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
24*f320bc74SQuentin Perret unsigned int hyp_memblock_nr;
25*f320bc74SQuentin Perret 
26*f320bc74SQuentin Perret int __pkvm_create_mappings(unsigned long start, unsigned long size,
27*f320bc74SQuentin Perret 			  unsigned long phys, enum kvm_pgtable_prot prot)
28*f320bc74SQuentin Perret {
29*f320bc74SQuentin Perret 	int err;
30*f320bc74SQuentin Perret 
31*f320bc74SQuentin Perret 	hyp_spin_lock(&pkvm_pgd_lock);
32*f320bc74SQuentin Perret 	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
33*f320bc74SQuentin Perret 	hyp_spin_unlock(&pkvm_pgd_lock);
34*f320bc74SQuentin Perret 
35*f320bc74SQuentin Perret 	return err;
36*f320bc74SQuentin Perret }
37*f320bc74SQuentin Perret 
38*f320bc74SQuentin Perret unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
39*f320bc74SQuentin Perret 					    enum kvm_pgtable_prot prot)
40*f320bc74SQuentin Perret {
41*f320bc74SQuentin Perret 	unsigned long addr;
42*f320bc74SQuentin Perret 	int err;
43*f320bc74SQuentin Perret 
44*f320bc74SQuentin Perret 	hyp_spin_lock(&pkvm_pgd_lock);
45*f320bc74SQuentin Perret 
46*f320bc74SQuentin Perret 	size = PAGE_ALIGN(size + offset_in_page(phys));
47*f320bc74SQuentin Perret 	addr = __io_map_base;
48*f320bc74SQuentin Perret 	__io_map_base += size;
49*f320bc74SQuentin Perret 
50*f320bc74SQuentin Perret 	/* Are we overflowing on the vmemmap ? */
51*f320bc74SQuentin Perret 	if (__io_map_base > __hyp_vmemmap) {
52*f320bc74SQuentin Perret 		__io_map_base -= size;
53*f320bc74SQuentin Perret 		addr = (unsigned long)ERR_PTR(-ENOMEM);
54*f320bc74SQuentin Perret 		goto out;
55*f320bc74SQuentin Perret 	}
56*f320bc74SQuentin Perret 
57*f320bc74SQuentin Perret 	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
58*f320bc74SQuentin Perret 	if (err) {
59*f320bc74SQuentin Perret 		addr = (unsigned long)ERR_PTR(err);
60*f320bc74SQuentin Perret 		goto out;
61*f320bc74SQuentin Perret 	}
62*f320bc74SQuentin Perret 
63*f320bc74SQuentin Perret 	addr = addr + offset_in_page(phys);
64*f320bc74SQuentin Perret out:
65*f320bc74SQuentin Perret 	hyp_spin_unlock(&pkvm_pgd_lock);
66*f320bc74SQuentin Perret 
67*f320bc74SQuentin Perret 	return addr;
68*f320bc74SQuentin Perret }
69*f320bc74SQuentin Perret 
70*f320bc74SQuentin Perret int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
71*f320bc74SQuentin Perret {
72*f320bc74SQuentin Perret 	unsigned long start = (unsigned long)from;
73*f320bc74SQuentin Perret 	unsigned long end = (unsigned long)to;
74*f320bc74SQuentin Perret 	unsigned long virt_addr;
75*f320bc74SQuentin Perret 	phys_addr_t phys;
76*f320bc74SQuentin Perret 
77*f320bc74SQuentin Perret 	start = start & PAGE_MASK;
78*f320bc74SQuentin Perret 	end = PAGE_ALIGN(end);
79*f320bc74SQuentin Perret 
80*f320bc74SQuentin Perret 	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
81*f320bc74SQuentin Perret 		int err;
82*f320bc74SQuentin Perret 
83*f320bc74SQuentin Perret 		phys = hyp_virt_to_phys((void *)virt_addr);
84*f320bc74SQuentin Perret 		err = __pkvm_create_mappings(virt_addr, PAGE_SIZE, phys, prot);
85*f320bc74SQuentin Perret 		if (err)
86*f320bc74SQuentin Perret 			return err;
87*f320bc74SQuentin Perret 	}
88*f320bc74SQuentin Perret 
89*f320bc74SQuentin Perret 	return 0;
90*f320bc74SQuentin Perret }
91*f320bc74SQuentin Perret 
92*f320bc74SQuentin Perret int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
93*f320bc74SQuentin Perret {
94*f320bc74SQuentin Perret 	unsigned long start, end;
95*f320bc74SQuentin Perret 
96*f320bc74SQuentin Perret 	hyp_vmemmap_range(phys, size, &start, &end);
97*f320bc74SQuentin Perret 
98*f320bc74SQuentin Perret 	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
99*f320bc74SQuentin Perret }
100*f320bc74SQuentin Perret 
101*f320bc74SQuentin Perret static void *__hyp_bp_vect_base;
102*f320bc74SQuentin Perret int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
103*f320bc74SQuentin Perret {
104*f320bc74SQuentin Perret 	void *vector;
105*f320bc74SQuentin Perret 
106*f320bc74SQuentin Perret 	switch (slot) {
107*f320bc74SQuentin Perret 	case HYP_VECTOR_DIRECT: {
108*f320bc74SQuentin Perret 		vector = __kvm_hyp_vector;
109*f320bc74SQuentin Perret 		break;
110*f320bc74SQuentin Perret 	}
111*f320bc74SQuentin Perret 	case HYP_VECTOR_SPECTRE_DIRECT: {
112*f320bc74SQuentin Perret 		vector = __bp_harden_hyp_vecs;
113*f320bc74SQuentin Perret 		break;
114*f320bc74SQuentin Perret 	}
115*f320bc74SQuentin Perret 	case HYP_VECTOR_INDIRECT:
116*f320bc74SQuentin Perret 	case HYP_VECTOR_SPECTRE_INDIRECT: {
117*f320bc74SQuentin Perret 		vector = (void *)__hyp_bp_vect_base;
118*f320bc74SQuentin Perret 		break;
119*f320bc74SQuentin Perret 	}
120*f320bc74SQuentin Perret 	default:
121*f320bc74SQuentin Perret 		return -EINVAL;
122*f320bc74SQuentin Perret 	}
123*f320bc74SQuentin Perret 
124*f320bc74SQuentin Perret 	vector = __kvm_vector_slot2addr(vector, slot);
125*f320bc74SQuentin Perret 	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;
126*f320bc74SQuentin Perret 
127*f320bc74SQuentin Perret 	return 0;
128*f320bc74SQuentin Perret }
129*f320bc74SQuentin Perret 
130*f320bc74SQuentin Perret int hyp_map_vectors(void)
131*f320bc74SQuentin Perret {
132*f320bc74SQuentin Perret 	phys_addr_t phys;
133*f320bc74SQuentin Perret 	void *bp_base;
134*f320bc74SQuentin Perret 
135*f320bc74SQuentin Perret 	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
136*f320bc74SQuentin Perret 		return 0;
137*f320bc74SQuentin Perret 
138*f320bc74SQuentin Perret 	phys = __hyp_pa(__bp_harden_hyp_vecs);
139*f320bc74SQuentin Perret 	bp_base = (void *)__pkvm_create_private_mapping(phys,
140*f320bc74SQuentin Perret 							__BP_HARDEN_HYP_VECS_SZ,
141*f320bc74SQuentin Perret 							PAGE_HYP_EXEC);
142*f320bc74SQuentin Perret 	if (IS_ERR_OR_NULL(bp_base))
143*f320bc74SQuentin Perret 		return PTR_ERR(bp_base);
144*f320bc74SQuentin Perret 
145*f320bc74SQuentin Perret 	__hyp_bp_vect_base = bp_base;
146*f320bc74SQuentin Perret 
147*f320bc74SQuentin Perret 	return 0;
148*f320bc74SQuentin Perret }
149*f320bc74SQuentin Perret 
/*
 * Identity-map (VA == PA) the page-aligned idmap text section at EL2,
 * and derive the bases of the private IO range (__io_map_base) and the
 * vmemmap (__hyp_vmemmap) so that neither can conflict with the idmap.
 *
 * Returns 0 on success, or a negative error from __pkvm_create_mappings().
 */
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	/* Page-align the physical extent of the idmap text section. */
	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	/*
	 * Pick the quarter whose bit (hyp_va_bits - 2) differs from the
	 * idmap's: isolate that bit of the idmap address, then flip it.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	/* vmemmap occupies the upper half of that quarter. */
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
174