xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/mm.c (revision f71a261a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

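/*
 * Next free address in the private VA area; set by hyp_create_idmap()
 * and bumped by pkvm_alloc_private_va_range().
 */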
static u64 __io_map_base;

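/* Install a mapping in the hyp page-table, serialised by pkvm_pgd_lock. */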
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	Where to store the hypervisor virtual start address of the
 *		allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long base, addr;
	int ret = 0;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Align the allocation based on the order of its size */
	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));

	/* The allocated size is always a multiple of PAGE_SIZE */
	base = addr + PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap? */
	if (!addr || base > __hyp_vmemmap) {
		ret = -ENOMEM;
	} else {
		__io_map_base = base;
		*haddr = addr;
	}

	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}
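
/*
 * Worked example (illustrative, assuming 4 KiB pages): a 6 KiB request
 * has get_order() == 1, so the start address is aligned to
 * PAGE_SIZE << 1 == 8 KiB, and the range consumes
 * PAGE_ALIGN(6 KiB) == 8 KiB of the private VA area.
 */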
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	*haddr = addr + offset_in_page(phys);
	return err;
}
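
/*
 * Example (illustrative, assuming 4 KiB pages): a request for 0x100
 * bytes of MMIO at physical address 0x9000880 is rounded up to one
 * full page, and the returned VA is the start of the new private
 * mapping plus the 0x880 page offset.
 */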
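/*
 * Map a range of hypervisor VAs to the physical pages backing them,
 * one page at a time. The caller must hold pkvm_pgd_lock.
 */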
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}
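/* As above, but takes pkvm_pgd_lock itself. */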
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}
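/*
 * Map the vmemmap entries covering the [phys, phys + size) range of
 * memory onto the backing pages starting at 'back'.
 */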
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long start, end;

	hyp_vmemmap_range(phys, size, &start, &end);

	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}

static void *__hyp_bp_vect_base;
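/*
 * Point this CPU's exception vector base at the slot matching the
 * requested Spectre mitigation vector (see asm/spectre.h).
 */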
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}
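/*
 * Make the bp-hardening vectors reachable at EL2: when the system needs
 * idmapped vectors they get a private mapping, otherwise the hyp alias
 * of __bp_harden_hyp_vecs is used directly.
 */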
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
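	/*
	 * Worked example (illustrative, assuming hyp_va_bits == 48): the
	 * two lines below isolate bit 46 of the idmap address and then
	 * flip it, so the chosen quarter starts at either VA 0 or BIT(46),
	 * whichever does not contain the idmap. The vmemmap then lives in
	 * the upper half of that quarter, from IO base | BIT(45).
	 */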
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}