xref: /openbmc/qemu/accel/hvf/hvf-all.c (revision 5a28fa5ba17254d0398a854657b47af3096bd86a)
1 /*
2  * QEMU Hypervisor.framework support
3  *
4  * This work is licensed under the terms of the GNU GPL, version 2.  See
5  * the COPYING file in the top-level directory.
6  *
7  * Contributions after 2012-01-13 are licensed under the terms of the
8  * GNU GPL, version 2 or (at your option) any later version.
9  */
10 
11 #include "qemu/osdep.h"
12 #include "qemu/error-report.h"
13 #include "system/address-spaces.h"
14 #include "system/memory.h"
15 #include "system/hvf.h"
16 #include "system/hvf_int.h"
17 #include "hw/core/cpu.h"
18 #include "hw/boards.h"
19 #include "trace.h"
20 
/* True when "-accel hvf" was selected; exported via AccelClass::allowed. */
bool hvf_allowed;
22 
/*
 * Shadow state for each hvf_slot as seen by Hypervisor.framework.
 * do_hvf_set_memory() uses it to know what must be hv_vm_unmap()ed
 * before a slot can be resized or reused.
 */
struct mac_slot {
    int present;        /* non-zero while a mapping is installed */
    uint64_t size;      /* mapped length in bytes (0 = unused) */
    uint64_t gpa_start; /* guest physical start of the mapping */
    uint64_t gva;       /* host virtual address; not used in this file */
};

/*
 * Indexed by hvf_slot::slot_id.  NOTE(review): assumes
 * ARRAY_SIZE(HVFState::slots) <= 32 — verify if slots are ever grown.
 */
struct mac_slot mac_slots[32];
31 
32 const char *hvf_return_string(hv_return_t ret)
33 {
34     switch (ret) {
35     case HV_SUCCESS:      return "HV_SUCCESS";
36     case HV_ERROR:        return "HV_ERROR";
37     case HV_BUSY:         return "HV_BUSY";
38     case HV_BAD_ARGUMENT: return "HV_BAD_ARGUMENT";
39     case HV_NO_RESOURCES: return "HV_NO_RESOURCES";
40     case HV_NO_DEVICE:    return "HV_NO_DEVICE";
41     case HV_UNSUPPORTED:  return "HV_UNSUPPORTED";
42     case HV_DENIED:       return "HV_DENIED";
43     default:              return "[unknown hv_return value]";
44     }
45 }
46 
47 void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
48                         const char *exp)
49 {
50     if (ret == HV_SUCCESS) {
51         return;
52     }
53 
54     error_report("Error: %s = %s (0x%x, at %s:%u)",
55         exp, hvf_return_string(ret), ret, file, line);
56 
57     abort();
58 }
59 
60 static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
61 {
62     struct mac_slot *macslot;
63     hv_return_t ret;
64 
65     macslot = &mac_slots[slot->slot_id];
66 
67     if (macslot->present) {
68         if (macslot->size != slot->size) {
69             macslot->present = 0;
70             trace_hvf_vm_unmap(macslot->gpa_start, macslot->size);
71             ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
72             assert_hvf_ok(ret);
73         }
74     }
75 
76     if (!slot->size) {
77         return 0;
78     }
79 
80     macslot->present = 1;
81     macslot->gpa_start = slot->start;
82     macslot->size = slot->size;
83     trace_hvf_vm_map(slot->start, slot->size, slot->mem, flags,
84                      flags & HV_MEMORY_READ ?  'R' : '-',
85                      flags & HV_MEMORY_WRITE ? 'W' : '-',
86                      flags & HV_MEMORY_EXEC ?  'E' : '-');
87     ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
88     assert_hvf_ok(ret);
89     return 0;
90 }
91 
/*
 * Install or remove the HVF mapping backing a MemoryRegionSection.
 *
 * @section: guest-physical range plus its backing MemoryRegion
 * @add:     true to (re)register the range, false to drop it
 *
 * Writable MMIO is never mapped (all accesses must trap).  Read-only
 * non-RAM regions are mapped only while in romd mode.  Ranges that are
 * not host-page aligned cannot be mapped and are removed instead.
 */
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    bool writable = !area->readonly && !area->rom_device;
    hv_memory_flags_t flags;
    uint64_t page_size = qemu_real_host_page_size();

    if (!memory_region_is_ram(area)) {
        if (writable) {
            /* Writable MMIO: leave unmapped so every access traps. */
            return;
        } else if (!memory_region_is_romd(area)) {
            /*
             * If the memory device is not in romd_mode, then we actually want
             * to remove the hvf memory slot so all accesses will trap.
             */
             add = false;
        }
    }

    if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
        !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
        /* Not page aligned, so we can not map as RAM */
        add = false;
    }

    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (mem && add) {
        /* Re-registration of an identical range is a no-op. */
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset. set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem, 0)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    /* ROM / romd contents are readable and executable, never writable. */
    if (area->readonly ||
        (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
        flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    } else {
        flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
    }

    /* Now make a new slot. */
    int x;

    /* First slot with size == 0 is free; see hvf_accel_init(). */
    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem, flags)) {
        error_report("Error registering new memory slot");
        abort();
    }
}
176 
177 static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
178 {
179     hvf_slot *slot;
180 
181     slot = hvf_find_overlap_slot(
182             section->offset_within_address_space,
183             int128_get64(section->size));
184 
185     /* protect region against writes; begin tracking it */
186     if (on) {
187         slot->flags |= HVF_SLOT_LOG;
188         hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
189                       HV_MEMORY_READ | HV_MEMORY_EXEC);
190     /* stop tracking region*/
191     } else {
192         slot->flags &= ~HVF_SLOT_LOG;
193         hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
194                       HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
195     }
196 }
197 
198 static void hvf_log_start(MemoryListener *listener,
199                           MemoryRegionSection *section, int old, int new)
200 {
201     if (old != 0) {
202         return;
203     }
204 
205     hvf_set_dirty_tracking(section, 1);
206 }
207 
208 static void hvf_log_stop(MemoryListener *listener,
209                          MemoryRegionSection *section, int old, int new)
210 {
211     if (new != 0) {
212         return;
213     }
214 
215     hvf_set_dirty_tracking(section, 0);
216 }
217 
/* MemoryListener::log_sync callback for the hvf listener. */
static void hvf_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    /*
     * sync of dirty pages is handled elsewhere; just make sure we keep
     * tracking the region.
     */
    hvf_set_dirty_tracking(section, 1);
}
227 
/* MemoryListener::region_add — map the new section into the guest. */
static void hvf_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, true);
}
233 
/* MemoryListener::region_del — unmap the departing section. */
static void hvf_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, false);
}
239 
/*
 * Listener registered on address_space_memory in hvf_accel_init();
 * keeps HVF guest-physical mappings in sync with the QEMU memory map.
 */
static MemoryListener hvf_memory_listener = {
    .name = "hvf",
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
    .region_add = hvf_region_add,
    .region_del = hvf_region_del,
    .log_start = hvf_log_start,
    .log_stop = hvf_log_stop,
    .log_sync = hvf_log_sync,
};
249 
/*
 * AccelClass::init_machine hook: create the HVF VM, initialize the
 * slot table, and register the memory listener.
 *
 * Returns 0 on success, a negative errno on failure; exits the process
 * if HVF access is denied (missing entitlement).
 */
static int hvf_accel_init(AccelState *as, MachineState *ms)
{
    int x;
    hv_return_t ret;
    HVFState *s = HVF_STATE(as);
    int pa_range = 36;  /* default guest PA width if the machine has no hook */
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    if (mc->hvf_get_physical_address_range) {
        pa_range = mc->hvf_get_physical_address_range(ms);
        if (pa_range < 0) {
            return -EINVAL;
        }
    }

    ret = hvf_arch_vm_create(ms, (uint32_t)pa_range);
    if (ret == HV_DENIED) {
        /* Most common cause: the binary lacks the hypervisor entitlement. */
        error_report("Could not access HVF. Is the executable signed"
                     " with com.apple.security.hypervisor entitlement?");
        exit(1);
    }
    assert_hvf_ok(ret);

    /* All slots start empty (size == 0); hvf_set_phys_mem() claims them. */
    s->num_slots = ARRAY_SIZE(s->slots);
    for (x = 0; x < s->num_slots; ++x) {
        s->slots[x].size = 0;
        s->slots[x].slot_id = x;
    }

    QTAILQ_INIT(&s->hvf_sw_breakpoints);

    /* hvf_state must be set before the listener can fire callbacks. */
    hvf_state = s;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);

    return hvf_arch_init();
}
286 
/* gdbstub single-step capabilities: stepping supported, IRQs suppressed. */
static int hvf_gdbstub_sstep_flags(AccelState *as)
{
    return SSTEP_ENABLE | SSTEP_NOIRQ;
}
291 
/* QOM class init: wire the HVF accelerator into the AccelClass vtable. */
static void hvf_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HVF";
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
    ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
}
300 
/* QOM type description for the HVF accelerator. */
static const TypeInfo hvf_accel_type = {
    .name = TYPE_HVF_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_size = sizeof(HVFState),
    .class_init = hvf_accel_class_init,
};
307 
/* Register the accelerator type with QOM at module-init time. */
static void hvf_type_init(void)
{
    type_register_static(&hvf_accel_type);
}

type_init(hvf_type_init);
314