Lines Matching +full:protect +full:- +full:exec
51 #include "qemu/error-report.h"
52 #include "qemu/main-loop.h"
53 #include "exec/address-spaces.h"
54 #include "exec/exec-all.h"
61 #include "qemu/guest-random.h"
71 for (x = 0; x < hvf_state->num_slots; ++x) { in hvf_find_overlap_slot()
72 slot = &hvf_state->slots[x]; in hvf_find_overlap_slot()
73 if (slot->size && start < (slot->start + slot->size) && in hvf_find_overlap_slot()
74 (start + size) > slot->start) { in hvf_find_overlap_slot()
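The slot lookup above is a plain interval-overlap test over half-open ranges, with size == 0 marking an unused slot. A minimal standalone sketch of the same predicate (types and names are illustrative, not QEMU's):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: true when the request [start, start+size) overlaps a
     * populated slot [slot_start, slot_start+slot_size); an empty slot
     * (slot_size == 0) never matches. */
    static bool ranges_overlap(uint64_t start, uint64_t size,
                               uint64_t slot_start, uint64_t slot_size)
    {
        return slot_size &&
               start < slot_start + slot_size &&
               start + size > slot_start;
    }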
95 macslot = &mac_slots[slot->slot_id]; in do_hvf_set_memory()
97 if (macslot->present) { in do_hvf_set_memory()
98 if (macslot->size != slot->size) { in do_hvf_set_memory()
99 macslot->present = 0; in do_hvf_set_memory()
100 ret = hv_vm_unmap(macslot->gpa_start, macslot->size); in do_hvf_set_memory()
105 if (!slot->size) { in do_hvf_set_memory()
109 macslot->present = 1; in do_hvf_set_memory()
110 macslot->gpa_start = slot->start; in do_hvf_set_memory()
111 macslot->size = slot->size; in do_hvf_set_memory()
112 ret = hv_vm_map(slot->mem, slot->start, slot->size, flags); in do_hvf_set_memory()
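do_hvf_set_memory() follows the Hypervisor.framework map/unmap pattern: a slot whose size no longer matches is first torn down with hv_vm_unmap() and only then re-established with hv_vm_map(). A minimal sketch of the same sequence outside QEMU, assuming the <Hypervisor/Hypervisor.h> umbrella header and hard-coding read/write/execute permissions (the real code passes flags computed by its caller):

    #include <Hypervisor/Hypervisor.h>   /* assumed umbrella header */
    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch: replace the mapping backing a guest-physical range. The old
     * mapping is torn down first when its size no longer matches. */
    static void remap_guest_ram(void *host_ptr, uint64_t gpa,
                                size_t old_size, size_t new_size)
    {
        if (old_size && old_size != new_size) {
            if (hv_vm_unmap(gpa, old_size) != HV_SUCCESS) {
                abort();
            }
        }
        if (hv_vm_map(host_ptr, gpa, new_size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC)
            != HV_SUCCESS) {
            abort();
        }
    }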
120 MemoryRegion *area = section->mr; in hvf_set_phys_mem()
121 bool writable = !area->readonly && !area->rom_device; in hvf_set_phys_mem()
137 if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || in hvf_set_phys_mem()
138 !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { in hvf_set_phys_mem()
144 section->offset_within_address_space, in hvf_set_phys_mem()
145 int128_get64(section->size)); in hvf_set_phys_mem()
148 if (mem->size == int128_get64(section->size) && in hvf_set_phys_mem()
149 mem->start == section->offset_within_address_space && in hvf_set_phys_mem()
150 mem->mem == (memory_region_get_ram_ptr(area) + in hvf_set_phys_mem()
151 section->offset_within_region)) { in hvf_set_phys_mem()
158 mem->size = 0; in hvf_set_phys_mem()
169 if (area->readonly || in hvf_set_phys_mem()
179 for (x = 0; x < hvf_state->num_slots; ++x) { in hvf_set_phys_mem()
180 mem = &hvf_state->slots[x]; in hvf_set_phys_mem()
181 if (!mem->size) { in hvf_set_phys_mem()
186 if (x == hvf_state->num_slots) { in hvf_set_phys_mem()
191 mem->size = int128_get64(section->size); in hvf_set_phys_mem()
192 mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region; in hvf_set_phys_mem()
193 mem->start = section->offset_within_address_space; in hvf_set_phys_mem()
194 mem->region = area; in hvf_set_phys_mem()
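hvf_set_phys_mem() only registers sections whose guest-physical offset and size are host-page aligned, short-circuits when the section is already mapped at the same host and guest addresses, frees a slot by zeroing its size, and otherwise claims the first slot with size == 0. Read-only and ROM-device regions are mapped without write permission. A hedged sketch of the two decisions, with illustrative helper names:

    #include <Hypervisor/Hypervisor.h>   /* assumed umbrella header */
    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: both the guest-physical offset and the size must be host-page
     * aligned before the range can be handed to hv_vm_map(). */
    static bool section_is_mappable(uint64_t gpa, uint64_t size,
                                    uint64_t page_size)
    {
        return (gpa | size) % page_size == 0;
    }

    /* Sketch: read-only and ROM-device regions are mapped without
     * HV_MEMORY_WRITE so guest stores trap instead of hitting backing RAM. */
    static hv_memory_flags_t section_flags(bool readonly, bool rom_device)
    {
        hv_memory_flags_t flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
        if (!readonly && !rom_device) {
            flags |= HV_MEMORY_WRITE;
        }
        return flags;
    }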
204 if (!cpu->accel->dirty) { in do_hvf_cpu_synchronize_state()
206 cpu->accel->dirty = true; in do_hvf_cpu_synchronize_state()
212 if (!cpu->accel->dirty) { in hvf_cpu_synchronize_state()
221 cpu->accel->dirty = true; in do_hvf_cpu_synchronize_set_dirty()
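The synchronize helpers implement lazy register sync keyed on cpu->accel->dirty: the flag means QEMU's copy of the register state is authoritative and must be pushed back before the vCPU runs again, so registers are only fetched from HVF while the flag is clear. A generic sketch of the pattern, with a hypothetical fetch helper standing in for the real register readback:

    #include <stdbool.h>

    struct vcpu_state {
        bool dirty;   /* true: QEMU's register copy is authoritative and must
                         be written back to the in-kernel vCPU before it runs */
    };

    /* Hypothetical stand-in for reading registers out of the hypervisor. */
    static void fetch_registers(struct vcpu_state *v) { (void)v; }

    static void synchronize_state(struct vcpu_state *v)
    {
        if (!v->dirty) {        /* only fetch while the in-kernel copy is newer */
            fetch_registers(v);
            v->dirty = true;    /* QEMU-side copy now owns the state */
        }
    }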
244 section->offset_within_address_space, in hvf_set_dirty_tracking()
245 int128_get64(section->size)); in hvf_set_dirty_tracking()
247 /* protect region against writes; begin tracking it */ in hvf_set_dirty_tracking()
249 slot->flags |= HVF_SLOT_LOG; in hvf_set_dirty_tracking()
250 hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, in hvf_set_dirty_tracking()
254 slot->flags &= ~HVF_SLOT_LOG; in hvf_set_dirty_tracking()
255 hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, in hvf_set_dirty_tracking()
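Dirty-page tracking is driven entirely through mapping permissions: enabling logging sets HVF_SLOT_LOG and drops write permission so guest stores fault and can be recorded; disabling it restores full permissions. A minimal sketch of the toggle, assuming the <Hypervisor/Hypervisor.h> umbrella header:

    #include <Hypervisor/Hypervisor.h>   /* assumed umbrella header */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch: write-protect (or un-protect) a mapped guest-physical range. */
    static void set_dirty_logging(uint64_t gpa, size_t size, bool on)
    {
        hv_memory_flags_t flags = HV_MEMORY_READ | HV_MEMORY_EXEC;

        if (!on) {
            flags |= HV_MEMORY_WRITE;   /* logging off: writes allowed again */
        }
        if (hv_vm_protect(gpa, size, flags) != HV_SUCCESS) {
            abort();
        }
    }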
326 if (mc->hvf_get_physical_address_range) { in hvf_accel_init()
327 pa_range = mc->hvf_get_physical_address_range(ms); in hvf_accel_init()
329 return -EINVAL; in hvf_accel_init()
338 s->num_slots = ARRAY_SIZE(s->slots); in hvf_accel_init()
339 for (x = 0; x < s->num_slots; ++x) { in hvf_accel_init()
340 s->slots[x].size = 0; in hvf_accel_init()
341 s->slots[x].slot_id = x; in hvf_accel_init()
344 QTAILQ_INIT(&s->hvf_sw_breakpoints); in hvf_accel_init()
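hvf_accel_init() sizes the slot table from the static array, marks every slot free by zeroing its size (the same emptiness test used by hvf_find_overlap_slot() and the free-slot scan above), and initializes the software-breakpoint list. A reduced sketch of the slot setup; the slot count here is illustrative:

    #include <stdint.h>

    #define NUM_SLOTS 32   /* illustrative; QEMU uses ARRAY_SIZE(s->slots) */

    struct slot {
        uint64_t start;
        uint64_t size;     /* size == 0 marks the slot as free */
        int      slot_id;
    };

    static struct slot slots[NUM_SLOTS];

    static void init_slots(void)
    {
        for (int i = 0; i < NUM_SLOTS; i++) {
            slots[i].size = 0;
            slots[i].slot_id = i;
        }
    }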
360 ac->name = "HVF"; in hvf_accel_class_init()
361 ac->init_machine = hvf_accel_init; in hvf_accel_class_init()
362 ac->allowed = &hvf_allowed; in hvf_accel_class_init()
363 ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags; in hvf_accel_class_init()
381 hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd); in hvf_vcpu_destroy()
385 g_free(cpu->accel); in hvf_vcpu_destroy()
386 cpu->accel = NULL; in hvf_vcpu_destroy()
393 cpu->accel = g_new0(AccelCPUState, 1); in hvf_init_vcpu()
402 pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask); in hvf_init_vcpu()
403 sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI); in hvf_init_vcpu()
406 r = hv_vcpu_create(&cpu->accel->fd, in hvf_init_vcpu()
407 (hv_vcpu_exit_t **)&cpu->accel->exit, NULL); in hvf_init_vcpu()
409 r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT); in hvf_init_vcpu()
411 cpu->accel->dirty = true; in hvf_init_vcpu()
414 cpu->accel->guest_debug_enabled = false; in hvf_init_vcpu()
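hvf_init_vcpu() builds the signal mask used to unblock SIG_IPI, creates the vCPU, and starts it with dirty = true so QEMU's register state is pushed into HVF before the first run. The two hv_vcpu_create() call forms in the listing reflect the two host APIs: on Apple silicon the call also returns an exit-information structure, while on x86 it takes only a flags argument. A hedged sketch of both variants, assuming the <Hypervisor/Hypervisor.h> umbrella header:

    #include <Hypervisor/Hypervisor.h>   /* assumed umbrella header */
    #include <stdlib.h>

    #ifdef __aarch64__
    static hv_vcpu_t       vcpu;
    static hv_vcpu_exit_t *vcpu_exit;   /* filled by the kernel, read after each run */
    #else
    static hv_vcpuid_t     vcpu;
    #endif

    static void create_vcpu(void)
    {
    #ifdef __aarch64__
        if (hv_vcpu_create(&vcpu, &vcpu_exit, NULL) != HV_SUCCESS) {
            abort();
        }
    #else
        if (hv_vcpu_create(&vcpu, HV_VCPU_DEFAULT) != HV_SUCCESS) {
            abort();
        }
    #endif
    }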
420 * The HVF-specific vCPU thread function. This one should only run when the host
434 qemu_thread_get_self(cpu->thread); in hvf_cpu_thread_fn()
436 cpu->thread_id = qemu_get_thread_id(); in hvf_cpu_thread_fn()
443 qemu_guest_random_seed_thread_part2(cpu->random_seed); in hvf_cpu_thread_fn()
453 } while (!cpu->unplug || cpu_can_run(cpu)); in hvf_cpu_thread_fn()
468 * unrestricted-guest mode. in hvf_start_vcpu_thread()
473 cpu->cpu_index); in hvf_start_vcpu_thread()
474 qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn, in hvf_start_vcpu_thread()
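hvf_start_vcpu_thread() spins up one dedicated host thread per vCPU and hands it hvf_cpu_thread_fn, which loops entering the guest until the vCPU is unplugged and can no longer run; the real loop also re-checks cpu_can_run() each iteration. A plain-pthreads sketch of that shape, with illustrative names and a stubbed per-iteration body:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct vcpu_ctx {
        bool unplug;     /* set when the vCPU is hot-removed */
    };

    /* Hypothetical per-iteration body: enter the guest until the next VM exit. */
    static void run_vcpu_once(struct vcpu_ctx *v) { (void)v; }

    static void *vcpu_thread_fn(void *arg)
    {
        struct vcpu_ctx *v = arg;
        do {
            run_vcpu_once(v);
        } while (!v->unplug);
        return NULL;
    }

    /* One dedicated host thread per vCPU; QEMU additionally names it "CPU %d/HVF". */
    static void start_vcpu_thread(struct vcpu_ctx *v)
    {
        pthread_t tid;
        if (pthread_create(&tid, NULL, vcpu_thread_fn, v) != 0) {
            abort();
        }
    }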
486 bp->use_count++; in hvf_insert_breakpoint()
491 bp->pc = addr; in hvf_insert_breakpoint()
492 bp->use_count = 1; in hvf_insert_breakpoint()
499 QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry); in hvf_insert_breakpoint()
524 return -ENOENT; in hvf_remove_breakpoint()
527 if (bp->use_count > 1) { in hvf_remove_breakpoint()
528 bp->use_count--; in hvf_remove_breakpoint()
537 QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry); in hvf_remove_breakpoint()
560 QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) { in hvf_remove_all_breakpoints()
570 QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry); in hvf_remove_all_breakpoints()
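The software-breakpoint helpers keep a QTAILQ of breakpoints keyed by guest PC and reference-count repeated insertions at the same address; the original instruction is only restored once the count drops to zero. A reduced, list-free sketch of the refcounting, with the patch/restore steps as hypothetical stand-ins:

    #include <errno.h>
    #include <stdint.h>

    struct sw_breakpoint {
        uint64_t pc;
        int      use_count;
    };

    /* Hypothetical stand-ins for patching in / restoring the trap instruction. */
    static void patch_insn(uint64_t pc)   { (void)pc; }
    static void restore_insn(uint64_t pc) { (void)pc; }

    static void bp_insert(struct sw_breakpoint *bp, uint64_t pc)
    {
        if (bp->use_count) {            /* already armed at this address */
            bp->use_count++;
            return;
        }
        bp->pc = pc;
        bp->use_count = 1;
        patch_insn(pc);
    }

    static int bp_remove(struct sw_breakpoint *bp)
    {
        if (!bp->use_count) {
            return -ENOENT;             /* nothing armed at this address */
        }
        if (--bp->use_count == 0) {
            restore_insn(bp->pc);       /* last user: put the original insn back */
        }
        return 0;
    }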
584 ops->create_vcpu_thread = hvf_start_vcpu_thread; in hvf_accel_ops_class_init()
585 ops->kick_vcpu_thread = hvf_kick_vcpu_thread; in hvf_accel_ops_class_init()
587 ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset; in hvf_accel_ops_class_init()
588 ops->synchronize_post_init = hvf_cpu_synchronize_post_init; in hvf_accel_ops_class_init()
589 ops->synchronize_state = hvf_cpu_synchronize_state; in hvf_accel_ops_class_init()
590 ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm; in hvf_accel_ops_class_init()
592 ops->insert_breakpoint = hvf_insert_breakpoint; in hvf_accel_ops_class_init()
593 ops->remove_breakpoint = hvf_remove_breakpoint; in hvf_accel_ops_class_init()
594 ops->remove_all_breakpoints = hvf_remove_all_breakpoints; in hvf_accel_ops_class_init()
595 ops->update_guest_debug = hvf_update_guest_debug; in hvf_accel_ops_class_init()
596 ops->supports_guest_debug = hvf_arch_supports_guest_debug; in hvf_accel_ops_class_init()
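hvf_accel_ops_class_init() is registration plumbing: it fills QEMU's AccelOpsClass hooks so generic code can start and kick vCPU threads, synchronize register state, and drive guest debugging without knowing which accelerator is active. A generic sketch of that ops-table pattern; the struct and hook names below are illustrative, not QEMU's real AccelOpsClass:

    /* Sketch: the accelerator fills a table of function pointers and the
     * core calls through it. */
    struct accel_ops {
        void (*create_vcpu_thread)(void *cpu);
        void (*kick_vcpu_thread)(void *cpu);
        void (*synchronize_state)(void *cpu);
    };

    static void my_create_vcpu_thread(void *cpu) { (void)cpu; }
    static void my_kick_vcpu_thread(void *cpu)   { (void)cpu; }
    static void my_synchronize_state(void *cpu)  { (void)cpu; }

    static void my_ops_init(struct accel_ops *ops)
    {
        ops->create_vcpu_thread = my_create_vcpu_thread;
        ops->kick_vcpu_thread   = my_kick_vcpu_thread;
        ops->synchronize_state  = my_synchronize_state;
    }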