/*
 * QEMU TCG vCPU common functionality
 *
 * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/gdbstub.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"

/* common functionality among all TCG variants */

void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
    uint32_t cflags;

    /*
     * Include the cluster number in the hash we use to look up TBs.
     * This is important because a TB that is valid for one cluster at
     * a given physical address and set of CPU flags is not necessarily
     * valid for another:
     * the two clusters may have different views of physical memory, or
     * may have different CPU features (eg FPU present or absent).
     */
    cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;

    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
    cpu->tcg_cflags |= cflags;
}

void tcg_cpus_destroy(CPUState *cpu)
{
    cpu_thread_signal_destroyed(cpu);
}
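
/*
 * Run guest code on @cpu until cpu_exec() returns.  The
 * cpu_exec_start()/cpu_exec_end() bracket keeps this vCPU's execution
 * serialised against exclusive work started by other threads.
 */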
int tcg_cpus_exec(CPUState *cpu)
{
    int ret;
    assert(tcg_enabled());
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
    return ret;
}

/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());

    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
        /* Make the currently running TB exit so the interrupt is noticed. */
        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
    }
}

static bool tcg_supports_guest_debug(void)
{
    return true;
}

/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
{
    static const int xlat[] = {
        [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
        [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
        [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };

    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cputype = xlat[gdbtype];

    if (cc->gdb_stop_before_watchpoint) {
        cputype |= BP_STOP_BEFORE_ACCESS;
    }
    return cputype;
}

/* GDB breakpoints and watchpoints are global, so install them on every vCPU */
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_insert(cpu, addr, len,
                                        xlat_gdb_type(cpu, type), NULL);
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_remove(cpu, addr, len,
                                        xlat_gdb_type(cpu, type));
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
    cpu_breakpoint_remove_all(cpu, BP_GDB);
    cpu_watchpoint_remove_all(cpu, BP_GDB);
}
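
/*
 * Wire up the AccelOpsClass hooks for the selected TCG thread model:
 * MTTCG runs one host thread per vCPU, while the single-threaded
 * round-robin (rr) loop multiplexes all vCPUs on one thread.  With
 * icount enabled, interrupt handling and clock queries go through the
 * icount-aware helpers instead.
 */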
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    if (qemu_tcg_mttcg_enabled()) {
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        ops->handle_interrupt = tcg_handle_interrupt;
    } else {
        ops->create_vcpu_thread = rr_start_vcpu_thread;
        ops->kick_vcpu_thread = rr_kick_vcpu_thread;

        if (icount_enabled()) {
            ops->handle_interrupt = icount_handle_interrupt;
            ops->get_virtual_clock = icount_get;
            ops->get_elapsed_ticks = icount_get;
        } else {
            ops->handle_interrupt = tcg_handle_interrupt;
        }
    }

    ops->supports_guest_debug = tcg_supports_guest_debug;
    ops->insert_breakpoint = tcg_insert_breakpoint;
    ops->remove_breakpoint = tcg_remove_breakpoint;
    ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}

static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->ops_init = tcg_accel_ops_init;
}

static const TypeInfo tcg_accel_ops_type = {
    .name = ACCEL_OPS_NAME("tcg"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = tcg_accel_ops_class_init,
    .abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));

static void tcg_accel_ops_register_types(void)
{
    type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);