/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow the translation results to be inspected; the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
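/* Worked example (illustrative only, not used by the build): with the
   values above, N = MAX_OPC_PARAM_ARGS = 5 + 1 = 6, so MAX_OPC_PARAM is
   4 + 2 * 6 = 16 parameters on a 32-bit host and 4 + 1 * 6 = 10 on a
   64-bit host, and OPC_MAX_SIZE = 640 - 266 = 374 ops remain usable
   once the per-instruction headroom has been reserved. */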
/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void page_size_init(void);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);

#if !defined(CONFIG_USER_ONLY)
bool qemu_in_vcpu_thread(void);
void cpu_reload_memory_map(CPUState *cpu);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
void tlb_flush(CPUState *cpu, int flush_global);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif
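/* Illustrative arithmetic (not part of the build): CODE_GEN_PHYS_HASH_SIZE
   above gives 1 << 15 = 32768 buckets, and tb_phys_hash_func() further
   down indexes them as (pc >> 2) & 0x7fff, so e.g. a physical PC of
   0x40001234 hashes to bucket (0x40001234 >> 2) & 0x7fff = 0x048d. */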
struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000

    void *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
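/* A minimal sketch (compiled out; the helper name is illustrative only)
   of walking one step of the jmp_first/jmp_next circular list documented
   in TranslationBlock above: the two least significant bits of each
   pointer encode which link to follow next. */
#if 0
static inline TranslationBlock *tb_jmp_list_step(TranslationBlock *link,
                                                 unsigned *n_out)
{
    uintptr_t v = (uintptr_t)link;
    *n_out = v & 3; /* 0/1: continue via jmp_next[n]; 2: back at jmp_first */
    return (TranslationBlock *)(v & ~(uintptr_t)3);
}
#endif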
#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC() (GETRA() - GETPC_ADJ)

#if !defined(CONFIG_USER_ONLY)

void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index);

void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;
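/* A hedged usage sketch (hypothetical helper, compiled out): a softmmu
   helper is expected to capture GETPC() once, in the outermost helper
   function, and pass it down as the retaddr argument, e.g. to tlb_fill()
   or cpu_restore_state() above, so the faulting guest instruction can be
   recovered from the host return address. */
#if 0
void helper_example_access(CPUArchState *env, target_ulong addr)
{
    uintptr_t retaddr = GETPC(); /* take this before any further calls */
    do_access_with_retaddr(env, addr, retaddr); /* hypothetical callee */
}
#endif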
/**
 * cpu_can_do_io:
 * @cpu: The CPU for which to check IO.
 *
 * Deterministic execution requires that IO only be performed on the last
 * instruction of a TB so that interrupts take effect immediately.
 *
 * Returns: %true if memory-mapped IO is safe, %false otherwise.
 */
static inline bool cpu_can_do_io(CPUState *cpu)
{
    if (!use_icount) {
        return true;
    }
    /* If not executing code then assume we are ok.  */
    if (cpu->current_tb == NULL) {
        return true;
    }
    return cpu->can_do_io != 0;
}

#endif