/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled.  */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an
   appropriate type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
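
/*
 * Usage sketch (hypothetical target code, not part of this header): a
 * helper that needs to raise a guest exception typically records the
 * exception number and leaves through one of the cpu_loop_exit*()
 * entry points.  cpu_loop_exit_restore() first rolls the guest state
 * back to the instruction identified by the host return address (see
 * GETPC() further down in this header).  The helper name and the
 * EXCP_HYPOTHETICAL_UNALIGNED constant below are made up for
 * illustration; ENV_GET_CPU() is the per-target accessor macro:
 *
 *   void helper_check_align(CPUArchState *env, target_ulong addr)
 *   {
 *       if (addr & 3) {
 *           CPUState *cs = ENV_GET_CPU(env);
 *           cs->exception_index = EXCP_HYPOTHETICAL_UNALIGNED;
 *           cpu_loop_exit_restore(cs, GETPC());
 *       }
 *   }
 */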

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus except that the source vCPU's work
 * is scheduled as safe work, meaning all flushes will be complete
 * once the source vCPU's safe work is complete.  This will depend
 * on when the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
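
/*
 * Usage sketch (hypothetical MMU index assignment, for illustration
 * only): the @idxmap argument of the *_by_mmuidx variants is a bitmap
 * with one bit per MMU index.  A target that kept, say, kernel-mode
 * translations in MMU index 0 and user-mode translations in MMU
 * index 1 could drop one page from both with:
 *
 *   uint16_t idxmap = (1 << 0) | (1 << 1);
 *   tlb_flush_page_by_mmuidx(cs, addr, idxmap);
 *
 * tlb_flush_page(cs, addr), by contrast, flushes the page from all
 * MMU indexes of the CPU.
 */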
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
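
/*
 * Usage sketch (hypothetical target code): a target's tlb_fill()
 * implementation (declared later in this header) walks the guest page
 * tables and, on a successful walk, installs the translation with
 * tlb_set_page_with_attrs().  The walk_page_table() helper and the
 * failure handling below are made up for illustration; a real target
 * would also set cs->exception_index and fault details before exiting:
 *
 *   void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
 *                 int mmu_idx, uintptr_t retaddr)
 *   {
 *       hwaddr paddr;
 *       int prot;
 *
 *       if (!walk_page_table(cs, addr, access_type, mmu_idx, &paddr, &prot)) {
 *           cpu_loop_exit_restore(cs, retaddr);
 *       }
 *       tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
 *                               paddr & TARGET_PAGE_MASK,
 *                               MEMTXATTRS_UNSPECIFIED, prot,
 *                               mmu_idx, TARGET_PAGE_SIZE);
 *   }
 */
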
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Setters need tb_lock */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction).
     * Only two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
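
/*
 * Sketch of walking the jmp_list encoding described above (for
 * illustration only; the real list manipulation lives in the
 * translator, not in this header).  Starting from a destination TB,
 * each element is a tagged pointer whose low two bits say which field
 * of the pointed-to TB continues the chain; a value with low bits == 2
 * points back at the destination TB and terminates the walk:
 *
 *   uintptr_t e = dest_tb->jmp_list_first;
 *   while ((e & 3) != 2) {
 *       TranslationBlock *src = (TranslationBlock *)(e & ~(uintptr_t)3);
 *       unsigned n = e & 3;
 *       ... src jumps to dest_tb through its jump slot n ...
 *       e = src->jmp_list_next[n];
 *   }
 */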

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

void tb_remove(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
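
/*
 * Usage sketch (illustration only; the real fast-path lookup lives in
 * the cpu-exec code): a hash table lookup keys on the guest pc,
 * cs_base and flags returned by the target's cpu_get_tb_cpu_state(),
 * plus the current cflags mask:
 *
 *   target_ulong pc, cs_base;
 *   uint32_t flags;
 *
 *   cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *   tb = tb_htable_lookup(cpu, pc, cs_base, flags, curr_cflags());
 */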

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif