/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow the translation results to be inspected; the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address spaces 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
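/*
 * An illustrative sketch (the region names are hypothetical): a target
 * with a secure and a non-secure view of memory might register two
 * address spaces from its realize code:
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, 0, "cpu-secure", secure_root_mr);
 *     cpu_address_space_init(cpu, 1, "cpu-nonsecure", nonsecure_root_mr);
 *
 * Address space 0 then backs the convenience pointer cpu->as.
 */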
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time,
 * so this is generally safe. If more selective flushing is required,
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
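/*
 * An illustrative example (the index values are arbitrary): @idxmap
 * carries one bit per MMU index, so flushing a page for MMU indexes
 * 0 and 2 only would be:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 2));
 */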
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
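/*
 * A minimal sketch of the success path in a target's tlb_fill()
 * (xyz_page_table_walk() and its outputs are hypothetical; on a failed
 * walk a real target raises a guest exception and unwinds with
 * cpu_loop_exit_restore(cs, retaddr) instead):
 *
 *     void tlb_fill(CPUState *cs, target_ulong addr, int size,
 *                   MMUAccessType access_type, int mmu_idx,
 *                   uintptr_t retaddr)
 *     {
 *         hwaddr paddr;
 *         int prot;
 *         MemTxAttrs attrs = {};
 *
 *         xyz_page_table_walk(cs, addr, access_type, mmu_idx,
 *                             &paddr, &prot, &attrs);
 *         tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */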
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr,
                                           MemTxAttrs attrs)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of the search data can be obtained by adding @size
 * to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Setters need tb_lock */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
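/*
 * An illustrative lookup sequence (not the exact cpu-exec.c code):
 * tb_htable_lookup() compares a TB's cflags masked with CF_HASH_MASK
 * against the @cf_mask argument, so callers typically pass
 * curr_cflags() to find a TB matching the current execution mode:
 *
 *     TranslationBlock *tb;
 *
 *     tb = tb_htable_lookup(cpu, pc, cs_base, flags, curr_cflags());
 *     if (tb == NULL) {
 *         tb = tb_gen_code(cpu, pc, cs_base, flags, curr_cflags());
 *     }
 *
 * Generating code requires the appropriate locks (tb_lock, plus
 * mmap_lock in user mode) to be held.
 */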
/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
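/*
 * An illustrative use of GETPC() (helper_xyz_load() is hypothetical):
 * a helper called directly from generated code captures its host
 * return address at the outermost entry point and passes it down, so
 * that a fault can be attributed to the guest insn that caused it:
 *
 *     uint32_t helper_xyz_load(CPUArchState *env, target_ulong addr)
 *     {
 *         return cpu_ldl_data_ra(env, addr, GETPC());
 *     }
 *
 * GETPC() must be invoked in the function actually called from the
 * generated code; calling it from a deeper subroutine would yield the
 * wrong return address.
 */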
void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes of the access
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif