/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpu-timers.h"

/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
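
/*
 * Example (illustrative sketch, not part of the API above): a target
 * helper implementing an interruptible block operation might poll
 * cpu_loop_exit_requested() between elements, once enough state has
 * been written back for the instruction to be restarted:
 *
 *     void HELPER(block_op)(CPUArchState *env)
 *     {
 *         while (env->count) {
 *             ... process one element, update env->count and addresses ...
 *             if (cpu_loop_exit_requested(env_cpu(env))) {
 *                 cpu_loop_exit_restore(env_cpu(env), GETPC());
 *             }
 *         }
 *     }
 *
 * The helper name and the env fields are made up for illustration;
 * env_cpu() and GETPC() are the usual target/helper conventions.
 */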

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus except that the source vCPU's work
 * is scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
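
/*
 * The *_by_mmuidx variants below take an @idxmap bitmap in which bit N
 * selects MMU index N for flushing. For example (illustrative only,
 * with made-up target MMU index names):
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *     tlb_flush_by_mmuidx(cpu, idxmap);
 */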

/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but the TLB match considers
 * only the low @bits of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
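
/*
 * Example (illustrative sketch): a target whose MMU ignores the top
 * byte of virtual addresses could flush a page while matching only the
 * architecturally significant low 56 bits:
 *
 *     tlb_flush_page_bits_by_mmuidx(cpu, addr, idxmap, 56);
 *
 * The value 56 here is only an example of such a scheme.
 */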

/**
 * tlb_flush_range_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                        target_ulong len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
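
/*
 * Example (illustrative sketch): a target's tlb_fill hook, after a
 * successful page table walk, might install the translation with:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 *
 * where @vaddr, @paddr, @attrs, @prot and @mmu_idx are produced by the
 * target's own walk; only the call itself is defined by this header.
 */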
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                             target_ulong len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      target_ulong addr,
                                                      target_ulong len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             target_ulong addr,
                                                             target_ulong len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
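
/*
 * Example (illustrative sketch): a helper that performs a multi-byte
 * store can validate the destination up front, so that no partial
 * store is left behind if the access would fault:
 *
 *     void *host = probe_write(env, addr, 16, mmu_idx, GETPC());
 *     if (host) {
 *         ... write the 16 bytes directly to @host ...
 *     } else {
 *         ... the page is not backed by RAM; fall back to byte-wise
 *         accessors such as cpu_stb_mmuidx_ra() from exec/cpu_ldst.h ...
 *     }
 */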

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK bits
 * for the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TB's in a binary
 * search tree, and the only fields needed to compare TB's in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00020000
#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    /*
     * Above fields used for comparing
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);
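
/*
 * Example (illustrative): the cluster ID a TB was generated for can be
 * recovered from its cflags with
 *
 *     (tb_cflags(tb) & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT
 *
 * and the instruction count limit with (tb_cflags(tb) & CF_COUNT_MASK).
 */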

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
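
/*
 * Example (illustrative sketch): a target helper that may fault passes
 * GETPC() as the unwinding return address so that guest state can be
 * restored to the instruction that called the helper:
 *
 *     uint64_t HELPER(load_quad)(CPUArchState *env, uint64_t addr)
 *     {
 *         return cpu_ldq_data_ra(env, addr, GETPC());
 *     }
 *
 * The helper name is made up; cpu_ldq_data_ra() is the retaddr-taking
 * load accessor from exec/cpu_ldst.h.
 */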

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding. Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                         MMUAccessType access_type,
                                         bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                        MMUAccessType access_type,
                                        uintptr_t ra);

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif