/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif

/* Allow inspection of translation results; the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

/**
 * cpu_restore_state:
 * @cpu: the CPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
G_NORETURN void cpu_loop_exit(CPUState *cpu);
G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
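/*
 * Illustrative sketch (not part of this API): a target helper for an
 * interruptible, long-running instruction might poll
 * cpu_loop_exit_requested() between units of work.  The helper name,
 * the CPU state fields and do_one_unit() are hypothetical; the exact
 * write-back and restart details are target specific.
 *
 *     void helper_long_op(CPUArchState *env)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         while (env->units_left) {
 *             do_one_unit(env);   // architectural state fully written back
 *             if (cpu_loop_exit_requested(cs)) {
 *                 // env->pc still points at the instruction, so it is
 *                 // restarted later and resumes from the saved progress.
 *                 cpu_loop_exit_restore(cs, GETPC());
 *             }
 *         }
 *     }
 */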
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work has run. When that happens depends on
 * when the guest's current translation block ends.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work has run. When that happens depends on
 * when the guest's current translation block ends.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work has run. When
 * that happens depends on when the guest's current translation block
 * ends.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
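/*
 * Illustrative sketch: @idxmap has one bit per MMU index.  A target that
 * needs to drop, say, both its kernel- and user-mode translation regimes
 * could do:
 *
 *     tlb_flush_by_mmuidx(cs, (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 *
 * where MMU_KERNEL_IDX and MMU_USER_IDX stand in for whatever MMU
 * indexes the target actually defines.
 */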
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work has run. When
 * that happens depends on when the guest's current translation block
 * ends.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                        target_ulong len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
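/*
 * Illustrative sketch: a target's tlb_fill hook typically walks the
 * guest page tables and installs the translation on success.  All names
 * here (x_cpu_tlb_fill, walk_page_table, raise_mmu_fault) are
 * hypothetical stand-ins for target-specific code.
 *
 *     bool x_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                         MMUAccessType access_type, int mmu_idx,
 *                         bool probe, uintptr_t retaddr)
 *     {
 *         CPUArchState *env = cs->env_ptr;
 *         hwaddr paddr;
 *         int prot;
 *
 *         if (walk_page_table(env, addr, access_type, mmu_idx,
 *                             &paddr, &prot)) {
 *             tlb_set_page(cs, addr & TARGET_PAGE_MASK,
 *                          paddr & TARGET_PAGE_MASK,
 *                          prot, mmu_idx, TARGET_PAGE_SIZE);
 *             return true;
 *         }
 *         if (probe) {
 *             return false;   // caller only wanted to know, don't fault
 *         }
 *         raise_mmu_fault(cs, addr, access_type, retaddr);  // noreturn
 *     }
 */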
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                             target_ulong len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      target_ulong addr,
                                                      target_ulong len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             target_ulong addr,
                                                             target_ulong len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
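/*
 * Illustrative sketch: a helper that must validate a whole multi-byte
 * store before changing any guest state can probe up front and, when the
 * page is RAM-backed, write through the returned host pointer:
 *
 *     void *host = probe_write(env, addr, 8, mmu_idx, GETPC());
 *     if (host) {
 *         memcpy(host, buf, 8);   // direct RAM fast path
 *     } else {
 *         // NULL: the page requires I/O; fall back to the cpu_ldst.h
 *         // accessors instead.
 *     }
 *
 * (probe_write() is the convenience wrapper declared just below; buf and
 * mmu_idx are assumed to come from the surrounding helper.)
 */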
static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK bits
 * for the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
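/*
 * Illustrative sketch: probing without faulting, e.g. for an instruction
 * that merely tests whether an address is accessible:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // not mapped/accessible: report failure, no exception raised
 *     } else if (host && !(flags & TLB_MMIO)) {
 *         // plain RAM: host points at the data
 *     }
 */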
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00020000
#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    /*
     * The above fields are used for comparison.
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
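/*
 * Illustrative sketch: out-of-line helpers pass GETPC() down so that an
 * exception raised inside the memory access can unwind guest state back
 * to the calling instruction.  helper_ldq is a hypothetical name;
 * cpu_ldq_data_ra() is one of the cpu_ldst.h accessors that takes an
 * explicit return address.
 *
 *     uint64_t helper_ldq(CPUArchState *env, target_ulong addr)
 *     {
 *         return cpu_ldq_data_ra(env, addr, GETPC());
 *     }
 */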
/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: optional output for the host address
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding. Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);
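/*
 * Illustrative sketch: a user-only host SIGSEGV handler glues these
 * pieces together roughly as follows (how pc, is_write, maperr and
 * guest_addr are decoded from the signal context is host/OS specific):
 *
 *     MMUAccessType t = adjust_signal_pc(&pc, is_write);
 *
 *     if (!maperr && t == MMU_DATA_STORE &&
 *         handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
 *         return;   // fault fixed up; the store is re-executed
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, t, maperr, pc);
 */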
654 */ 655 bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, 656 uintptr_t host_pc, abi_ptr guest_addr); 657 658 /** 659 * cpu_loop_exit_sigsegv: 660 * @cpu: the cpu context 661 * @addr: the guest address of the fault 662 * @access_type: access was read/write/execute 663 * @maperr: true for invalid page, false for permission fault 664 * @ra: host pc for unwinding 665 * 666 * Use the TCGCPUOps hook to record cpu state, do guest operating system 667 * specific things to raise SIGSEGV, and jump to the main cpu loop. 668 */ 669 G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, 670 MMUAccessType access_type, 671 bool maperr, uintptr_t ra); 672 673 /** 674 * cpu_loop_exit_sigbus: 675 * @cpu: the cpu context 676 * @addr: the guest address of the alignment fault 677 * @access_type: access was read/write/execute 678 * @ra: host pc for unwinding 679 * 680 * Use the TCGCPUOps hook to record cpu state, do guest operating system 681 * specific things to raise SIGBUS, and jump to the main cpu loop. 682 */ 683 G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, 684 MMUAccessType access_type, 685 uintptr_t ra); 686 687 #else 688 static inline void mmap_lock(void) {} 689 static inline void mmap_unlock(void) {} 690 691 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); 692 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr); 693 694 MemoryRegionSection * 695 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, 696 hwaddr *xlat, hwaddr *plen, 697 MemTxAttrs attrs, int *prot); 698 hwaddr memory_region_section_get_iotlb(CPUState *cpu, 699 MemoryRegionSection *section); 700 #endif 701 702 #endif 703