/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/abi_ptr.h"
#include "exec/cpu_ldst.h"
#endif
#include "exec/mmu-access-type.h"
#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}
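
/*
 * A minimal usage sketch (illustrative only: the helper, the chunked
 * work item and the 'copy_count' field below are hypothetical, not
 * part of any real target):
 *
 *     void helper_interruptible_copy(CPUArchState *env, uint32_t count)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         while (count > 0) {
 *             copy_one_chunk(env);          // hypothetical unit of work
 *             env->copy_count = --count;    // write back architectural state
 *             if (count && cpu_loop_exit_requested(cs)) {
 *                 // Re-enter the main loop; the insn restarts later
 *                 // from the state written back above.
 *                 cpu_loop_exit_restore(cs, GETPC());
 *             }
 *         }
 *     }
 */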

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs, for all MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr, @addr + @len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);
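
/*
 * Usage sketch for the idxmap-based flushes (illustrative only; 'cs' is
 * a CPUState * in scope and the MMU index names are hypothetical, not
 * taken from any real target):
 *
 *     // Drop all cached translations for the "kernel" and "user"
 *     // regimes after a change to the translation tables.
 *     tlb_flush_by_mmuidx(cs, (1 << MMU_IDX_KERNEL) | (1 << MMU_IDX_USER));
 *
 *     // Invalidate a single page in the "user" regime only.
 *     tlb_flush_page_by_mmuidx(cs, page_va, 1 << MMU_IDX_USER);
 */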

/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/**
 * tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
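
/*
 * Sketch of a target tlb_fill-style hook feeding tlb_set_page_with_attrs
 * (illustrative only; walk_page_table() and raise_mmu_fault() are
 * hypothetical stand-ins for real target code):
 *
 *     bool mycpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                         MMUAccessType access_type, int mmu_idx,
 *                         bool probe, uintptr_t retaddr)
 *     {
 *         hwaddr paddr;
 *         int prot;
 *
 *         if (walk_page_table(cs, addr, access_type, mmu_idx,
 *                             &paddr, &prot)) {
 *             tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
 *                                     paddr & TARGET_PAGE_MASK,
 *                                     MEMTXATTRS_UNSPECIFIED, prot,
 *                                     mmu_idx, TARGET_PAGE_SIZE);
 *             return true;
 *         }
 *         if (probe) {
 *             return false;
 *         }
 *         raise_mmu_fault(cs, addr, access_type, retaddr);  // noreturn
 *     }
 */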
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif

#if defined(CONFIG_TCG)

/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
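
/*
 * Hedged usage sketch (illustrative only, not a real helper): a store
 * of an aligned 16-byte pair wants any fault raised before either half
 * is written, so it probes the whole range for writability first.  The
 * alignment keeps the access within one page, as probe_access requires.
 *
 *     void helper_store_pair(CPUArchState *env, vaddr addr,
 *                            uint32_t mmu_idx, uint64_t lo, uint64_t hi)
 *     {
 *         probe_write(env, addr, 16, mmu_idx, GETPC());
 *         cpu_stq_data_ra(env, addr, lo, GETPC());
 *         cpu_stq_data_ra(env, addr + 8, hi, GETPC());
 *     }
 */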

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK bits
 * for the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
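
/*
 * Nonfault usage sketch (illustrative only): constructs such as
 * first-fault vector loads can test a page without risking an
 * exception, then dispatch on the returned flags:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *
 *     if (flags & TLB_INVALID_MASK) {
 *         // Page unmapped or lacking read permission; no exception
 *         // was raised because @nonfault was true.
 *     } else if (flags & TLB_MMIO) {
 *         // Must be handled out-of-line (device memory or the like).
 *     } else {
 *         // Plain RAM; *host may be dereferenced directly.
 *     }
 */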

#ifndef CONFIG_USER_ONLY

/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * This function will not fault if @nonfault is set, but will
 * return TLB_INVALID_MASK if the page is not mapped, or is not
 * accessible with @access_type.
 *
 * This function will return TLB_MMIO in order to force the access
 * to be handled out-of-line if plugins wish to instrument the access.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);

/**
 * probe_access_full_mmu:
 * Like probe_access_full, except:
 *
 * This function is intended to be used for page table accesses by
 * the target mmu itself. Since such page walking happens while
 * handling another potential mmu fault, this function never raises
 * exceptions (akin to @nonfault true for probe_access_full).
 * Likewise this function does not trigger plugin instrumentation.
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}

/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address, may be NULL
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if defined(CONFIG_USER_ONLY)
void TSA_NO_TSA mmap_lock(void);
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);

static inline void mmap_unlock_guard(void *unused)
{
    mmap_unlock();
}

#define WITH_MMAP_LOCK_GUARD() \
    for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
         = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
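
/*
 * Usage sketch (illustrative only; the helpers are hypothetical): the
 * guard holds the lock for the attached statement and the cleanup
 * attribute releases it on every exit path, including early return:
 *
 *     WITH_MMAP_LOCK_GUARD() {
 *         if (!range_is_mapped(start, len)) {   // hypothetical helper
 *             return false;                     // lock released by cleanup
 *         }
 *         update_page_flags(start, len);        // hypothetical helper
 *     }
 */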

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding. Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
#define WITH_MMAP_LOCK_GUARD()

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif