xref: /openbmc/qemu/include/exec/exec-all.h (revision 641f32f6)
1 /*
2  * internal execution defines for qemu
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef EXEC_ALL_H
21 #define EXEC_ALL_H
22 
23 #include "cpu.h"
24 #ifdef CONFIG_TCG
25 #include "exec/cpu_ldst.h"
26 #endif
27 #include "sysemu/cpu-timers.h"
28 
29 /* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled. */
30 #define DEBUG_DISAS
31 
32 /* Page tracking code uses ram addresses in system mode, and virtual
33    addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
34    type.  */
35 #if defined(CONFIG_USER_ONLY)
36 typedef abi_ulong tb_page_addr_t;
37 #define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
38 #else
39 typedef ram_addr_t tb_page_addr_t;
40 #define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
41 #endif
42 
43 #include "qemu/log.h"
44 
45 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
46 void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
47                           target_ulong *data);
48 
49 /**
50  * cpu_restore_state:
51  * @cpu: the vCPU whose state is to be restored
52  * @searched_pc: the host PC the fault occurred at
53  * @will_exit: true if the TB executed will be interrupted after some
54                cpu adjustments. Required for maintaining the correct
55                icount values
56  * @return: true if state was restored, false otherwise
57  *
58  * Attempt to restore the state for a fault occurring in translated
59  * code. If the searched_pc is not in translated code no state is
60  * restored and the function returns false.
61  */
62 bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
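/*
 * Illustrative sketch, not part of this header: a target helper raising an
 * exception from generated code typically restores state from the host
 * return address before leaving the CPU loop.  helper_raise_fault_hyp() and
 * set_fault_info_hyp() are hypothetical names; only cpu_restore_state(),
 * cpu_loop_exit() and GETPC() come from this header.
 *
 *     void helper_raise_fault_hyp(CPUArchState *env, uint32_t excp)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         cpu_restore_state(cs, GETPC(), true);
 *         set_fault_info_hyp(env);
 *         cs->exception_index = excp;
 *         cpu_loop_exit(cs);
 *     }
 */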
63 
64 void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
65 void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
66 void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
67 void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
68 
69 /**
70  * cpu_loop_exit_requested:
71  * @cpu: The CPU state to be tested
72  *
73  * Indicate if somebody asked for a return of the CPU to the main loop
74  * (e.g., via cpu_exit() or cpu_interrupt()).
75  *
76  * This is helpful for architectures that support interruptible
77  * instructions. After writing back all state to registers/memory, this
78  * call can be used to check if it makes sense to return to the main loop
79  * or to continue executing the interruptible instruction.
80  */
81 static inline bool cpu_loop_exit_requested(CPUState *cpu)
82 {
83     return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
84 }
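/*
 * Illustrative sketch, not part of this header: a helper implementing a
 * long-running, interruptible instruction might poll for a requested exit
 * after each completed step.  step_one_element_hyp() and
 * save_partial_state_hyp() are hypothetical target functions.
 *
 *     void helper_long_op_hyp(CPUArchState *env)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         while (step_one_element_hyp(env)) {
 *             if (cpu_loop_exit_requested(cs)) {
 *                 save_partial_state_hyp(env);
 *                 cpu_loop_exit_restore(cs, GETPC());
 *             }
 *         }
 *     }
 */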
85 
86 #if !defined(CONFIG_USER_ONLY)
87 void cpu_reloading_memory_map(void);
88 /**
89  * cpu_address_space_init:
90  * @cpu: CPU to add this address space to
91  * @asidx: integer index of this address space
92  * @prefix: prefix to be used as name of address space
93  * @mr: the root memory region of address space
94  *
95  * Add the specified address space to the CPU's cpu_ases list.
96  * The address space added with @asidx 0 is the one used for the
97  * convenience pointer cpu->as.
98  * The target-specific code which registers ASes is responsible
99  * for defining what semantics address space 0, 1, 2, etc have.
100  *
101  * Before the first call to this function, the caller must set
102  * cpu->num_ases to the total number of address spaces it needs
103  * to support.
104  *
105  * Note that with KVM only one address space is supported.
106  */
107 void cpu_address_space_init(CPUState *cpu, int asidx,
108                             const char *prefix, MemoryRegion *mr);
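/*
 * Illustrative sketch, not part of this header: a target that models both a
 * secure and a non-secure view of memory might register two address spaces
 * at realize time.  normal_root_hyp and secure_root_hyp are hypothetical
 * MemoryRegion roots owned by the target code.
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", normal_root_hyp);
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_root_hyp);
 *
 * The address space registered with index 0 is the one reachable through
 * the convenience pointer cs->as.
 */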
109 #endif
110 
111 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
112 /* cputlb.c */
113 /**
114  * tlb_init - initialize a CPU's TLB
115  * @cpu: CPU whose TLB should be initialized
116  */
117 void tlb_init(CPUState *cpu);
118 /**
119  * tlb_destroy - destroy a CPU's TLB
120  * @cpu: CPU whose TLB should be destroyed
121  */
122 void tlb_destroy(CPUState *cpu);
123 /**
124  * tlb_flush_page:
125  * @cpu: CPU whose TLB should be flushed
126  * @addr: virtual address of page to be flushed
127  *
128  * Flush one page from the TLB of the specified CPU, for all
129  * MMU indexes.
130  */
131 void tlb_flush_page(CPUState *cpu, target_ulong addr);
132 /**
133  * tlb_flush_page_all_cpus:
134  * @src: source CPU of the flush
135  * @addr: virtual address of page to be flushed
136  *
137  * Flush one page from the TLB of all CPUs, for all
138  * MMU indexes.
139  */
140 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
141 /**
142  * tlb_flush_page_all_cpus_synced:
143  * @cpu: src CPU of the flush
144  * @addr: virtual address of page to be flushed
145  *
146  * Flush one page from the TLB of the specified CPU, for all MMU
147  * indexes like tlb_flush_page_all_cpus except the source vCPUs work
148  * is scheduled as safe work meaning all flushes will be complete once
149  * the source vCPUs safe work is complete. This will depend on when
150  * the guests translation ends the TB.
151  */
152 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
153 /**
154  * tlb_flush:
155  * @cpu: CPU whose TLB should be flushed
156  *
157  * Flush the entire TLB for the specified CPU. Most CPU architectures
158  * allow the implementation to drop entries from the TLB at any time
159  * so this is generally safe. If more selective flushing is required
160  * use one of the other functions for efficiency.
161  */
162 void tlb_flush(CPUState *cpu);
163 /**
164  * tlb_flush_all_cpus:
165  * @src_cpu: source CPU of the flush
166  */
167 void tlb_flush_all_cpus(CPUState *src_cpu);
168 /**
169  * tlb_flush_all_cpus_synced:
170  * @src_cpu: source CPU of the flush
171  *
172  * Like tlb_flush_all_cpus, except that the source vCPU's work is
173  * scheduled as safe work, meaning all flushes will be complete once
174  * the source vCPU's safe work is complete. This will depend on when
175  * the guest's translation ends the TB.
176  */
177 void tlb_flush_all_cpus_synced(CPUState *src_cpu);
178 /**
179  * tlb_flush_page_by_mmuidx:
180  * @cpu: CPU whose TLB should be flushed
181  * @addr: virtual address of page to be flushed
182  * @idxmap: bitmap of MMU indexes to flush
183  *
184  * Flush one page from the TLB of the specified CPU, for the specified
185  * MMU indexes.
186  */
187 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
188                               uint16_t idxmap);
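/*
 * Illustrative sketch, not part of this header: @idxmap is a bitmap, so a
 * target invalidating one page for two of its MMU indexes would OR the
 * corresponding bits together.  MMU_KERNEL_IDX_HYP and MMU_USER_IDX_HYP are
 * hypothetical, target-defined index numbers.
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX_HYP) |
 *                              (1 << MMU_USER_IDX_HYP));
 */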
189 /**
190  * tlb_flush_page_by_mmuidx_all_cpus:
191  * @cpu: Originating CPU of the flush
192  * @addr: virtual address of page to be flushed
193  * @idxmap: bitmap of MMU indexes to flush
194  *
195  * Flush one page from the TLB of all CPUs, for the specified
196  * MMU indexes.
197  */
198 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
199                                        uint16_t idxmap);
200 /**
201  * tlb_flush_page_by_mmuidx_all_cpus_synced:
202  * @cpu: Originating CPU of the flush
203  * @addr: virtual address of page to be flushed
204  * @idxmap: bitmap of MMU indexes to flush
205  *
206  * Flush one page from the TLB of all CPUs, for the specified MMU
207  * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the source
208  * vCPU's work is scheduled as safe work, meaning all flushes will be
209  * complete once the source vCPU's safe work is complete. This will
210  * depend on when the guest's translation ends the TB.
211  */
212 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
213                                               uint16_t idxmap);
214 /**
215  * tlb_flush_by_mmuidx:
216  * @cpu: CPU whose TLB should be flushed
218  * @idxmap: bitmap of MMU indexes to flush
219  *
220  * Flush all entries from the TLB of the specified CPU, for the specified
221  * MMU indexes.
222  */
223 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
224 /**
225  * tlb_flush_by_mmuidx_all_cpus:
226  * @cpu: Originating CPU of the flush
227  * @idxmap: bitmap of MMU indexes to flush
228  *
229  * Flush all entries from all TLBs of all CPUs, for the specified
230  * MMU indexes.
231  */
232 void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
233 /**
234  * tlb_flush_by_mmuidx_all_cpus_synced:
235  * @cpu: Originating CPU of the flush
236  * @idxmap: bitmap of MMU indexes to flush
237  *
238  * Flush all entries from all TLBs of all CPUs, for the specified
239  * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the source
240  * vCPU's work is scheduled as safe work, meaning all flushes will be
241  * complete once the source vCPU's safe work is complete. This will
242  * depend on when the guest's translation ends the TB.
243  */
244 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
245 
246 /**
247  * tlb_flush_page_bits_by_mmuidx
248  * @cpu: CPU whose TLB should be flushed
249  * @addr: virtual address of page to be flushed
250  * @idxmap: bitmap of mmu indexes to flush
251  * @bits: number of significant bits in address
252  *
253  * Similar to tlb_flush_page_by_mmuidx, but comparing only the low @bits of the address.
254  */
255 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
256                                    uint16_t idxmap, unsigned bits);
257 
258 /* Similarly, with broadcast and syncing. */
259 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
260                                             uint16_t idxmap, unsigned bits);
261 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
262     (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
263 
264 /**
265  * tlb_flush_range_by_mmuidx
266  * @cpu: CPU whose TLB should be flushed
267  * @addr: virtual address of the start of the range to be flushed
268  * @len: length of range to be flushed
269  * @idxmap: bitmap of mmu indexes to flush
270  * @bits: number of significant bits in address
271  *
272  * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
273  * comparing only the low @bits worth of each virtual page.
274  */
275 void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
276                                target_ulong len, uint16_t idxmap,
277                                unsigned bits);
278 
279 /* Similarly, with broadcast and syncing. */
280 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
281                                         target_ulong len, uint16_t idxmap,
282                                         unsigned bits);
283 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
284                                                target_ulong addr,
285                                                target_ulong len,
286                                                uint16_t idxmap,
287                                                unsigned bits);
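/*
 * Note (an inference from the documented semantics above, not a statement
 * taken from this header): the page-bits flush can be thought of as the
 * single-page special case of the range flush, so
 *
 *     tlb_flush_page_bits_by_mmuidx(cs, addr, idxmap, bits);
 *
 * is expected to behave like
 *
 *     tlb_flush_range_by_mmuidx(cs, addr, TARGET_PAGE_SIZE, idxmap, bits);
 */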
288 
289 /**
290  * tlb_set_page_with_attrs:
291  * @cpu: CPU to add this TLB entry for
292  * @vaddr: virtual address of page to add entry for
293  * @paddr: physical address of the page
294  * @attrs: memory transaction attributes
295  * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
296  * @mmu_idx: MMU index to insert TLB entry for
297  * @size: size of the page in bytes
298  *
299  * Add an entry to this CPU's TLB (a mapping from virtual address
300  * @vaddr to physical address @paddr) with the specified memory
301  * transaction attributes. This is generally called by the target CPU
302  * specific code after it has been called through the tlb_fill()
303  * entry point and performed a successful page table walk to find
304  * the physical address and attributes for the virtual address
305  * which provoked the TLB miss.
306  *
307  * At most one entry for a given virtual address is permitted. Only a
308  * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
309  * used by tlb_flush_page.
310  */
311 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
312                              hwaddr paddr, MemTxAttrs attrs,
313                              int prot, int mmu_idx, target_ulong size);
314 /* tlb_set_page:
315  *
316  * This function is equivalent to calling tlb_set_page_with_attrs()
317  * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
318  * as a convenience for CPUs which don't use memory transaction attributes.
319  */
320 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
321                   hwaddr paddr, int prot,
322                   int mmu_idx, target_ulong size);
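/*
 * Illustrative sketch, not part of this header: the tail of a hypothetical
 * target tlb_fill-style handler, after a successful page table walk has
 * produced paddr, prot and attrs for the faulting vaddr:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 *     return true;
 */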
323 #else
324 static inline void tlb_init(CPUState *cpu)
325 {
326 }
327 static inline void tlb_destroy(CPUState *cpu)
328 {
329 }
330 static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
331 {
332 }
333 static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
334 {
335 }
336 static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
337                                                   target_ulong addr)
338 {
339 }
340 static inline void tlb_flush(CPUState *cpu)
341 {
342 }
343 static inline void tlb_flush_all_cpus(CPUState *src_cpu)
344 {
345 }
346 static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
347 {
348 }
349 static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
350                                             target_ulong addr, uint16_t idxmap)
351 {
352 }
353 
354 static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
355 {
356 }
357 static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
358                                                      target_ulong addr,
359                                                      uint16_t idxmap)
360 {
361 }
362 static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
363                                                             target_ulong addr,
364                                                             uint16_t idxmap)
365 {
366 }
367 static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
368 {
369 }
370 
371 static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
372                                                        uint16_t idxmap)
373 {
374 }
375 static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
376                                                  target_ulong addr,
377                                                  uint16_t idxmap,
378                                                  unsigned bits)
379 {
380 }
381 static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
382                                                           target_ulong addr,
383                                                           uint16_t idxmap,
384                                                           unsigned bits)
385 {
386 }
387 static inline void
388 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
389                                               uint16_t idxmap, unsigned bits)
390 {
391 }
392 static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
393                                              target_ulong len, uint16_t idxmap,
394                                              unsigned bits)
395 {
396 }
397 static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
398                                                       target_ulong addr,
399                                                       target_ulong len,
400                                                       uint16_t idxmap,
401                                                       unsigned bits)
402 {
403 }
404 static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
405                                                              target_ulong addr,
406                                                              target_ulong len,
407                                                              uint16_t idxmap,
408                                                              unsigned bits)
409 {
410 }
411 #endif
412 /**
413  * probe_access:
414  * @env: CPUArchState
415  * @addr: guest virtual address to look up
416  * @size: size of the access
417  * @access_type: read, write or execute permission
418  * @mmu_idx: MMU index to use for lookup
419  * @retaddr: return address for unwinding
420  *
421  * Look up the guest virtual address @addr.  Raise an exception if the
422  * page does not satisfy @access_type.  Raise an exception if the
423  * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
424  * page as dirty.
425  *
426  * Finally, return the host address for a page that is backed by RAM,
427  * or NULL if the page requires I/O.
428  */
429 void *probe_access(CPUArchState *env, target_ulong addr, int size,
430                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
431 
432 static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
433                                 int mmu_idx, uintptr_t retaddr)
434 {
435     return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
436 }
437 
438 static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
439                                int mmu_idx, uintptr_t retaddr)
440 {
441     return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
442 }
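/*
 * Illustrative sketch, not part of this header: a helper that overwrites a
 * whole block probes the destination once up front, so that any fault is
 * raised before guest memory is modified.  BLOCK_SIZE_HYP, fill_ram_hyp()
 * and fill_mmio_hyp() are hypothetical.
 *
 *     void helper_fill_block_hyp(CPUArchState *env, target_ulong dest)
 *     {
 *         int mmu_idx = cpu_mmu_index(env, false);
 *         void *mem = probe_write(env, dest, BLOCK_SIZE_HYP, mmu_idx, GETPC());
 *
 *         if (mem) {
 *             fill_ram_hyp(mem);
 *         } else {
 *             fill_mmio_hyp(env, dest);
 *         }
 *     }
 */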
443 
444 /**
445  * probe_access_flags:
446  * @env: CPUArchState
447  * @addr: guest virtual address to look up
448  * @access_type: read, write or execute permission
449  * @mmu_idx: MMU index to use for lookup
450  * @nonfault: suppress the fault
451  * @phost: return value for host address
452  * @retaddr: return address for unwinding
453  *
454  * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
455  * the page, and storing the host address for RAM in @phost.
456  *
457  * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
458  * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
459  * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
460  * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
461  */
462 int probe_access_flags(CPUArchState *env, target_ulong addr,
463                        MMUAccessType access_type, int mmu_idx,
464                        bool nonfault, void **phost, uintptr_t retaddr);
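/*
 * Illustrative sketch, not part of this header: non-faulting probing in the
 * style of a vector first-fault load.  mark_first_fault_hyp() and
 * load_via_host_pointer_hyp() are hypothetical; TLB_INVALID_MASK and
 * TLB_MMIO come from the TLB flag definitions in "exec/cpu-all.h".
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         mark_first_fault_hyp(env);
 *     } else if (!(flags & TLB_MMIO)) {
 *         load_via_host_pointer_hyp(env, host);
 *     }
 *
 * With @nonfault true, the TLB_INVALID_MASK branch is taken instead of an
 * exception being raised.
 */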
465 
466 #define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */
467 
468 /* Estimated block size for TB allocation.  */
469 /* ??? The following is based on a 2015 survey of x86_64 host output.
470    Better would seem to be some sort of dynamically sized TB array,
471    adapting to the block sizes actually being produced.  */
472 #if defined(CONFIG_SOFTMMU)
473 #define CODE_GEN_AVG_BLOCK_SIZE 400
474 #else
475 #define CODE_GEN_AVG_BLOCK_SIZE 150
476 #endif
477 
478 /*
479  * Translation Cache-related fields of a TB.
480  * This struct exists just for convenience; we keep track of TB's in a binary
481  * search tree, and the only fields needed to compare TB's in the tree are
482  * @ptr and @size.
483  * Note: the address of search data can be obtained by adding @size to @ptr.
484  */
485 struct tb_tc {
486     const void *ptr;    /* pointer to the translated code */
487     size_t size;
488 };
489 
490 struct TranslationBlock {
491     target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
492     target_ulong cs_base; /* CS base for this block */
493     uint32_t flags; /* flags defining in which context the code was generated */
494     uint32_t cflags;    /* compile flags */
495 
496 /* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
497 #define CF_COUNT_MASK    0x000001ff
498 #define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
499 #define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
500 #define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
501 #define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
502 #define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
503 #define CF_USE_ICOUNT    0x00020000
504 #define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
505 #define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
506 #define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
507 #define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
508 #define CF_CLUSTER_SHIFT 24
509 
510     /* Per-vCPU dynamic tracing state used to generate this TB */
511     uint32_t trace_vcpu_dstate;
512 
513     /*
514      * Above fields used for comparing
515      */
516 
517     /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
518     uint16_t size;
519     uint16_t icount;
520 
521     struct tb_tc tc;
522 
523     /* first and second physical page containing code. The lower bit
524        of the pointer tells the index in page_next[].
525        The list is protected by the lock(s) of the TB's page(s). */
526     uintptr_t page_next[2];
527     tb_page_addr_t page_addr[2];
528 
529     /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
530     QemuSpin jmp_lock;
531 
532     /* The following data are used to directly call another TB from
533      * the code of this one. This can be done either by emitting direct or
534      * indirect native jump instructions. These jumps are reset so that the TB
535      * just continues its execution. The TB can be linked to another one by
536      * setting one of the jump targets (or patching the jump instruction). Only
537      * two such jumps are supported.
538      */
539     uint16_t jmp_reset_offset[2]; /* offset of original jump target */
540 #define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
541     uintptr_t jmp_target_arg[2];  /* target address or offset */
542 
543     /*
544      * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
545      * Each TB can have two outgoing jumps, and therefore can participate
546      * in two lists. The list entries are kept in jmp_list_next[2]. The least
547      * significant bit (LSB) of the pointers in these lists is used to encode
548      * which of the two list entries is to be used in the pointed TB.
549      *
550      * List traversals are protected by jmp_lock. The destination TB of each
551      * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
552      * can be acquired from any origin TB.
553      *
554      * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
555      * being invalidated, so that no further outgoing jumps from it can be set.
556      *
557      * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
558      * to a destination TB that has CF_INVALID set.
559      */
560     uintptr_t jmp_list_head;
561     uintptr_t jmp_list_next[2];
562     uintptr_t jmp_dest[2];
563 };
564 
565 /* Hide the qatomic_read to make code a little easier on the eyes */
566 static inline uint32_t tb_cflags(const TranslationBlock *tb)
567 {
568     return qatomic_read(&tb->cflags);
569 }
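/*
 * Illustrative sketch, not part of this header: given the CF_* layout above,
 * the cluster ID and the instruction budget of a TB can be extracted as
 *
 *     unsigned cluster = (tb_cflags(tb) & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
 *     unsigned max_insns = tb_cflags(tb) & CF_COUNT_MASK;
 */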
570 
571 /* current cflags for hashing/comparison */
572 uint32_t curr_cflags(CPUState *cpu);
573 
574 /* TranslationBlock invalidate API */
575 #if defined(CONFIG_USER_ONLY)
576 void tb_invalidate_phys_addr(target_ulong addr);
577 void tb_invalidate_phys_range(target_ulong start, target_ulong end);
578 #else
579 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
580 #endif
581 void tb_flush(CPUState *cpu);
582 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
583 TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
584                                    target_ulong cs_base, uint32_t flags,
585                                    uint32_t cflags);
586 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
587 
588 /* GETPC is the true target of the return instruction that we'll execute.  */
589 #if defined(CONFIG_TCG_INTERPRETER)
590 extern __thread uintptr_t tci_tb_ptr;
591 # define GETPC() tci_tb_ptr
592 #else
593 # define GETPC() \
594     ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
595 #endif
596 
597 /* The true return address will often point to a host insn that is part of
598    the next translated guest insn.  Adjust the address backward to point to
599    the middle of the call insn.  Subtracting one would do the job except for
600    several compressed mode architectures (arm, mips) which set the low bit
601    to indicate the compressed mode; subtracting two works around that.  It
602    is also the case that there are no host ISAs that contain a call insn
603    smaller than 4 bytes, so we don't worry about special-casing this.  */
604 #define GETPC_ADJ   2
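/*
 * Illustrative sketch, not part of this header: a helper called directly
 * from generated code captures GETPC() itself and passes it down as the
 * "retaddr"/"ra" argument, so a fault inside the load can be unwound back
 * to guest state.  helper_load_quad_hyp is a hypothetical name;
 * cpu_ldq_data_ra() comes from "exec/cpu_ldst.h".
 *
 *     uint64_t helper_load_quad_hyp(CPUArchState *env, target_ulong addr)
 *     {
 *         return cpu_ldq_data_ra(env, addr, GETPC());
 *     }
 *
 * The GETPC_ADJ adjustment is applied by the unwinding code when that host
 * PC is mapped back to a TB, not by the helper itself.
 */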
605 
606 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
607 void assert_no_pages_locked(void);
608 #else
609 static inline void assert_no_pages_locked(void)
610 {
611 }
612 #endif
613 
614 #if !defined(CONFIG_USER_ONLY)
615 
616 /**
617  * iotlb_to_section:
618  * @cpu: CPU performing the access
619  * @index: TCG CPU IOTLB entry
620  *
621  * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
622  * it refers to. @index will have been initially created and returned
623  * by memory_region_section_get_iotlb().
624  */
625 struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
626                                              hwaddr index, MemTxAttrs attrs);
627 #endif
628 
629 #if defined(CONFIG_USER_ONLY)
630 void mmap_lock(void);
631 void mmap_unlock(void);
632 bool have_mmap_lock(void);
633 
634 /**
635  * get_page_addr_code() - user-mode version
636  * @env: CPUArchState
637  * @addr: guest virtual address of guest code
638  *
639  * Returns @addr.
640  */
641 static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
642                                                 target_ulong addr)
643 {
644     return addr;
645 }
646 
647 /**
648  * get_page_addr_code_hostp() - user-mode version
649  * @env: CPUArchState
650  * @addr: guest virtual address of guest code
651  *
652  * Returns @addr.
653  *
654  * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
655  * is kept.
656  */
657 static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
658                                                       target_ulong addr,
659                                                       void **hostp)
660 {
661     if (hostp) {
662         *hostp = g2h_untagged(addr);
663     }
664     return addr;
665 }
666 
667 /**
668  * adjust_signal_pc:
669  * @pc: raw pc from the host signal ucontext_t.
670  * @is_write: host memory operation was write, or read-modify-write.
671  *
672  * Alter @pc as required for unwinding.  Return the type of the
673  * guest memory access -- host reads may be for guest execution.
674  */
675 MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
676 
677 /**
678  * handle_sigsegv_accerr_write:
679  * @cpu: the cpu context
680  * @old_set: the sigset_t from the signal ucontext_t
681  * @host_pc: the host pc, adjusted for the signal
682  * @guest_addr: the guest address of the fault
683  *
684  * Return true if the write fault has been handled, and should be re-tried.
685  */
686 bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
687                                  uintptr_t host_pc, abi_ptr guest_addr);
688 
689 /**
690  * cpu_loop_exit_sigsegv:
691  * @cpu: the cpu context
692  * @addr: the guest address of the fault
693  * @access_type: access was read/write/execute
694  * @maperr: true for invalid page, false for permission fault
695  * @ra: host pc for unwinding
696  *
697  * Use the TCGCPUOps hook to record cpu state, do guest operating system
698  * specific things to raise SIGSEGV, and jump to the main cpu loop.
699  */
700 void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
701                                          MMUAccessType access_type,
702                                          bool maperr, uintptr_t ra);
703 
704 /**
705  * cpu_loop_exit_sigbus:
706  * @cpu: the cpu context
707  * @addr: the guest address of the alignment fault
708  * @access_type: access was read/write/execute
709  * @ra: host pc for unwinding
710  *
711  * Use the TCGCPUOps hook to record cpu state, do guest operating system
712  * specific things to raise SIGBUS, and jump to the main cpu loop.
713  */
714 void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
715                                         MMUAccessType access_type,
716                                         uintptr_t ra);
717 
718 #else
719 static inline void mmap_lock(void) {}
720 static inline void mmap_unlock(void) {}
721 
722 /**
723  * get_page_addr_code() - full-system version
724  * @env: CPUArchState
725  * @addr: guest virtual address of guest code
726  *
727  * If we cannot translate and execute from the entire RAM page, or if
728  * the region is not backed by RAM, returns -1. Otherwise, returns the
729  * ram_addr_t corresponding to the guest code at @addr.
730  *
731  * Note: this function can trigger an exception.
732  */
733 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
734 
735 /**
736  * get_page_addr_code_hostp() - full-system version
737  * @env: CPUArchState
738  * @addr: guest virtual address of guest code
739  *
740  * See get_page_addr_code() (full-system version) for documentation on the
741  * return value.
742  *
743  * Sets *@hostp (when @hostp is non-NULL) as follows.
744  * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
745  * to the host address where @addr's content is kept.
746  *
747  * Note: this function can trigger an exception.
748  */
749 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
750                                         void **hostp);
751 
752 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
753 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
754 
755 MemoryRegionSection *
756 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
757                                   hwaddr *xlat, hwaddr *plen,
758                                   MemTxAttrs attrs, int *prot);
759 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
760                                        MemoryRegionSection *section);
761 #endif
762 
763 #endif
764