xref: /openbmc/qemu/include/exec/exec-all.h (revision 03ff4f8d)
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif

/* Allow inspection of translation results; the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

/**
 * cpu_restore_state:
 * @cpu: the CPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
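
/*
 * Illustrative sketch (not compiled): a typical fault path combining
 * cpu_restore_state() with cpu_loop_exit().  The surrounding function is
 * hypothetical; real targets raise their own target-specific exceptions
 * before leaving the loop.
 */
#if 0
static void hypothetical_fault(CPUState *cpu, uintptr_t host_pc)
{
    /* Roll the guest state back to the insn that faulted ... */
    cpu_restore_state(cpu, host_pc, true);
    /* ... then leave translated code via longjmp back to the main loop. */
    cpu_loop_exit(cpu);
}
#endif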

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
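
/*
 * Illustrative sketch (not compiled): how a helper for an interruptible,
 * long-running instruction might poll for a pending exit.  The per-chunk
 * step function is hypothetical; only the two cpu_loop_exit* calls are
 * this header's API.
 */
#if 0
static void hypothetical_block_copy(CPUState *cpu, uintptr_t ra)
{
    while (copy_one_chunk(cpu)) {       /* hypothetical per-chunk step */
        if (cpu_loop_exit_requested(cpu)) {
            /*
             * Architectural state was written back by the step above, so
             * the same insn can simply restart after the main loop runs.
             */
            cpu_loop_exit_restore(cpu, ra);
        }
    }
}
#endif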

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
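
/*
 * Illustrative sketch (not compiled): a target with two address spaces,
 * loosely modelled on a secure/non-secure split.  The root-region getters
 * and the realize hook are hypothetical.
 */
#if 0
static void hypothetical_cpu_realize(CPUState *cpu)
{
    cpu->num_ases = 2;          /* must be set before the first init call */
    cpu_address_space_init(cpu, 0, "cpu-memory", get_nonsecure_root());
    cpu_address_space_init(cpu, 1, "cpu-secure-memory", get_secure_root());
    /* asidx 0 also becomes the convenience pointer cpu->as. */
}
#endif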
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes.
 * Like tlb_flush_page_all_cpus, except the source vCPU's work is
 * scheduled as safe work, meaning all flushes are complete once the
 * source vCPU's safe work is complete. This will depend on when the
 * guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except the source vCPU's work is scheduled
 * as safe work, meaning all flushes are complete once the source
 * vCPU's safe work is complete. This will depend on when the guest's
 * translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes. Like tlb_flush_page_by_mmuidx_all_cpus, except the source
 * vCPU's work is scheduled as safe work, meaning all flushes are
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified MMU
 * indexes. Like tlb_flush_by_mmuidx_all_cpus, except the source vCPU's
 * work is scheduled as safe work, meaning all flushes are complete once
 * the source vCPU's safe work is complete. This will depend on when the
 * guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

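/*
 * Illustrative sketch (not compiled): building an @idxmap.  Each bit
 * selects one MMU index; the index assignments below are hypothetical,
 * since each target defines its own MMU index layout.
 */
#if 0
static void hypothetical_flush_user_and_kernel(CPUState *cpu,
                                               target_ulong addr)
{
    enum { MMU_USER_IDX = 0, MMU_KERNEL_IDX = 1 };  /* hypothetical layout */
    uint16_t idxmap = (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX);

    /* Drop any cached translation of @addr's page in both MMU modes. */
    tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
}
#endif
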
/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but only the low @bits of the
 * address are compared when matching TLB entries.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                        target_ulong len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits);

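/*
 * Illustrative sketch (not compiled): flushing a 64 KiB region for one
 * hypothetical MMU index, matching only the low 48 bits of each page, as
 * a target with 48-bit virtual addresses might when a block of its page
 * table is invalidated.
 */
#if 0
static void hypothetical_flush_region(CPUState *cpu, target_ulong base)
{
    tlb_flush_range_by_mmuidx(cpu, base, 64 * 1024, 1 << 0, 48);
}
#endif
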
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
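
/*
 * Illustrative sketch (not compiled): the general shape of a target's
 * tlb_fill hook.  The page-table walker and the fault-raising routine are
 * hypothetical; only the calls into this header's API are real.
 */
#if 0
static bool hypothetical_tlb_fill(CPUState *cpu, vaddr address, int size,
                                  MMUAccessType access_type, int mmu_idx,
                                  bool probe, uintptr_t retaddr)
{
    hwaddr paddr;
    int prot;

    if (hypothetical_page_walk(cpu, address, access_type, mmu_idx,
                               &paddr, &prot)) {
        /* Cache the successful translation for future accesses. */
        tlb_set_page(cpu, address & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;   /* the caller only wanted to know, do not fault */
    }
    /* Unwind to the guest insn and deliver a target-specific fault. */
    cpu_restore_state(cpu, retaddr, true);
    hypothetical_raise_page_fault(cpu, address, access_type);
    /* not reached */
}
#endif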
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                             target_ulong len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      target_ulong addr,
                                                      target_ulong len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             target_ulong addr,
                                                             target_ulong len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr.  Raise an exception if the
 * page does not satisfy @access_type.  Raise an exception if the
 * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

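/*
 * Illustrative sketch (not compiled): validating a store's destination up
 * front so a multi-word helper either completes fully or faults before any
 * byte is written.  The helper itself is hypothetical; probe_write() and
 * the cpu_ldst.h store helper are real API.
 */
#if 0
static void hypothetical_store_pair(CPUArchState *env, target_ulong addr,
                                    uint64_t lo, uint64_t hi,
                                    int mmu_idx, uintptr_t ra)
{
    /* Faults, watchpoints and dirty-marking all happen here, once. */
    void *host = probe_write(env, addr, 16, mmu_idx, ra);

    if (host) {
        /* RAM-backed: both words can now be written without faulting. */
        memcpy(host, &lo, 8);
        memcpy((char *)host + 8, &hi, 8);
    } else {
        /* I/O-backed: fall back to the slow-path store helpers. */
        cpu_stq_le_mmuidx_ra(env, addr, lo, mmu_idx, ra);
        cpu_stq_le_mmuidx_ra(env, addr + 8, hi, mmu_idx, ra);
    }
}
#endif
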
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);

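/*
 * Illustrative sketch (not compiled): a no-fault probe, as a first-fault
 * vector load might use to decide between fast and slow paths.  Only the
 * probe_access_flags() call is real API; the wrapper is hypothetical.
 */
#if 0
static bool hypothetical_can_load_fast(CPUArchState *env, target_ulong addr,
                                       int mmu_idx, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
                                   true, &host, ra);

    /* Unmapped, watched or mmio-like pages all need the slow path. */
    return flags == 0 && host != NULL;
}
#endif
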
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};
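
/*
 * Illustrative sketch (not compiled): one way two tb_tc keys could be
 * ordered in such a tree, treating each block as the host-address range
 * [ptr, ptr + size).  The comparator name and exact semantics are
 * hypothetical, not the tree's actual comparison routine.
 */
#if 0
static int hypothetical_tb_tc_cmp(const struct tb_tc *a,
                                  const struct tb_tc *b)
{
    if ((const char *)a->ptr + a->size <= (const char *)b->ptr) {
        return -1;              /* a ends before b begins */
    }
    if ((const char *)b->ptr + b->size <= (const char *)a->ptr) {
        return 1;               /* b ends before a begins */
    }
    return 0;                   /* overlapping ranges compare equal */
}
#endif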

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00020000
#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    /*
     * The above fields are used for comparison.
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /* First and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s). */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
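
/*
 * Illustrative sketch (not compiled): decoding one LSB-tagged entry from
 * the jump lists described above.  The helper name is hypothetical; the
 * tagging scheme is the one documented in the struct comment.
 */
#if 0
static TranslationBlock *hypothetical_jmp_untag(uintptr_t entry, int *n)
{
    *n = entry & 1;             /* which of the two list slots in the TB */
    return (TranslationBlock *)(entry & ~(uintptr_t)1);
}
#endif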

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed-mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that no host ISA contains a call insn smaller than
   4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
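
/*
 * Illustrative sketch (not compiled): a TCG helper capturing its return
 * address with GETPC() so a fault inside the memory access can be unwound
 * to the guest insn.  The helper is hypothetical; the load helper is from
 * exec/cpu_ldst.h.
 */
#if 0
uint64_t HELPER(hypothetical_load)(CPUArchState *env, target_ulong addr)
{
    uintptr_t ra = GETPC();     /* must be taken in the outermost helper */
    return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
}
#endif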

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address backing @addr (may be NULL)
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding.  Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

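/*
 * Illustrative sketch (not compiled): the skeleton of a host SIGSEGV
 * handler built on the hooks above plus cpu_loop_exit_sigsegv() below.
 * Extracting @pc and @is_write from the ucontext_t is host-specific and
 * elided here; the wrapper itself is hypothetical.
 */
#if 0
static void hypothetical_host_sigsegv(CPUState *cpu, siginfo_t *info,
                                      uintptr_t pc, bool is_write,
                                      sigset_t *old_set)
{
    uintptr_t guest_addr = h2g(info->si_addr);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    if (access_type == MMU_DATA_STORE &&
        handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
        return;     /* fault came from TB page protection; retry the insn */
    }
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type,
                          info->si_code == SEGV_MAPERR, pc);
}
#endif
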
/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                         MMUAccessType access_type,
                                         bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                        MMUAccessType access_type,
                                        uintptr_t ra);

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address backing @addr (may be NULL)
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif