/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#include "exec/cpu_ldst.h"
#include "sysemu/cpus.h"

/* Allow seeing translation results; the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
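
/*
 * Illustrative sketch (not part of the original header): a target's fault
 * path might unwind guest state from the host return address of the access
 * that trapped. "ra" would typically come from GETPC() in the helper that
 * performed the access; the helper name here is hypothetical.
 */
#if 0
static void example_handle_fault(CPUState *cpu, uintptr_t ra)
{
    /* Restore pc/flags/icount to the guest insn that faulted, provided
     * ra points into translated code; returns false otherwise. */
    if (cpu_restore_state(cpu, ra, true)) {
        /* cpu->exception_index etc. can now be filled in before exiting */
    }
}
#endif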

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
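
/*
 * Illustrative sketch (not part of the original header): a helper for a
 * hypothetical long-running, interruptible instruction checks for a pending
 * exit request at points where guest state is architecturally consistent.
 * example_more_work() and example_step() are placeholders.
 */
#if 0
static void example_interruptible_insn(CPUState *cpu)
{
    while (example_more_work(cpu)) {
        example_step(cpu);              /* one architecturally complete unit */
        if (cpu_loop_exit_requested(cpu)) {
            cpu_loop_exit(cpu);         /* return to the main loop early */
        }
    }
}
#endif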

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
#endif
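
/*
 * Illustrative sketch (not part of the original header): a target's realize
 * hook registering two address spaces. The helper and the MemoryRegion
 * arguments are hypothetical.
 */
#if 0
static void example_cpu_register_ases(CPUState *cs, MemoryRegion *secure_mr,
                                      MemoryRegion *ns_mr)
{
    cs->num_ases = 2;                           /* set before the first call */
    cpu_address_space_init(cs, 0, "cpu-secure", secure_mr); /* becomes cpu->as */
    cpu_address_space_init(cs, 1, "cpu-ns", ns_mr);
}
#endif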

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus, except that the source vCPU's work
 * is scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
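
/*
 * Illustrative sketch (not part of the original header): a target helper for
 * a hypothetical "invalidate TLB" operation, flushing either a single page or
 * the whole TLB of the calling vCPU.
 */
#if 0
static void example_tlbi(CPUState *cs, target_ulong va, bool whole_tlb)
{
    if (whole_tlb) {
        tlb_flush(cs);              /* drop every entry, all MMU indexes */
    } else {
        tlb_flush_page(cs, va);     /* drop just this page, all MMU indexes */
    }
}
#endif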
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
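
/*
 * Illustrative sketch (not part of the original header): @idxmap is a bitmap,
 * so bit N selects MMU index N. Here one page is flushed from two hypothetical
 * indexes (0 = user, 1 = kernel) while the other indexes keep their entries.
 */
#if 0
static void example_flush_page_user_kernel(CPUState *cs, target_ulong va)
{
    tlb_flush_page_by_mmuidx(cs, va, (1 << 0) | (1 << 1));
}
#endif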
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
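
/*
 * Illustrative sketch (not part of the original header): the tail of a
 * hypothetical target page-walk handler installing the translation it just
 * resolved. "ExampleWalkResult", "walk" and the helper name are placeholders
 * for whatever the target's walker actually produces.
 */
#if 0
static void example_install_translation(CPUState *cs, target_ulong vaddr,
                                        int mmu_idx, ExampleWalkResult walk)
{
    tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
                            walk.paddr & TARGET_PAGE_MASK, walk.attrs,
                            walk.prot, mmu_idx, TARGET_PAGE_SIZE);
}
#endif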
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
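
/*
 * Illustrative sketch (not part of the original header): a helper that must
 * not fault halfway through a multi-word store probes the whole destination
 * first, so any fault is raised before data is modified. The helper name is
 * hypothetical; cpu_stq_data_ra() comes from cpu_ldst.h.
 */
#if 0
static void example_store_pair(CPUArchState *env, target_ulong addr,
                               uint64_t lo, uint64_t hi,
                               int mmu_idx, uintptr_t ra)
{
    probe_write(env, addr, 16, mmu_idx, ra);    /* faults here, or not at all */
    cpu_stq_data_ra(env, addr, lo, ra);
    cpu_stq_data_ra(env, addr + 8, hi, ra);
}
#endif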

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
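
/*
 * Illustrative sketch (not part of the original header): recovering the
 * cluster ID that was packed into the top bits of a TB's compile flags.
 */
#if 0
static inline uint32_t example_tb_cluster_id(const TranslationBlock *tb)
{
    return (tb->cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
}
#endif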

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}
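
/*
 * Illustrative sketch (not part of the original header): translation-time
 * code can test the compile flags of the TB being generated and, for a
 * parallel context, choose an atomic rather than a non-atomic sequence.
 * The function is a placeholder.
 */
#if 0
static void example_gen_op(TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PARALLEL) {
        /* other vCPUs may run concurrently: emit the atomic variant */
    } else {
        /* single-threaded round-robin execution: the plain variant is fine */
    }
}
#endif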

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
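
/*
 * Illustrative sketch (not part of the original header): looking up an
 * existing TB for the current execution state and generating one on a miss.
 * The helper is hypothetical and the locking that tb_gen_code() normally
 * requires is omitted here.
 */
#if 0
static TranslationBlock *example_lookup_or_translate(CPUState *cpu,
                                                     target_ulong pc,
                                                     target_ulong cs_base,
                                                     uint32_t flags)
{
    TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags,
                                            curr_cflags());
    if (tb == NULL) {
        tb = tb_gen_code(cpu, pc, cs_base, flags, curr_cflags());
    }
    return tb;
}
#endif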

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
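
/*
 * Illustrative sketch (not part of the original header): GETPC() must be
 * captured in the outermost helper called from generated code, so that a
 * faulting access can be unwound back to guest state. The helper name is
 * hypothetical; cpu_ldq_data_ra() comes from cpu_ldst.h.
 */
#if 0
static uint64_t example_helper_load64(CPUArchState *env, target_ulong addr)
{
    uintptr_t ra = GETPC();                 /* host return address into the TB */
    return cpu_ldq_data_ra(env, addr, ra);  /* a fault unwinds via ra */
}
#endif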

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address, may be NULL
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address, may be NULL
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

/* vl.c */
extern int singlestep;

#endif