/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/abi_ptr.h"
#include "exec/cpu_ldst.h"
#endif
#include "exec/mmu-access-type.h"
#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}
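
/*
 * Usage sketch (hypothetical target code, not part of this header): a
 * target implementing an interruptible string/copy instruction might
 * check this between elements, after writing back partial state:
 *
 *     while (env->count) {
 *         copy_one_element(env);                 // hypothetical helper
 *         env->count--;
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, ra);     // back to the main loop
 *         }
 *     }
 */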

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
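
/*
 * Usage sketch (hypothetical): a target helper emulating a guest
 * "invalidate single TLB entry" instruction might call:
 *
 *     void helper_tlbi_page(CPUArchState *env, target_ulong addr)
 *     {
 *         tlb_flush_page(env_cpu(env), addr);
 *     }
 *
 * whereas a full MMU reconfiguration (e.g. a write to a page-table
 * base register) would typically use tlb_flush(env_cpu(env)).
 * helper_tlbi_page is an illustrative name, not a real helper.
 */
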
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs, for all MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
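
/*
 * @idxmap has one bit per MMU index. For example, a hypothetical
 * target with separate kernel and user indexes could flush both with:
 *
 *     tlb_flush_by_mmuidx(cs, (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 *
 * (MMU_KERNEL_IDX and MMU_USER_IDX are illustrative target-defined names.)
 */
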
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);
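
/*
 * Sketch (hypothetical target code): a tlb_fill hook might use this
 * interface after a successful page table walk, roughly as follows;
 * pte_paddr and prot stand in for the walk's results:
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr    = pte_paddr,
 *         .attrs        = MEMTXATTRS_UNSPECIFIED,
 *         .prot         = prot,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, addr & TARGET_PAGE_MASK, &full);
 */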

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/**
 * tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr.  Raise an exception if the
 * page does not satisfy @access_type.  Raise an exception if the
 * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
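
/*
 * Usage sketch (hypothetical): a helper emulating a multi-byte store
 * that must not fault halfway through can probe the whole range up
 * front, so exceptions, watchpoints and dirty tracking are handled
 * before any data is written:
 *
 *     void *host = probe_write(env, addr, 16, mmu_idx, GETPC());
 *     if (host) {
 *         memcpy(host, buf, 16);   // fast path: RAM-backed page
 *     } else {
 *         // page requires I/O; fall back to byte-wise accesses
 *     }
 */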

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
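
/*
 * Usage sketch (hypothetical): a no-fault probe, e.g. for an
 * instruction that tests accessibility without trapping:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // page not accessible; report failure instead of faulting
 *     } else if (flags & TLB_MMIO) {
 *         // not RAM-backed; the host pointer is not usable
 *     }
 */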

#ifndef CONFIG_USER_ONLY
/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);

/**
 * probe_access_full_mmu() - Like probe_access_full, except it cannot
 * fault and does not trigger instrumentation.
 *
 * @env: CPUArchState
 * @addr: virtual address to probe
 * @size: size of the probe
 * @access_type: read, write or execute permission
 * @mmu_idx: softmmu index
 * @phost: ptr to return value host address or NULL
 * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * Returns: TLB flags as per probe_access_flags()
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);

#endif

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte.  When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly.  The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page.  See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}

/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/*
 * The true return address will often point to a host insn that is part of
 * the next translated guest insn.  Adjust the address backward to point to
 * the middle of the call insn.  Subtracting one would do the job except for
 * several compressed mode architectures (arm, mips) which set the low bit
 * to indicate the compressed mode; subtracting two works around that.  It
 * is also the case that there are no host isas that contain a call insn
 * smaller than 4 bytes, so we don't worry about special-casing this.
 */
#define GETPC_ADJ   2
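
/*
 * Usage sketch: GETPC() must be evaluated in a helper that is called
 * directly from generated code, and is then passed down as the
 * "retaddr" argument of functions such as probe_access() or the
 * cpu_*_data_ra() accessors, e.g.:
 *
 *     uint64_t helper_my_load(CPUArchState *env, target_ulong addr)
 *     {
 *         return cpu_ldq_data_ra(env, addr, GETPC());
 *     }
 *
 * (helper_my_load is an illustrative name.)
 */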

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes of the access
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address, may be NULL
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if defined(CONFIG_USER_ONLY)
void TSA_NO_TSA mmap_lock(void);
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);

static inline void mmap_unlock_guard(void *unused)
{
    mmap_unlock();
}

#define WITH_MMAP_LOCK_GUARD()                                            \
    for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard)))  \
         = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
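
/*
 * Usage sketch: the guard holds mmap_lock for the body and releases it
 * on every exit path, including early return:
 *
 *     WITH_MMAP_LOCK_GUARD() {
 *         // operate on the user-only page tables
 *         if (error) {
 *             return -1;   // lock dropped by the cleanup handler
 *         }
 *     }
 */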

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding.  Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);
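
/*
 * Usage sketch (hypothetical): a user-only fault path might raise
 * SIGSEGV for an unmapped page like so, where page_mapped stands in
 * for the target's own lookup:
 *
 *     if (!page_mapped) {
 *         cpu_loop_exit_sigsegv(cs, addr, MMU_DATA_LOAD,
 *                               true, retaddr);   // maperr: no mapping
 *     }
 */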

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
#define WITH_MMAP_LOCK_GUARD()

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif