xref: /openbmc/qemu/include/exec/exec-all.h (revision 407bc4bf9027f7ac4333e47cd900d773b99a23e3)
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/cpu_ldst.h"
#endif
#include "exec/mmu-access-type.h"
#include "exec/translation-block.h"

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs, for all MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
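/*
 * Illustrative sketch: a target helper implementing a guest per-page
 * invalidate operation might restrict the flush to the MMU indexes it
 * actually uses.  The helper name and the index assignments below are
 * hypothetical; env_cpu() maps the arch state back to its CPUState.
 *
 *     static void helper_invalidate_page(CPUArchState *env, vaddr addr)
 *     {
 *         // Assume MMU index 0 is user mode and index 1 is kernel mode.
 *         tlb_flush_page_by_mmuidx(env_cpu(env), addr,
 *                                  (1 << 0) | (1 << 1));
 *     }
 */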
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits worth of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);
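/*
 * Illustrative sketch: a guest "invalidate a range of pages" operation maps
 * naturally onto this interface.  The helper name and MMU-index layout are
 * hypothetical, and 64 is used for @bits on the assumption that the target
 * compares full 64-bit virtual addresses.
 *
 *     static void helper_invalidate_range(CPUArchState *env,
 *                                         vaddr addr, vaddr num_pages)
 *     {
 *         tlb_flush_range_by_mmuidx(env_cpu(env), addr,
 *                                   num_pages * TARGET_PAGE_SIZE,
 *                                   (1 << 0) | (1 << 1), 64);
 *     }
 */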

/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);
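/*
 * Illustrative sketch: a target's tlb_fill path, after a successful page
 * table walk, might populate the entry along these lines.  The walk itself
 * is omitted and the protection and page-size values are hypothetical.
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr    = paddr & TARGET_PAGE_MASK,
 *         .attrs        = MEMTXATTRS_UNSPECIFIED,
 *         .prot         = PAGE_READ | PAGE_WRITE,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, addr & TARGET_PAGE_MASK, &full);
 */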

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by target-CPU-specific
 * code after it has been called through the tlb_fill() entry point and
 * has performed a successful page table walk to find the physical address
 * and attributes for the virtual address which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif

#if defined(CONFIG_TCG)

/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr.  Raise an exception if the
 * page does not satisfy @access_type.  Raise an exception if the
 * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
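/*
 * Illustrative sketch: a helper that stores several bytes can probe the
 * whole destination once and use the returned host pointer as a fast path
 * when the page is RAM-backed.  This assumes the range does not cross a
 * page boundary; the byte-wise fallback and the zero value stored are only
 * for illustration.
 *
 *     uintptr_t ra = GETPC();
 *     void *host = probe_write(env, addr, len, mmu_idx, ra);
 *     if (host) {
 *         memset(host, 0, len);                // page is backed by RAM
 *     } else {
 *         for (int i = 0; i < len; i++) {      // page requires I/O
 *             cpu_stb_mmuidx_ra(env, addr + i, 0, mmu_idx, ra);
 *         }
 *     }
 */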

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
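/*
 * Illustrative sketch: with @nonfault set, a caller can test whether an
 * address is accessible without raising a guest exception, e.g. for an
 * instruction with first-fault or prefetch semantics.  The early return and
 * the fast-path test are only for illustration.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         return;                // not mapped or not readable: skip quietly
 *     }
 *     if (!(flags & TLB_MMIO) && host) {
 *         // fast path: the data can be read directly via the host pointer
 *     }
 */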

#ifndef CONFIG_USER_ONLY

/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * This function will not fault if @nonfault is set, but will
 * return TLB_INVALID_MASK if the page is not mapped, or is not
 * accessible with @access_type.
 *
 * This function will return TLB_MMIO in order to force the access
 * to be handled out-of-line if plugins wish to instrument the access.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);

/**
 * probe_access_full_mmu:
 * Like probe_access_full, except:
 *
 * This function is intended to be used for page table accesses by
 * the target mmu itself.  Since such page walking happens while
 * handling another potential mmu fault, this function never raises
 * exceptions (akin to @nonfault true for probe_access_full).
 * Likewise this function does not trigger plugin instrumentation.
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte.  When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly.  The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page.  See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}

/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point into
   the middle of the call insn.  Subtracting one would do the job, except that
   several compressed-mode architectures (arm, mips) set the low bit of the
   address to indicate compressed mode; subtracting two works around that.
   No host ISA has a call insn smaller than 4 bytes, so there is no need to
   special-case this.  */
#define GETPC_ADJ   2
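/*
 * Illustrative sketch: GETPC() must be evaluated in the helper that was
 * called directly from generated code, and the value is then passed down as
 * the "retaddr"/"ra" argument of the cpu_ld/st and probe_access families so
 * that a fault can be unwound back to the guest instruction that caused it.
 * The helper itself is hypothetical.
 *
 *     uint32_t helper_load_two_bytes(CPUArchState *env, vaddr addr,
 *                                    int mmu_idx)
 *     {
 *         uintptr_t ra = GETPC();   // capture once, in the outermost helper
 *         uint32_t lo = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
 *         uint32_t hi = cpu_ldub_mmuidx_ra(env, addr + 1, mmu_idx, ra);
 *         return lo | (hi << 8);
 *     }
 */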

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes for the access
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: if non-NULL, location at which to store the host address
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif