/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/cpu_ldst.h"
#endif
#include "exec/mmu-access-type.h"
#include "exec/translation-block.h"

#if defined(CONFIG_TCG)
#include "accel/tcg/getpc.h"

/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
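
/*
 * Illustrative sketch (not part of this header): a hypothetical target
 * helper might probe a multi-byte store up front, so that any fault is
 * raised before the operation is partially performed. The helper name
 * and the way @mmu_idx reaches it are assumptions of the example.
 *
 *     void helper_store_pair(CPUArchState *env, vaddr addr, int mmu_idx,
 *                            uint64_t lo, uint64_t hi)
 *     {
 *         void *host = probe_write(env, addr, 16, mmu_idx, GETPC());
 *
 *         if (host) {
 *             // RAM-backed page: both words may be written through the
 *             // returned host pointer.
 *         } else {
 *             // NULL means the page requires I/O; fall back to the
 *             // slow-path accessors instead.
 *         }
 *     }
 */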

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
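
/*
 * Illustrative sketch (not part of this header): a non-faulting probe
 * used to test whether a page is readable before committing to an
 * operation. The surrounding helper and the source of @mmu_idx are
 * assumed for the example.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *
 *     if (flags & TLB_INVALID_MASK) {
 *         // Not mapped or not readable; no exception was raised
 *         // because nonfault was true.
 *     } else if (flags & (TLB_MMIO | TLB_WATCHPOINT)) {
 *         // Take the slow path: MMIO access or pending watchpoint.
 *     } else {
 *         // Plain RAM: data may be read directly via host.
 *     }
 */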

#ifndef CONFIG_USER_ONLY

/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * This function will not fault if @nonfault is set, but will
 * return TLB_INVALID_MASK if the page is not mapped, or is not
 * accessible with @access_type.
 *
 * This function will return TLB_MMIO in order to force the access
 * to be handled out-of-line if plugins wish to instrument the access.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);
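
/*
 * Illustrative sketch (not part of this header): using the transient
 * CPUTLBEntryFull to recover the physical address and transaction
 * attributes behind a virtual address. The values are copied out
 * immediately, since any further TLB activity on @mmu_idx may
 * invalidate the structure.
 *
 *     void *host;
 *     CPUTLBEntryFull *full;
 *     int flags = probe_access_full(env, addr, size, MMU_DATA_LOAD,
 *                                   mmu_idx, true, &host, &full, GETPC());
 *
 *     if (!(flags & TLB_INVALID_MASK)) {
 *         hwaddr phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
 *         MemTxAttrs attrs = full->attrs;
 *         // ... use phys and attrs here; do not dereference full later.
 *     }
 */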

/**
 * probe_access_full_mmu:
 * Like probe_access_full, except:
 *
 * This function is intended to be used for page table accesses by
 * the target mmu itself. Since such page walking happens while
 * handling another potential mmu fault, this function never raises
 * exceptions (akin to @nonfault true for probe_access_full).
 * Likewise this function does not trigger plugin instrumentation.
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);
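
/*
 * Illustrative sketch (not part of this header): a guest page-table
 * walker loading a descriptor through a non-faulting probe. A failed
 * probe never raises an exception; the walker reports the fault in its
 * own format. The descriptor size, endianness and mmu index are
 * assumptions of the example.
 *
 *     void *host;
 *     CPUTLBEntryFull *full;
 *     int flags = probe_access_full_mmu(env, pte_addr, 8, MMU_DATA_LOAD,
 *                                       ptw_mmu_idx, &host, &full);
 *
 *     if (flags & TLB_INVALID_MASK) {
 *         // Walk failed: report a translation fault to the guest.
 *     } else if (host) {
 *         uint64_t pte = ldq_le_p(host);  // descriptor resides in RAM
 *     } else {
 *         // Descriptor lives in MMIO; read it via the address space API.
 *     }
 */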

#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}
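
/*
 * Illustrative sketch of the accessors above (not part of this header):
 * a TB that fits entirely within one guest page reports -1 for its
 * second page, which callers use to decide whether the block must be
 * tracked on two pages.
 *
 *     if (tb_page_addr1(tb) == -1) {
 *         // TB is confined to a single guest page.
 *     } else {
 *         // TB crosses a page boundary; both pages must be considered
 *         // when invalidating by physical range.
 *     }
 */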

/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
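
/*
 * Illustrative sketch (not part of this header): the value produced at
 * TLB fill time by memory_region_section_get_iotlb() (declared below)
 * is stored in the TLB and later converted back into a section when an
 * access must be handled as I/O.
 *
 *     // at TLB fill time
 *     hwaddr iotlb = memory_region_section_get_iotlb(cpu, section);
 *
 *     // at access time
 *     MemoryRegionSection *sec = iotlb_to_section(cpu, iotlb, attrs);
 */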
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: if non-NULL, output for the host address of the guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}
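
/*
 * Illustrative sketch (not part of this header): a translator checking
 * whether guest code at @pc can be read directly from host RAM. The
 * variable names are assumptions of the example.
 *
 *     void *host;
 *     tb_page_addr_t phys = get_page_addr_code_hostp(env, pc, &host);
 *
 *     if (phys == -1) {
 *         // Not RAM-backed: fetch instructions through the MMU
 *         // accessors and do not cache the resulting TB by page.
 *     } else {
 *         // RAM-backed: instructions may be read directly from host.
 *     }
 */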

#if !defined(CONFIG_USER_ONLY)

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif