/* xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 71b9a45330fe220d11a7a187efc477745dffbd26) */
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(this_cpu) do {                             \
        if (DEBUG_TLB_GATE) {                                         \
            g_assert(!(this_cpu)->created ||                          \
                     qemu_cpu_is_self(this_cpu));                     \
        }                                                             \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
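
/* An idxmap is simply a bitmask of MMU mode indexes; for example,
 * (1 << 0) | (1 << 2) asks the *_by_mmuidx functions below to touch
 * only MMU modes 0 and 2.
 */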

/* flush_all_helper: run fn asynchronously on all cpus other than src
 *
 * The work is queued with async_run_on_cpu() on every other vCPU; the
 * caller is responsible for handling the calling (src) vCPU itself,
 * either by running fn directly or, as the *_synced variants below do,
 * by queueing it as "safe" work so that a synchronisation point is
 * created where all queued work is finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

/* statistics */
int tlb_flush_count;

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    tlb_debug("(count: %d)\n", tlb_flush_count++);

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}
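
/* tlb_flush_all_cpus() above queues the flush on every other vCPU and runs
 * it immediately on the calling one; tlb_flush_all_cpus_synced() below
 * instead queues the source vCPU's flush as "safe" work, so it only takes
 * effect once all vCPUs have reached a synchronisation point (typically
 * after the source vCPU finishes its current TB).
 */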

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    tlb_debug("done\n");

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}
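
/* Example use of tlb_flush_by_mmuidx() (illustrative only): to drop just
 * the translations of MMU mode 1 on the current vCPU while leaving every
 * other mode's entries intact:
 *
 *     tlb_flush_by_mmuidx(cpu, 1 << 1);
 */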

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                         uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for an
 * mmuidx bit mask, we need to fail the build if we cannot do that.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
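
/* The page-plus-mmuidx async workers below receive both values packed into
 * a single target_ulong: because the address is page aligned, its low
 * TARGET_PAGE_BITS are free to carry the idxmap, i.e.
 *
 *     data.target_ptr = (addr & TARGET_PAGE_MASK) | idxmap;
 *
 * and the workers split it apart again with TARGET_PAGE_MASK and
 * ALL_MMUIDX_BITS.
 */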

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;
    int i;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the physical page 'ram_addr' are no
   longer tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Because we want other vCPUs to respond to changes straight away we
 * update the te->addr_write field atomically. If the TLB entry has
 * been changed by the vCPU in the mean time we skip the update.
 *
 * As this function uses atomic accesses we also need to ensure
 * updates to tlb_entries follow the same access rules. We don't need
 * to worry about this for oversized guests as MTTCG is disabled for
 * them.
 */

static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                                  uintptr_t length)
{
#if TCG_OVERSIZED_GUEST
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
#else
    /* paired with atomic_mb_set in tlb_set_page_with_attrs */
    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
    uintptr_t addr = orig_addr;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += atomic_read(&tlb_entry->addend);
        if ((addr - start) < length) {
            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
        }
    }
#endif
}

/* For atomic correctness when running MTTCG we need to use the right
 * primitives when copying entries */
static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
                                   bool atomic_set)
{
#if TCG_OVERSIZED_GUEST
    *d = *s;
#else
    if (atomic_set) {
        d->addr_read = s->addr_read;
        d->addr_code = s->addr_code;
        atomic_set(&d->addend, atomic_read(&s->addend));
        /* Pairs with flag setting in tlb_reset_dirty_range */
        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
    } else {
        d->addr_read = s->addr_read;
        d->addr_write = atomic_read(&s->addr_write);
        d->addr_code = s->addr_code;
        d->addend = atomic_read(&s->addend);
    }
#endif
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU). As such care needs to be taken that we don't
 * dangerously race with another vCPU update. The only thing actually
 * updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
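
/* Worked example of the widening loop in tlb_add_large_page() above,
 * assuming a 32-bit target_ulong for brevity: with a 2 MiB page already
 * tracked at 0x00200000 (tlb_flush_addr 0x00200000, tlb_flush_mask
 * 0xffe00000), adding another 2 MiB page at 0x00800000 shifts the mask
 * left until both addresses agree under it, leaving tlb_flush_mask
 * 0xff000000 and tlb_flush_addr 0x00000000.  Any later page flush that
 * falls inside that 16 MiB window then degrades to a full TLB flush.
 */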

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, *tv, tn;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    /* do not discard the translation in te, evict it into a victim tlb */
    tv = &env->tlb_v_table[mmu_idx][vidx];

    /* addr_write can race with tlb_reset_dirty_range */
    copy_tlb_helper(tv, te, true);

    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
    }

    /* Pairs with flag setting in tlb_reset_dirty_range */
    copy_tlb_helper(te, &tn, true);
    /* atomic_mb_set(&te->addr_write, write_address); */
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
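
/* A typical caller of tlb_set_page() is a target's tlb_fill()/page-table
 * walker, which maps one page at a time along the lines of (illustrative
 * only; the variable names are not from this file):
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */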

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_read(mr, physaddr, &val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    memory_region_dispatch_write(mr, physaddr, val, size, iotlbentry->attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            copy_tlb_helper(&tmptlb, tlb, false);
            copy_tlb_helper(tlb, vtlb, true);
            copy_tlb_helper(vtlb, &tmptlb, true);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    int mmu_idx, index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env);
    CPUIOTLBEntry *iotlbentry;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env, true);
    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
        if (!VICTIM_TLB_HIT(addr_read, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0);
        }
    }
    iotlbentry = &env->iotlb[mmu_idx][index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
        /* The CPU's unassigned access hook might have longjumped out
         * with an exception. If it didn't (or there was no hook) then
         * we can't proceed further.
         */
        report_bad_exec(cpu, addr);
        exit(1);
    }
    p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
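
/* probe_write() is intended for helpers that must know up front whether a
 * store will fault, e.g. before a multi-part store that cannot be rolled
 * back halfway (illustrative call):
 *
 *     probe_write(env, addr, mmu_idx, GETPC());
 *
 * The retaddr lets tlb_fill() attribute any resulting fault to the guest
 * instruction that triggered it.
 */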

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations or I/O operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write;
    }

    /* Check notdirty */
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        tlb_set_dirty(ENV_GET_CPU(env), addr);
        tlb_addr = tlb_addr & ~TLB_NOTDIRTY;
    }

    /* Notice an IO access  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != tlb_addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    return (void *)((uintptr_t)addr + tlbe->addend);

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
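
/* Each inclusion of softmmu_template.h with a different DATA_SIZE expands
 * into the out-of-line load/store helpers of that width for the "_mmu"
 * suffix defined above (roughly helper_le_ldul_mmu(), helper_le_stl_mmu()
 * and friends for DATA_SIZE 4), which the TCG backends call from the
 * generated slow path.
 */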

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)
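
/* The atomic_template.h inclusions below expand ATOMIC_NAME into the
 * cmpxchg/xchg/fetch-op helpers for each size (roughly
 * helper_atomic_cmpxchgl_le_mmu() for the 4-byte little-endian case), all
 * of which use atomic_mmu_lookup() above to obtain a host address.
 */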

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS
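
/* For the code-access (_cmmu) helpers GETPC() is forced to 0: they are
 * typically reached from the translator during code fetch rather than from
 * the middle of a generated TB, so tlb_fill() sees retaddr == 0 and does
 * not attempt to unwind guest state from a host return address.
 */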

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"