xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 2d54f19401bc54b3b56d1cc44c96e4087b604b97)
1d9bb58e5SYang Zhong /*
2d9bb58e5SYang Zhong  *  Common CPU TLB handling
3d9bb58e5SYang Zhong  *
4d9bb58e5SYang Zhong  *  Copyright (c) 2003 Fabrice Bellard
5d9bb58e5SYang Zhong  *
6d9bb58e5SYang Zhong  * This library is free software; you can redistribute it and/or
7d9bb58e5SYang Zhong  * modify it under the terms of the GNU Lesser General Public
8d9bb58e5SYang Zhong  * License as published by the Free Software Foundation; either
9d9bb58e5SYang Zhong  * version 2 of the License, or (at your option) any later version.
10d9bb58e5SYang Zhong  *
11d9bb58e5SYang Zhong  * This library is distributed in the hope that it will be useful,
12d9bb58e5SYang Zhong  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13d9bb58e5SYang Zhong  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14d9bb58e5SYang Zhong  * Lesser General Public License for more details.
15d9bb58e5SYang Zhong  *
16d9bb58e5SYang Zhong  * You should have received a copy of the GNU Lesser General Public
17d9bb58e5SYang Zhong  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18d9bb58e5SYang Zhong  */
19d9bb58e5SYang Zhong 
20d9bb58e5SYang Zhong #include "qemu/osdep.h"
21d9bb58e5SYang Zhong #include "qemu/main-loop.h"
22d9bb58e5SYang Zhong #include "cpu.h"
23d9bb58e5SYang Zhong #include "exec/exec-all.h"
24d9bb58e5SYang Zhong #include "exec/memory.h"
25d9bb58e5SYang Zhong #include "exec/address-spaces.h"
26d9bb58e5SYang Zhong #include "exec/cpu_ldst.h"
27d9bb58e5SYang Zhong #include "exec/cputlb.h"
28d9bb58e5SYang Zhong #include "exec/memory-internal.h"
29d9bb58e5SYang Zhong #include "exec/ram_addr.h"
30d9bb58e5SYang Zhong #include "tcg/tcg.h"
31d9bb58e5SYang Zhong #include "qemu/error-report.h"
32d9bb58e5SYang Zhong #include "exec/log.h"
33d9bb58e5SYang Zhong #include "exec/helper-proto.h"
34d9bb58e5SYang Zhong #include "qemu/atomic.h"
35d9bb58e5SYang Zhong 
36d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
37d9bb58e5SYang Zhong /* #define DEBUG_TLB */
38d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */
39d9bb58e5SYang Zhong 
40d9bb58e5SYang Zhong #ifdef DEBUG_TLB
41d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1
42d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG
43d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 1
44d9bb58e5SYang Zhong # else
45d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 0
46d9bb58e5SYang Zhong # endif
47d9bb58e5SYang Zhong #else
48d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0
49d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0
50d9bb58e5SYang Zhong #endif
51d9bb58e5SYang Zhong 
52d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) do { \
53d9bb58e5SYang Zhong     if (DEBUG_TLB_LOG_GATE) { \
54d9bb58e5SYang Zhong         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
55d9bb58e5SYang Zhong                       ## __VA_ARGS__); \
56d9bb58e5SYang Zhong     } else if (DEBUG_TLB_GATE) { \
57d9bb58e5SYang Zhong         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
58d9bb58e5SYang Zhong     } \
59d9bb58e5SYang Zhong } while (0)
60d9bb58e5SYang Zhong 
61d9bb58e5SYang Zhong #define assert_cpu_is_self(this_cpu) do {                         \
62d9bb58e5SYang Zhong         if (DEBUG_TLB_GATE) {                                     \
63d9bb58e5SYang Zhong             g_assert(!this_cpu->created || qemu_cpu_is_self(this_cpu)); \
64d9bb58e5SYang Zhong         }                                                         \
65d9bb58e5SYang Zhong     } while (0)
66d9bb58e5SYang Zhong 
67d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a
68d9bb58e5SYang Zhong  * target_ulong even on 32 bit builds */
69d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
70d9bb58e5SYang Zhong 
71d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
72d9bb58e5SYang Zhong  */
73d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
74d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
75d9bb58e5SYang Zhong 
76d9bb58e5SYang Zhong /* flush_all_helper: queue fn as async work on every cpu except src
77d9bb58e5SYang Zhong  *
78d9bb58e5SYang Zhong  * The source cpu itself is not handled here: callers either run fn on
79d9bb58e5SYang Zhong  * it directly (the *_all_cpus variants) or queue it as "safe" work (the
80d9bb58e5SYang Zhong  * *_all_cpus_synced variants), creating a synchronisation point where
81d9bb58e5SYang Zhong  * all queued work is finished before execution starts again.
82d9bb58e5SYang Zhong  */
83d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
84d9bb58e5SYang Zhong                              run_on_cpu_data d)
85d9bb58e5SYang Zhong {
86d9bb58e5SYang Zhong     CPUState *cpu;
87d9bb58e5SYang Zhong 
88d9bb58e5SYang Zhong     CPU_FOREACH(cpu) {
89d9bb58e5SYang Zhong         if (cpu != src) {
90d9bb58e5SYang Zhong             async_run_on_cpu(cpu, fn, d);
91d9bb58e5SYang Zhong         }
92d9bb58e5SYang Zhong     }
93d9bb58e5SYang Zhong }
94d9bb58e5SYang Zhong 
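/* Sum the per-vCPU counts of full TLB flushes. The unlocked traversal
 * makes this an approximate statistic, which is sufficient for reporting
 * purposes.
 */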
9583974cf4SEmilio G. Cota size_t tlb_flush_count(void)
9683974cf4SEmilio G. Cota {
9783974cf4SEmilio G. Cota     CPUState *cpu;
9883974cf4SEmilio G. Cota     size_t count = 0;
9983974cf4SEmilio G. Cota 
10083974cf4SEmilio G. Cota     CPU_FOREACH(cpu) {
10183974cf4SEmilio G. Cota         CPUArchState *env = cpu->env_ptr;
10283974cf4SEmilio G. Cota 
10383974cf4SEmilio G. Cota         count += atomic_read(&env->tlb_flush_count);
10483974cf4SEmilio G. Cota     }
10583974cf4SEmilio G. Cota     return count;
10683974cf4SEmilio G. Cota }
107d9bb58e5SYang Zhong 
108d9bb58e5SYang Zhong /* This is OK because CPU architectures generally permit an
109d9bb58e5SYang Zhong  * implementation to drop entries from the TLB at any time, so
110d9bb58e5SYang Zhong  * flushing more entries than required is only an efficiency issue,
111d9bb58e5SYang Zhong  * not a correctness issue.
112d9bb58e5SYang Zhong  */
113d9bb58e5SYang Zhong static void tlb_flush_nocheck(CPUState *cpu)
114d9bb58e5SYang Zhong {
115d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
116d9bb58e5SYang Zhong 
117d9bb58e5SYang Zhong     /* The QOM tests will trigger tlb_flushes without setting up TCG
118d9bb58e5SYang Zhong      * so we bug out here in that case.
119d9bb58e5SYang Zhong      */
120d9bb58e5SYang Zhong     if (!tcg_enabled()) {
121d9bb58e5SYang Zhong         return;
122d9bb58e5SYang Zhong     }
123d9bb58e5SYang Zhong 
124d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
12583974cf4SEmilio G. Cota     atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
12683974cf4SEmilio G. Cota     tlb_debug("(count: %zu)\n", tlb_flush_count());
127d9bb58e5SYang Zhong 
128d9bb58e5SYang Zhong     tb_lock();
129d9bb58e5SYang Zhong 
130d9bb58e5SYang Zhong     memset(env->tlb_table, -1, sizeof(env->tlb_table));
131d9bb58e5SYang Zhong     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
132f3ced3c5SEmilio G. Cota     cpu_tb_jmp_cache_clear(cpu);
133d9bb58e5SYang Zhong 
134d9bb58e5SYang Zhong     env->vtlb_index = 0;
135d9bb58e5SYang Zhong     env->tlb_flush_addr = -1;
136d9bb58e5SYang Zhong     env->tlb_flush_mask = 0;
137d9bb58e5SYang Zhong 
138d9bb58e5SYang Zhong     tb_unlock();
139d9bb58e5SYang Zhong 
140d9bb58e5SYang Zhong     atomic_mb_set(&cpu->pending_tlb_flush, 0);
141d9bb58e5SYang Zhong }
142d9bb58e5SYang Zhong 
143d9bb58e5SYang Zhong static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
144d9bb58e5SYang Zhong {
145d9bb58e5SYang Zhong     tlb_flush_nocheck(cpu);
146d9bb58e5SYang Zhong }
147d9bb58e5SYang Zhong 
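/* Flush the entire TLB of @cpu. When called from another vCPU's thread
 * the flush is deferred as async work, and cpu->pending_tlb_flush is
 * used to avoid queuing redundant full flushes.
 */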
148d9bb58e5SYang Zhong void tlb_flush(CPUState *cpu)
149d9bb58e5SYang Zhong {
150d9bb58e5SYang Zhong     if (cpu->created && !qemu_cpu_is_self(cpu)) {
151d9bb58e5SYang Zhong         if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
152d9bb58e5SYang Zhong             atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
153d9bb58e5SYang Zhong             async_run_on_cpu(cpu, tlb_flush_global_async_work,
154d9bb58e5SYang Zhong                              RUN_ON_CPU_NULL);
155d9bb58e5SYang Zhong         }
156d9bb58e5SYang Zhong     } else {
157d9bb58e5SYang Zhong         tlb_flush_nocheck(cpu);
158d9bb58e5SYang Zhong     }
159d9bb58e5SYang Zhong }
160d9bb58e5SYang Zhong 
161d9bb58e5SYang Zhong void tlb_flush_all_cpus(CPUState *src_cpu)
162d9bb58e5SYang Zhong {
163d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_global_async_work;
164d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
165d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_NULL);
166d9bb58e5SYang Zhong }
167d9bb58e5SYang Zhong 
168d9bb58e5SYang Zhong void tlb_flush_all_cpus_synced(CPUState *src_cpu)
169d9bb58e5SYang Zhong {
170d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_global_async_work;
171d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
172d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
173d9bb58e5SYang Zhong }
174d9bb58e5SYang Zhong 
175d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
176d9bb58e5SYang Zhong {
177d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
178d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmask = data.host_int;
179d9bb58e5SYang Zhong     int mmu_idx;
180d9bb58e5SYang Zhong 
181d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
182d9bb58e5SYang Zhong 
183d9bb58e5SYang Zhong     tb_lock();
184d9bb58e5SYang Zhong 
185d9bb58e5SYang Zhong     tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
186d9bb58e5SYang Zhong 
187d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
188d9bb58e5SYang Zhong 
189d9bb58e5SYang Zhong         if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
190d9bb58e5SYang Zhong             tlb_debug("%d\n", mmu_idx);
191d9bb58e5SYang Zhong 
192d9bb58e5SYang Zhong             memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
193d9bb58e5SYang Zhong             memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
194d9bb58e5SYang Zhong         }
195d9bb58e5SYang Zhong     }
196d9bb58e5SYang Zhong 
197f3ced3c5SEmilio G. Cota     cpu_tb_jmp_cache_clear(cpu);
198d9bb58e5SYang Zhong 
199d9bb58e5SYang Zhong     tlb_debug("done\n");
200d9bb58e5SYang Zhong 
201d9bb58e5SYang Zhong     tb_unlock();
202d9bb58e5SYang Zhong }
203d9bb58e5SYang Zhong 
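/* Flush only the MMU modes named in @idxmap. Cross-vCPU requests are
 * coalesced: bits already marked pending in cpu->pending_tlb_flush are
 * not queued again.
 */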
204d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
205d9bb58e5SYang Zhong {
206d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
207d9bb58e5SYang Zhong 
208d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
209d9bb58e5SYang Zhong         uint16_t pending_flushes = idxmap;
210d9bb58e5SYang Zhong         pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);
211d9bb58e5SYang Zhong 
212d9bb58e5SYang Zhong         if (pending_flushes) {
213d9bb58e5SYang Zhong             tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);
214d9bb58e5SYang Zhong 
215d9bb58e5SYang Zhong             atomic_or(&cpu->pending_tlb_flush, pending_flushes);
216d9bb58e5SYang Zhong             async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
217d9bb58e5SYang Zhong                              RUN_ON_CPU_HOST_INT(pending_flushes));
218d9bb58e5SYang Zhong         }
219d9bb58e5SYang Zhong     } else {
220d9bb58e5SYang Zhong         tlb_flush_by_mmuidx_async_work(cpu,
221d9bb58e5SYang Zhong                                        RUN_ON_CPU_HOST_INT(idxmap));
222d9bb58e5SYang Zhong     }
223d9bb58e5SYang Zhong }
224d9bb58e5SYang Zhong 
225d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
226d9bb58e5SYang Zhong {
227d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
228d9bb58e5SYang Zhong 
229d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
230d9bb58e5SYang Zhong 
231d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
232d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
233d9bb58e5SYang Zhong }
234d9bb58e5SYang Zhong 
235d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
236d9bb58e5SYang Zhong                                                        uint16_t idxmap)
237d9bb58e5SYang Zhong {
238d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
239d9bb58e5SYang Zhong 
240d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
241d9bb58e5SYang Zhong 
242d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
243d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
244d9bb58e5SYang Zhong }
245d9bb58e5SYang Zhong 
246d9bb58e5SYang Zhong 
247d9bb58e5SYang Zhong 
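/* Invalidate @tlb_entry if any of its read, write or code addresses
 * matches the (page aligned) @addr. Keeping TLB_INVALID_MASK in the
 * comparison mask means an already-invalidated entry never matches.
 */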
248d9bb58e5SYang Zhong static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
249d9bb58e5SYang Zhong {
250d9bb58e5SYang Zhong     if (addr == (tlb_entry->addr_read &
251d9bb58e5SYang Zhong                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
252d9bb58e5SYang Zhong         addr == (tlb_entry->addr_write &
253d9bb58e5SYang Zhong                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
254d9bb58e5SYang Zhong         addr == (tlb_entry->addr_code &
255d9bb58e5SYang Zhong                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
256d9bb58e5SYang Zhong         memset(tlb_entry, -1, sizeof(*tlb_entry));
257d9bb58e5SYang Zhong     }
258d9bb58e5SYang Zhong }
259d9bb58e5SYang Zhong 
260d9bb58e5SYang Zhong static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
261d9bb58e5SYang Zhong {
262d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
263d9bb58e5SYang Zhong     target_ulong addr = (target_ulong) data.target_ptr;
264d9bb58e5SYang Zhong     int i;
265d9bb58e5SYang Zhong     int mmu_idx;
266d9bb58e5SYang Zhong 
267d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
268d9bb58e5SYang Zhong 
269d9bb58e5SYang Zhong     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
270d9bb58e5SYang Zhong 
271d9bb58e5SYang Zhong     /* Check if we need to flush due to large pages.  */
272d9bb58e5SYang Zhong     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
273d9bb58e5SYang Zhong         tlb_debug("forcing full flush ("
274d9bb58e5SYang Zhong                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
275d9bb58e5SYang Zhong                   env->tlb_flush_addr, env->tlb_flush_mask);
276d9bb58e5SYang Zhong 
277d9bb58e5SYang Zhong         tlb_flush(cpu);
278d9bb58e5SYang Zhong         return;
279d9bb58e5SYang Zhong     }
280d9bb58e5SYang Zhong 
281d9bb58e5SYang Zhong     addr &= TARGET_PAGE_MASK;
282d9bb58e5SYang Zhong     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
283d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
284d9bb58e5SYang Zhong         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
285d9bb58e5SYang Zhong     }
286d9bb58e5SYang Zhong 
287d9bb58e5SYang Zhong     /* check whether there are entries that need to be flushed in the vtlb */
288d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
289d9bb58e5SYang Zhong         int k;
290d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
291d9bb58e5SYang Zhong             tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
292d9bb58e5SYang Zhong         }
293d9bb58e5SYang Zhong     }
294d9bb58e5SYang Zhong 
295d9bb58e5SYang Zhong     tb_flush_jmp_cache(cpu, addr);
296d9bb58e5SYang Zhong }
297d9bb58e5SYang Zhong 
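/* Flush any TLB entries covering guest virtual page @addr on @cpu,
 * deferring to async work when called from another vCPU's thread.
 */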
298d9bb58e5SYang Zhong void tlb_flush_page(CPUState *cpu, target_ulong addr)
299d9bb58e5SYang Zhong {
300d9bb58e5SYang Zhong     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
301d9bb58e5SYang Zhong 
302d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
303d9bb58e5SYang Zhong         async_run_on_cpu(cpu, tlb_flush_page_async_work,
304d9bb58e5SYang Zhong                          RUN_ON_CPU_TARGET_PTR(addr));
305d9bb58e5SYang Zhong     } else {
306d9bb58e5SYang Zhong         tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
307d9bb58e5SYang Zhong     }
308d9bb58e5SYang Zhong }
309d9bb58e5SYang Zhong 
310d9bb58e5SYang Zhong /* As we are going to hijack the bottom bits of the page address for a
311d9bb58e5SYang Zhong  * mmuidx bit mask we need to fail to build if we can't do that
312d9bb58e5SYang Zhong  */
313d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
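/* Illustrative sketch of the encoding (assuming TARGET_PAGE_BITS == 12):
 * for addr 0x40001234 and idxmap 0x5,
 *   addr_and_mmu_idx = (0x40001234 & TARGET_PAGE_MASK) | 0x5 = 0x40001005
 * and the async workers below recover the two halves with
 *   addr           = addr_and_mmuidx & TARGET_PAGE_MASK;
 *   mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
 */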
314d9bb58e5SYang Zhong 
315d9bb58e5SYang Zhong static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
316d9bb58e5SYang Zhong                                                 run_on_cpu_data data)
317d9bb58e5SYang Zhong {
318d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
319d9bb58e5SYang Zhong     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
320d9bb58e5SYang Zhong     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
321d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
322d9bb58e5SYang Zhong     int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
323d9bb58e5SYang Zhong     int mmu_idx;
324d9bb58e5SYang Zhong     int i;
325d9bb58e5SYang Zhong 
326d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
327d9bb58e5SYang Zhong 
328d9bb58e5SYang Zhong     tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
329d9bb58e5SYang Zhong               page, addr, mmu_idx_bitmap);
330d9bb58e5SYang Zhong 
331d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
332d9bb58e5SYang Zhong         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
333d9bb58e5SYang Zhong             tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
334d9bb58e5SYang Zhong 
335d9bb58e5SYang Zhong             /* check whether there are vtlb entries that need to be flushed */
336d9bb58e5SYang Zhong             for (i = 0; i < CPU_VTLB_SIZE; i++) {
337d9bb58e5SYang Zhong                 tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
338d9bb58e5SYang Zhong             }
339d9bb58e5SYang Zhong         }
340d9bb58e5SYang Zhong     }
341d9bb58e5SYang Zhong 
342d9bb58e5SYang Zhong     tb_flush_jmp_cache(cpu, addr);
343d9bb58e5SYang Zhong }
344d9bb58e5SYang Zhong 
345d9bb58e5SYang Zhong static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
346d9bb58e5SYang Zhong                                                           run_on_cpu_data data)
347d9bb58e5SYang Zhong {
348d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
349d9bb58e5SYang Zhong     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
350d9bb58e5SYang Zhong     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
351d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
352d9bb58e5SYang Zhong 
353d9bb58e5SYang Zhong     tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);
354d9bb58e5SYang Zhong 
355d9bb58e5SYang Zhong     /* Check if we need to flush due to large pages.  */
356d9bb58e5SYang Zhong     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
357d9bb58e5SYang Zhong         tlb_debug("forced full flush ("
358d9bb58e5SYang Zhong                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
359d9bb58e5SYang Zhong                   env->tlb_flush_addr, env->tlb_flush_mask);
360d9bb58e5SYang Zhong 
361d9bb58e5SYang Zhong         tlb_flush_by_mmuidx_async_work(cpu,
362d9bb58e5SYang Zhong                                        RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
363d9bb58e5SYang Zhong     } else {
364d9bb58e5SYang Zhong         tlb_flush_page_by_mmuidx_async_work(cpu, data);
365d9bb58e5SYang Zhong     }
366d9bb58e5SYang Zhong }
367d9bb58e5SYang Zhong 
368d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
369d9bb58e5SYang Zhong {
370d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
371d9bb58e5SYang Zhong 
372d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
373d9bb58e5SYang Zhong 
374d9bb58e5SYang Zhong     /* This should already be page aligned */
375d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
376d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
377d9bb58e5SYang Zhong 
378d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
379d9bb58e5SYang Zhong         async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
380d9bb58e5SYang Zhong                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
381d9bb58e5SYang Zhong     } else {
382d9bb58e5SYang Zhong         tlb_check_page_and_flush_by_mmuidx_async_work(
383d9bb58e5SYang Zhong             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
384d9bb58e5SYang Zhong     }
385d9bb58e5SYang Zhong }
386d9bb58e5SYang Zhong 
387d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
388d9bb58e5SYang Zhong                                        uint16_t idxmap)
389d9bb58e5SYang Zhong {
390d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
391d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
392d9bb58e5SYang Zhong 
393d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
394d9bb58e5SYang Zhong 
395d9bb58e5SYang Zhong     /* This should already be page aligned */
396d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
397d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
398d9bb58e5SYang Zhong 
399d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
400d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
401d9bb58e5SYang Zhong }
402d9bb58e5SYang Zhong 
403d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
404d9bb58e5SYang Zhong                                                             target_ulong addr,
405d9bb58e5SYang Zhong                                                             uint16_t idxmap)
406d9bb58e5SYang Zhong {
407d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
408d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
409d9bb58e5SYang Zhong 
410d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
411d9bb58e5SYang Zhong 
412d9bb58e5SYang Zhong     /* This should already be page aligned */
413d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
414d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
415d9bb58e5SYang Zhong 
416d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
417d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
418d9bb58e5SYang Zhong }
419d9bb58e5SYang Zhong 
420d9bb58e5SYang Zhong void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
421d9bb58e5SYang Zhong {
422d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_page_async_work;
423d9bb58e5SYang Zhong 
424d9bb58e5SYang Zhong     flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
425d9bb58e5SYang Zhong     fn(src, RUN_ON_CPU_TARGET_PTR(addr));
426d9bb58e5SYang Zhong }
427d9bb58e5SYang Zhong 
428d9bb58e5SYang Zhong void tlb_flush_page_all_cpus_synced(CPUState *src,
429d9bb58e5SYang Zhong                                                   target_ulong addr)
430d9bb58e5SYang Zhong {
431d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_page_async_work;
432d9bb58e5SYang Zhong 
433d9bb58e5SYang Zhong     flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
434d9bb58e5SYang Zhong     async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
435d9bb58e5SYang Zhong }
436d9bb58e5SYang Zhong 
437d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the physical page 'ram_addr'
438d9bb58e5SYang Zhong    can be detected */
439d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
440d9bb58e5SYang Zhong {
441d9bb58e5SYang Zhong     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
442d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
443d9bb58e5SYang Zhong }
444d9bb58e5SYang Zhong 
445d9bb58e5SYang Zhong /* update the TLB so that writes in physical page 'ram_addr' are no longer
446d9bb58e5SYang Zhong    tested for self modifying code */
447d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
448d9bb58e5SYang Zhong {
449d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
450d9bb58e5SYang Zhong }
451d9bb58e5SYang Zhong 
452d9bb58e5SYang Zhong 
453d9bb58e5SYang Zhong /*
454d9bb58e5SYang Zhong  * Dirty write flag handling
455d9bb58e5SYang Zhong  *
456d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
457d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
458d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
459d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
460d9bb58e5SYang Zhong  * most usual is detecting writes to code regions which may invalidate
461d9bb58e5SYang Zhong  * generated code.
462d9bb58e5SYang Zhong  *
463d9bb58e5SYang Zhong  * Because we want other vCPUs to respond to changes straight away we
464d9bb58e5SYang Zhong  * update the te->addr_write field atomically. If the TLB entry has
465d9bb58e5SYang Zhong  * been changed by the vCPU in the mean time we skip the update.
466d9bb58e5SYang Zhong  *
467d9bb58e5SYang Zhong  * As this function uses atomic accesses we also need to ensure
468d9bb58e5SYang Zhong  * updates to tlb_entries follow the same access rules. We don't need
469d9bb58e5SYang Zhong  * to worry about this for oversized guests as MTTCG is disabled for
470d9bb58e5SYang Zhong  * them.
471d9bb58e5SYang Zhong  */
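/* Note: the reason a set TLB_NOTDIRTY bit forces the slow path is that
 * the generated fast path compares the page-masked guest address against
 * the full addr_write value, so any low flag bit left set makes the
 * comparison fail and the store falls back to io_writex()/notdirty
 * handling.
 */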
472d9bb58e5SYang Zhong 
473d9bb58e5SYang Zhong static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
474d9bb58e5SYang Zhong                            uintptr_t length)
475d9bb58e5SYang Zhong {
476d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
477d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
478d9bb58e5SYang Zhong 
479d9bb58e5SYang Zhong     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
480d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
481d9bb58e5SYang Zhong         addr += tlb_entry->addend;
482d9bb58e5SYang Zhong         if ((addr - start) < length) {
483d9bb58e5SYang Zhong             tlb_entry->addr_write |= TLB_NOTDIRTY;
484d9bb58e5SYang Zhong         }
485d9bb58e5SYang Zhong     }
486d9bb58e5SYang Zhong #else
487d9bb58e5SYang Zhong     /* paired with atomic_mb_set in tlb_set_page_with_attrs */
488d9bb58e5SYang Zhong     uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
489d9bb58e5SYang Zhong     uintptr_t addr = orig_addr;
490d9bb58e5SYang Zhong 
491d9bb58e5SYang Zhong     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
492d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
493d9bb58e5SYang Zhong         addr += atomic_read(&tlb_entry->addend);
494d9bb58e5SYang Zhong         if ((addr - start) < length) {
495d9bb58e5SYang Zhong             uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
496d9bb58e5SYang Zhong             atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
497d9bb58e5SYang Zhong         }
498d9bb58e5SYang Zhong     }
499d9bb58e5SYang Zhong #endif
500d9bb58e5SYang Zhong }
501d9bb58e5SYang Zhong 
502d9bb58e5SYang Zhong /* For atomic correctness when running MTTCG we need to use the right
503d9bb58e5SYang Zhong  * primitives when copying entries */
504d9bb58e5SYang Zhong static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
505d9bb58e5SYang Zhong                                    bool atomic_set)
506d9bb58e5SYang Zhong {
507d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
508d9bb58e5SYang Zhong     *d = *s;
509d9bb58e5SYang Zhong #else
510d9bb58e5SYang Zhong     if (atomic_set) {
511d9bb58e5SYang Zhong         d->addr_read = s->addr_read;
512d9bb58e5SYang Zhong         d->addr_code = s->addr_code;
513d9bb58e5SYang Zhong         atomic_set(&d->addend, atomic_read(&s->addend));
514d9bb58e5SYang Zhong         /* Pairs with flag setting in tlb_reset_dirty_range */
515d9bb58e5SYang Zhong         atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
516d9bb58e5SYang Zhong     } else {
517d9bb58e5SYang Zhong         d->addr_read = s->addr_read;
518d9bb58e5SYang Zhong         d->addr_write = atomic_read(&s->addr_write);
519d9bb58e5SYang Zhong         d->addr_code = s->addr_code;
520d9bb58e5SYang Zhong         d->addend = atomic_read(&s->addend);
521d9bb58e5SYang Zhong     }
522d9bb58e5SYang Zhong #endif
523d9bb58e5SYang Zhong }
524d9bb58e5SYang Zhong 
525d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
526d9bb58e5SYang Zhong  * the target vCPU). As such care needs to be taken that we don't
527d9bb58e5SYang Zhong  * dangerously race with another vCPU update. The only thing actually
528d9bb58e5SYang Zhong  * updated is the target TLB entry ->addr_write flags.
529d9bb58e5SYang Zhong  */
530d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
531d9bb58e5SYang Zhong {
532d9bb58e5SYang Zhong     CPUArchState *env;
533d9bb58e5SYang Zhong 
534d9bb58e5SYang Zhong     int mmu_idx;
535d9bb58e5SYang Zhong 
536d9bb58e5SYang Zhong     env = cpu->env_ptr;
537d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
538d9bb58e5SYang Zhong         unsigned int i;
539d9bb58e5SYang Zhong 
540d9bb58e5SYang Zhong         for (i = 0; i < CPU_TLB_SIZE; i++) {
541d9bb58e5SYang Zhong             tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
542d9bb58e5SYang Zhong                                   start1, length);
543d9bb58e5SYang Zhong         }
544d9bb58e5SYang Zhong 
545d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
546d9bb58e5SYang Zhong             tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
547d9bb58e5SYang Zhong                                   start1, length);
548d9bb58e5SYang Zhong         }
549d9bb58e5SYang Zhong     }
550d9bb58e5SYang Zhong }
551d9bb58e5SYang Zhong 
552d9bb58e5SYang Zhong static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
553d9bb58e5SYang Zhong {
554d9bb58e5SYang Zhong     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
555d9bb58e5SYang Zhong         tlb_entry->addr_write = vaddr;
556d9bb58e5SYang Zhong     }
557d9bb58e5SYang Zhong }
558d9bb58e5SYang Zhong 
559d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr
560d9bb58e5SYang Zhong    so that it is no longer dirty */
561d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
562d9bb58e5SYang Zhong {
563d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
564d9bb58e5SYang Zhong     int i;
565d9bb58e5SYang Zhong     int mmu_idx;
566d9bb58e5SYang Zhong 
567d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
568d9bb58e5SYang Zhong 
569d9bb58e5SYang Zhong     vaddr &= TARGET_PAGE_MASK;
570d9bb58e5SYang Zhong     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
571d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
572d9bb58e5SYang Zhong         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
573d9bb58e5SYang Zhong     }
574d9bb58e5SYang Zhong 
575d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
576d9bb58e5SYang Zhong         int k;
577d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
578d9bb58e5SYang Zhong             tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
579d9bb58e5SYang Zhong         }
580d9bb58e5SYang Zhong     }
581d9bb58e5SYang Zhong }
582d9bb58e5SYang Zhong 
583d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
584d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
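/* Worked example (illustrative): with an existing 2MB region tracked at
 * 0x40000000 and a new 2MB page added at 0x40600000, the loop below
 * widens the mask until the tracked region becomes the 8MB range
 * [0x40000000, 0x40800000) covering both pages.
 */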
585d9bb58e5SYang Zhong static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
586d9bb58e5SYang Zhong                                target_ulong size)
587d9bb58e5SYang Zhong {
588d9bb58e5SYang Zhong     target_ulong mask = ~(size - 1);
589d9bb58e5SYang Zhong 
590d9bb58e5SYang Zhong     if (env->tlb_flush_addr == (target_ulong)-1) {
591d9bb58e5SYang Zhong         env->tlb_flush_addr = vaddr & mask;
592d9bb58e5SYang Zhong         env->tlb_flush_mask = mask;
593d9bb58e5SYang Zhong         return;
594d9bb58e5SYang Zhong     }
595d9bb58e5SYang Zhong     /* Extend the existing region to include the new page.
596d9bb58e5SYang Zhong        This is a compromise between unnecessary flushes and the cost
597d9bb58e5SYang Zhong        of maintaining a full variable size TLB.  */
598d9bb58e5SYang Zhong     mask &= env->tlb_flush_mask;
599d9bb58e5SYang Zhong     while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
600d9bb58e5SYang Zhong         mask <<= 1;
601d9bb58e5SYang Zhong     }
602d9bb58e5SYang Zhong     env->tlb_flush_addr &= mask;
603d9bb58e5SYang Zhong     env->tlb_flush_mask = mask;
604d9bb58e5SYang Zhong }
605d9bb58e5SYang Zhong 
606d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address
607d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
608d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
609d9bb58e5SYang Zhong  *
610d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
611d9bb58e5SYang Zhong  * critical section.
612d9bb58e5SYang Zhong  */
613d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
614d9bb58e5SYang Zhong                              hwaddr paddr, MemTxAttrs attrs, int prot,
615d9bb58e5SYang Zhong                              int mmu_idx, target_ulong size)
616d9bb58e5SYang Zhong {
617d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
618d9bb58e5SYang Zhong     MemoryRegionSection *section;
619d9bb58e5SYang Zhong     unsigned int index;
620d9bb58e5SYang Zhong     target_ulong address;
621d9bb58e5SYang Zhong     target_ulong code_address;
622d9bb58e5SYang Zhong     uintptr_t addend;
623d9bb58e5SYang Zhong     CPUTLBEntry *te, *tv, tn;
624d9bb58e5SYang Zhong     hwaddr iotlb, xlat, sz;
625d9bb58e5SYang Zhong     unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
626d9bb58e5SYang Zhong     int asidx = cpu_asidx_from_attrs(cpu, attrs);
627d9bb58e5SYang Zhong 
628d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
629d9bb58e5SYang Zhong     assert(size >= TARGET_PAGE_SIZE);
630d9bb58e5SYang Zhong     if (size != TARGET_PAGE_SIZE) {
631d9bb58e5SYang Zhong         tlb_add_large_page(env, vaddr, size);
632d9bb58e5SYang Zhong     }
633d9bb58e5SYang Zhong 
634d9bb58e5SYang Zhong     sz = size;
635d9bb58e5SYang Zhong     section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
636d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
637d9bb58e5SYang Zhong 
638d9bb58e5SYang Zhong     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
639d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
640d9bb58e5SYang Zhong               vaddr, paddr, prot, mmu_idx);
641d9bb58e5SYang Zhong 
642d9bb58e5SYang Zhong     address = vaddr;
643d9bb58e5SYang Zhong     if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
644d9bb58e5SYang Zhong         /* IO memory case */
645d9bb58e5SYang Zhong         address |= TLB_MMIO;
646d9bb58e5SYang Zhong         addend = 0;
647d9bb58e5SYang Zhong     } else {
648d9bb58e5SYang Zhong         /* TLB_MMIO for rom/romd handled below */
649d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
650d9bb58e5SYang Zhong     }
651d9bb58e5SYang Zhong 
652d9bb58e5SYang Zhong     code_address = address;
653d9bb58e5SYang Zhong     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
654d9bb58e5SYang Zhong                                             prot, &address);
655d9bb58e5SYang Zhong 
656d9bb58e5SYang Zhong     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
657d9bb58e5SYang Zhong     te = &env->tlb_table[mmu_idx][index];
658d9bb58e5SYang Zhong     /* do not discard the translation in te, evict it into a victim tlb */
659d9bb58e5SYang Zhong     tv = &env->tlb_v_table[mmu_idx][vidx];
660d9bb58e5SYang Zhong 
661d9bb58e5SYang Zhong     /* addr_write can race with tlb_reset_dirty_range */
662d9bb58e5SYang Zhong     copy_tlb_helper(tv, te, true);
663d9bb58e5SYang Zhong 
664d9bb58e5SYang Zhong     env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
665d9bb58e5SYang Zhong 
666d9bb58e5SYang Zhong     /* refill the tlb */
667ace41090SPeter Maydell     /*
668ace41090SPeter Maydell      * At this point iotlb contains a physical section number in the lower
669ace41090SPeter Maydell      * TARGET_PAGE_BITS, and either
670ace41090SPeter Maydell      *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
671ace41090SPeter Maydell      *  + the offset within section->mr of the page base (otherwise)
672ace41090SPeter Maydell      * We subtract the vaddr (which is page aligned and thus won't
673ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
674ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
675ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
676ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
677ace41090SPeter Maydell      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
678ace41090SPeter Maydell      */
679d9bb58e5SYang Zhong     env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
680d9bb58e5SYang Zhong     env->iotlb[mmu_idx][index].attrs = attrs;
681d9bb58e5SYang Zhong 
682d9bb58e5SYang Zhong     /* Now calculate the new entry */
683d9bb58e5SYang Zhong     tn.addend = addend - vaddr;
684d9bb58e5SYang Zhong     if (prot & PAGE_READ) {
685d9bb58e5SYang Zhong         tn.addr_read = address;
686d9bb58e5SYang Zhong     } else {
687d9bb58e5SYang Zhong         tn.addr_read = -1;
688d9bb58e5SYang Zhong     }
689d9bb58e5SYang Zhong 
690d9bb58e5SYang Zhong     if (prot & PAGE_EXEC) {
691d9bb58e5SYang Zhong         tn.addr_code = code_address;
692d9bb58e5SYang Zhong     } else {
693d9bb58e5SYang Zhong         tn.addr_code = -1;
694d9bb58e5SYang Zhong     }
695d9bb58e5SYang Zhong 
696d9bb58e5SYang Zhong     tn.addr_write = -1;
697d9bb58e5SYang Zhong     if (prot & PAGE_WRITE) {
698d9bb58e5SYang Zhong         if ((memory_region_is_ram(section->mr) && section->readonly)
699d9bb58e5SYang Zhong             || memory_region_is_romd(section->mr)) {
700d9bb58e5SYang Zhong             /* Write access calls the I/O callback.  */
701d9bb58e5SYang Zhong             tn.addr_write = address | TLB_MMIO;
702d9bb58e5SYang Zhong         } else if (memory_region_is_ram(section->mr)
703d9bb58e5SYang Zhong                    && cpu_physical_memory_is_clean(
704d9bb58e5SYang Zhong                         memory_region_get_ram_addr(section->mr) + xlat)) {
705d9bb58e5SYang Zhong             tn.addr_write = address | TLB_NOTDIRTY;
706d9bb58e5SYang Zhong         } else {
707d9bb58e5SYang Zhong             tn.addr_write = address;
708d9bb58e5SYang Zhong         }
709f52bfb12SDavid Hildenbrand         if (prot & PAGE_WRITE_INV) {
710f52bfb12SDavid Hildenbrand             tn.addr_write |= TLB_INVALID_MASK;
711f52bfb12SDavid Hildenbrand         }
712d9bb58e5SYang Zhong     }
713d9bb58e5SYang Zhong 
714d9bb58e5SYang Zhong     /* Pairs with flag setting in tlb_reset_dirty_range */
715d9bb58e5SYang Zhong     copy_tlb_helper(te, &tn, true);
716d9bb58e5SYang Zhong     /* atomic_mb_set(&te->addr_write, write_address); */
717d9bb58e5SYang Zhong }
718d9bb58e5SYang Zhong 
719d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory
720d9bb58e5SYang Zhong  * transaction attributes to be used.
721d9bb58e5SYang Zhong  */
722d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr,
723d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
724d9bb58e5SYang Zhong                   int mmu_idx, target_ulong size)
725d9bb58e5SYang Zhong {
726d9bb58e5SYang Zhong     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
727d9bb58e5SYang Zhong                             prot, mmu_idx, size);
728d9bb58e5SYang Zhong }
729d9bb58e5SYang Zhong 
730d9bb58e5SYang Zhong static void report_bad_exec(CPUState *cpu, target_ulong addr)
731d9bb58e5SYang Zhong {
732d9bb58e5SYang Zhong     /* Accidentally executing outside RAM or ROM is quite common for
733d9bb58e5SYang Zhong      * several user-error situations, so report it in a way that
734d9bb58e5SYang Zhong      * makes it clear that this isn't a QEMU bug and provide suggestions
735d9bb58e5SYang Zhong      * about what a user could do to fix things.
736d9bb58e5SYang Zhong      */
737d9bb58e5SYang Zhong     error_report("Trying to execute code outside RAM or ROM at 0x"
738d9bb58e5SYang Zhong                  TARGET_FMT_lx, addr);
739d9bb58e5SYang Zhong     error_printf("This usually means one of the following happened:\n\n"
740d9bb58e5SYang Zhong                  "(1) You told QEMU to execute a kernel for the wrong machine "
741d9bb58e5SYang Zhong                  "type, and it crashed on startup (eg trying to run a "
742d9bb58e5SYang Zhong                  "raspberry pi kernel on a versatilepb QEMU machine)\n"
743d9bb58e5SYang Zhong                  "(2) You didn't give QEMU a kernel or BIOS filename at all, "
744d9bb58e5SYang Zhong                  "and QEMU executed a ROM full of no-op instructions until "
745d9bb58e5SYang Zhong                  "it fell off the end\n"
746d9bb58e5SYang Zhong                  "(3) Your guest kernel has a bug and crashed by jumping "
747d9bb58e5SYang Zhong                  "off into nowhere\n\n"
748d9bb58e5SYang Zhong                  "This is almost always one of the first two, so check your "
749d9bb58e5SYang Zhong                  "command line and that you are using the right type of kernel "
750d9bb58e5SYang Zhong                  "for this machine.\n"
751d9bb58e5SYang Zhong                  "If you think option (3) is likely then you can try debugging "
752d9bb58e5SYang Zhong                  "your guest with the -d debug options; in particular "
753d9bb58e5SYang Zhong                  "-d guest_errors will cause the log to include a dump of the "
754d9bb58e5SYang Zhong                  "guest register state at this point.\n\n"
755d9bb58e5SYang Zhong                  "Execution cannot continue; stopping here.\n\n");
756d9bb58e5SYang Zhong 
757d9bb58e5SYang Zhong     /* Report also to the logs, with more detail including register dump */
758d9bb58e5SYang Zhong     qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
759d9bb58e5SYang Zhong                   "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
760d9bb58e5SYang Zhong     log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
761d9bb58e5SYang Zhong }
762d9bb58e5SYang Zhong 
763d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
764d9bb58e5SYang Zhong {
765d9bb58e5SYang Zhong     ram_addr_t ram_addr;
766d9bb58e5SYang Zhong 
767d9bb58e5SYang Zhong     ram_addr = qemu_ram_addr_from_host(ptr);
768d9bb58e5SYang Zhong     if (ram_addr == RAM_ADDR_INVALID) {
769d9bb58e5SYang Zhong         error_report("Bad ram pointer %p", ptr);
770d9bb58e5SYang Zhong         abort();
771d9bb58e5SYang Zhong     }
772d9bb58e5SYang Zhong     return ram_addr;
773d9bb58e5SYang Zhong }
774d9bb58e5SYang Zhong 
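/* Handle a load that the TLB marked TLB_MMIO: resolve the iotlb entry
 * back to its MemoryRegionSection and dispatch the read through the
 * memory API, taking the BQL for regions that require it and reporting
 * failed transactions via cpu_transaction_failed().
 */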
775d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
77604e3aabdSPeter Maydell                          int mmu_idx,
777d9bb58e5SYang Zhong                          target_ulong addr, uintptr_t retaddr, int size)
778d9bb58e5SYang Zhong {
779d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
780*2d54f194SPeter Maydell     hwaddr mr_offset;
781*2d54f194SPeter Maydell     MemoryRegionSection *section;
782*2d54f194SPeter Maydell     MemoryRegion *mr;
783d9bb58e5SYang Zhong     uint64_t val;
784d9bb58e5SYang Zhong     bool locked = false;
78504e3aabdSPeter Maydell     MemTxResult r;
786d9bb58e5SYang Zhong 
787*2d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
788*2d54f194SPeter Maydell     mr = section->mr;
789*2d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
790d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
791d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
792d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
793d9bb58e5SYang Zhong     }
794d9bb58e5SYang Zhong 
795d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
796d9bb58e5SYang Zhong 
7978b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
798d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
799d9bb58e5SYang Zhong         locked = true;
800d9bb58e5SYang Zhong     }
801*2d54f194SPeter Maydell     r = memory_region_dispatch_read(mr, mr_offset,
80204e3aabdSPeter Maydell                                     &val, size, iotlbentry->attrs);
80304e3aabdSPeter Maydell     if (r != MEMTX_OK) {
804*2d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
805*2d54f194SPeter Maydell             section->offset_within_address_space -
806*2d54f194SPeter Maydell             section->offset_within_region;
807*2d54f194SPeter Maydell 
80804e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
80904e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
81004e3aabdSPeter Maydell     }
811d9bb58e5SYang Zhong     if (locked) {
812d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
813d9bb58e5SYang Zhong     }
814d9bb58e5SYang Zhong 
815d9bb58e5SYang Zhong     return val;
816d9bb58e5SYang Zhong }
817d9bb58e5SYang Zhong 
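/* Store counterpart of io_readx(): dispatch the write through the memory
 * API with the attributes recorded in the iotlb entry.
 */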
818d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
81904e3aabdSPeter Maydell                       int mmu_idx,
820d9bb58e5SYang Zhong                       uint64_t val, target_ulong addr,
821d9bb58e5SYang Zhong                       uintptr_t retaddr, int size)
822d9bb58e5SYang Zhong {
823d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
824*2d54f194SPeter Maydell     hwaddr mr_offset;
825*2d54f194SPeter Maydell     MemoryRegionSection *section;
826*2d54f194SPeter Maydell     MemoryRegion *mr;
827d9bb58e5SYang Zhong     bool locked = false;
82804e3aabdSPeter Maydell     MemTxResult r;
829d9bb58e5SYang Zhong 
830*2d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
831*2d54f194SPeter Maydell     mr = section->mr;
832*2d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
833d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
834d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
835d9bb58e5SYang Zhong     }
836d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
837d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
838d9bb58e5SYang Zhong 
8398b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
840d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
841d9bb58e5SYang Zhong         locked = true;
842d9bb58e5SYang Zhong     }
843*2d54f194SPeter Maydell     r = memory_region_dispatch_write(mr, mr_offset,
84404e3aabdSPeter Maydell                                      val, size, iotlbentry->attrs);
84504e3aabdSPeter Maydell     if (r != MEMTX_OK) {
846*2d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
847*2d54f194SPeter Maydell             section->offset_within_address_space -
848*2d54f194SPeter Maydell             section->offset_within_region;
849*2d54f194SPeter Maydell 
85004e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
85104e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
85204e3aabdSPeter Maydell     }
853d9bb58e5SYang Zhong     if (locked) {
854d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
855d9bb58e5SYang Zhong     }
856d9bb58e5SYang Zhong }
857d9bb58e5SYang Zhong 
858d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
859d9bb58e5SYang Zhong    back to the main tlb.  */
860d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
861d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
862d9bb58e5SYang Zhong {
863d9bb58e5SYang Zhong     size_t vidx;
864d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
865d9bb58e5SYang Zhong         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
866d9bb58e5SYang Zhong         target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
867d9bb58e5SYang Zhong 
868d9bb58e5SYang Zhong         if (cmp == page) {
869d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
870d9bb58e5SYang Zhong             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
871d9bb58e5SYang Zhong 
872d9bb58e5SYang Zhong             copy_tlb_helper(&tmptlb, tlb, false);
873d9bb58e5SYang Zhong             copy_tlb_helper(tlb, vtlb, true);
874d9bb58e5SYang Zhong             copy_tlb_helper(vtlb, &tmptlb, true);
875d9bb58e5SYang Zhong 
876d9bb58e5SYang Zhong             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
877d9bb58e5SYang Zhong             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
878d9bb58e5SYang Zhong             tmpio = *io; *io = *vio; *vio = tmpio;
879d9bb58e5SYang Zhong             return true;
880d9bb58e5SYang Zhong         }
881d9bb58e5SYang Zhong     }
882d9bb58e5SYang Zhong     return false;
883d9bb58e5SYang Zhong }
884d9bb58e5SYang Zhong 
885d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
886d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
887d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
888d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
889d9bb58e5SYang Zhong 
890f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */
891f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it
892f2553f04SKONRAD Frederic  * is actually a ram_addr_t (in system mode; the user mode emulation
893f2553f04SKONRAD Frederic  * version of this function returns a guest virtual address).
894f2553f04SKONRAD Frederic  */
895f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
896f2553f04SKONRAD Frederic {
897*2d54f194SPeter Maydell     int mmu_idx, index;
898f2553f04SKONRAD Frederic     void *p;
899f2553f04SKONRAD Frederic     MemoryRegion *mr;
900*2d54f194SPeter Maydell     MemoryRegionSection *section;
901f2553f04SKONRAD Frederic     CPUState *cpu = ENV_GET_CPU(env);
902f2553f04SKONRAD Frederic     CPUIOTLBEntry *iotlbentry;
903*2d54f194SPeter Maydell     hwaddr physaddr, mr_offset;
904f2553f04SKONRAD Frederic 
905f2553f04SKONRAD Frederic     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
906f2553f04SKONRAD Frederic     mmu_idx = cpu_mmu_index(env, true);
907f2553f04SKONRAD Frederic     if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
90871b9a453SKONRAD Frederic                  (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
90971b9a453SKONRAD Frederic         if (!VICTIM_TLB_HIT(addr_read, addr)) {
91098670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
91171b9a453SKONRAD Frederic         }
912f2553f04SKONRAD Frederic     }
913f2553f04SKONRAD Frederic     iotlbentry = &env->iotlb[mmu_idx][index];
914*2d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
915*2d54f194SPeter Maydell     mr = section->mr;
916f2553f04SKONRAD Frederic     if (memory_region_is_unassigned(mr)) {
917c9356746SKONRAD Frederic         qemu_mutex_lock_iothread();
918c9356746SKONRAD Frederic         if (memory_region_request_mmio_ptr(mr, addr)) {
919c9356746SKONRAD Frederic             qemu_mutex_unlock_iothread();
920c9356746SKONRAD Frederic             /* A MemoryRegion is potentially added so re-run the
921c9356746SKONRAD Frederic              * get_page_addr_code.
922c9356746SKONRAD Frederic              */
923c9356746SKONRAD Frederic             return get_page_addr_code(env, addr);
924c9356746SKONRAD Frederic         }
925c9356746SKONRAD Frederic         qemu_mutex_unlock_iothread();
926c9356746SKONRAD Frederic 
92704e3aabdSPeter Maydell         /* Give the new-style cpu_transaction_failed() hook first chance
92804e3aabdSPeter Maydell          * to handle this.
92904e3aabdSPeter Maydell          * This is not the ideal place to detect and generate CPU
93004e3aabdSPeter Maydell          * exceptions for instruction fetch failure (for instance
93104e3aabdSPeter Maydell          * we don't know the length of the access that the CPU would
93204e3aabdSPeter Maydell          * use, and it would be better to go ahead and try the access
93304e3aabdSPeter Maydell          * and use the MemTXResult it produced). However it is the
93404e3aabdSPeter Maydell          * simplest place we have currently available for the check.
93504e3aabdSPeter Maydell          */
936*2d54f194SPeter Maydell         mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
937*2d54f194SPeter Maydell         physaddr = mr_offset +
938*2d54f194SPeter Maydell             section->offset_within_address_space -
939*2d54f194SPeter Maydell             section->offset_within_region;
94004e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
94104e3aabdSPeter Maydell                                iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
94204e3aabdSPeter Maydell 
943f2553f04SKONRAD Frederic         cpu_unassigned_access(cpu, addr, false, true, 0, 4);
944f2553f04SKONRAD Frederic         /* The CPU's unassigned access hook might have longjumped out
945f2553f04SKONRAD Frederic          * with an exception. If it didn't (or there was no hook) then
946f2553f04SKONRAD Frederic          * we can't proceed further.
947f2553f04SKONRAD Frederic          */
948f2553f04SKONRAD Frederic         report_bad_exec(cpu, addr);
949f2553f04SKONRAD Frederic         exit(1);
950f2553f04SKONRAD Frederic     }
951f2553f04SKONRAD Frederic     p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
952f2553f04SKONRAD Frederic     return qemu_ram_addr_from_host_nofail(p);
953f2553f04SKONRAD Frederic }
954f2553f04SKONRAD Frederic 
955d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted.
956d9bb58e5SYang Zhong  * If it is not permitted then an exception will be taken in the same
957d9bb58e5SYang Zhong  * way as if this were a real write access (and we will not return).
958d9bb58e5SYang Zhong  * Otherwise the function will return, and there will be a valid
959d9bb58e5SYang Zhong  * entry in the TLB for this access.
960d9bb58e5SYang Zhong  */
96198670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
962d9bb58e5SYang Zhong                  uintptr_t retaddr)
963d9bb58e5SYang Zhong {
964d9bb58e5SYang Zhong     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
965d9bb58e5SYang Zhong     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
966d9bb58e5SYang Zhong 
967d9bb58e5SYang Zhong     if ((addr & TARGET_PAGE_MASK)
968d9bb58e5SYang Zhong         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
969d9bb58e5SYang Zhong         /* TLB entry is for a different page */
970d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
97198670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
97298670d47SLaurent Vivier                      mmu_idx, retaddr);
973d9bb58e5SYang Zhong         }
974d9bb58e5SYang Zhong     }
975d9bb58e5SYang Zhong }
976d9bb58e5SYang Zhong 
977d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
978d9bb58e5SYang Zhong  * operations, or io operations to proceed.  Return the host address.  */
979d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
98034d49937SPeter Maydell                                TCGMemOpIdx oi, uintptr_t retaddr,
98134d49937SPeter Maydell                                NotDirtyInfo *ndi)
982d9bb58e5SYang Zhong {
983d9bb58e5SYang Zhong     size_t mmu_idx = get_mmuidx(oi);
984d9bb58e5SYang Zhong     size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
985d9bb58e5SYang Zhong     CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
986d9bb58e5SYang Zhong     target_ulong tlb_addr = tlbe->addr_write;
987d9bb58e5SYang Zhong     TCGMemOp mop = get_memop(oi);
988d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
989d9bb58e5SYang Zhong     int s_bits = mop & MO_SIZE;
99034d49937SPeter Maydell     void *hostaddr;
991d9bb58e5SYang Zhong 
992d9bb58e5SYang Zhong     /* Adjust the given return address.  */
993d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
994d9bb58e5SYang Zhong 
995d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
996d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
997d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
998d9bb58e5SYang Zhong         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
999d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1000d9bb58e5SYang Zhong     }
1001d9bb58e5SYang Zhong 
1002d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
1003d9bb58e5SYang Zhong     if (unlikely(addr & ((1 << s_bits) - 1))) {
1004d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1005d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1006d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1007d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1008d9bb58e5SYang Zhong         goto stop_the_world;
1009d9bb58e5SYang Zhong     }
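    /* Worked example of the two checks above: a 4-byte operation built with
     * MO_ALIGN has a_bits == s_bits == 2, so a misaligned address already
     * faulted via cpu_unaligned_access().  The same operation without
     * MO_ALIGN has a_bits == 0, so a misaligned address only reaches the
     * (addr & 3) test here and takes the stop-the-world path instead.
     */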
1010d9bb58e5SYang Zhong 
1011d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
1012d9bb58e5SYang Zhong     if ((addr & TARGET_PAGE_MASK)
1013d9bb58e5SYang Zhong         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1014d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
101598670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
101698670d47SLaurent Vivier                      mmu_idx, retaddr);
1017d9bb58e5SYang Zhong         }
1018f52bfb12SDavid Hildenbrand         tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
1019d9bb58e5SYang Zhong     }
1020d9bb58e5SYang Zhong 
1021d9bb58e5SYang Zhong     /* Notice an IO access */
102234d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_MMIO)) {
1023d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1024d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1025d9bb58e5SYang Zhong         goto stop_the_world;
1026d9bb58e5SYang Zhong     }
1027d9bb58e5SYang Zhong 
1028d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
102934d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
103098670d47SLaurent Vivier         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
103198670d47SLaurent Vivier                  mmu_idx, retaddr);
1032d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1033d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1034d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1035d9bb58e5SYang Zhong         goto stop_the_world;
1036d9bb58e5SYang Zhong     }
1037d9bb58e5SYang Zhong 
103834d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
103934d49937SPeter Maydell 
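    /* The prepare/complete pair brackets the caller's host store:
     * memory_notdirty_write_prepare() does the bookkeeping that must happen
     * before writing to a clean (not-dirty) page, and the matching
     * memory_notdirty_write_complete() (issued via ATOMIC_MMU_CLEANUP below)
     * marks the page dirty again once the access has been done.
     */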
104034d49937SPeter Maydell     ndi->active = false;
104134d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
104234d49937SPeter Maydell         ndi->active = true;
104334d49937SPeter Maydell         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
104434d49937SPeter Maydell                                       qemu_ram_addr_from_host_nofail(hostaddr),
104534d49937SPeter Maydell                                       1 << s_bits);
104634d49937SPeter Maydell     }
104734d49937SPeter Maydell 
104834d49937SPeter Maydell     return hostaddr;
1049d9bb58e5SYang Zhong 
1050d9bb58e5SYang Zhong  stop_the_world:
1051d9bb58e5SYang Zhong     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1052d9bb58e5SYang Zhong }
1053d9bb58e5SYang Zhong 
1054d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN
1055d9bb58e5SYang Zhong # define TGT_BE(X)  (X)
1056d9bb58e5SYang Zhong # define TGT_LE(X)  BSWAP(X)
1057d9bb58e5SYang Zhong #else
1058d9bb58e5SYang Zhong # define TGT_BE(X)  BSWAP(X)
1059d9bb58e5SYang Zhong # define TGT_LE(X)  (X)
1060d9bb58e5SYang Zhong #endif
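
/*
 * For example, on a big-endian target TGT_LE(0x11223344) is 0x44332211 and
 * TGT_BE() is the identity; softmmu_template.h uses these to move values
 * between the target's natural byte order and the explicit little-/big-endian
 * forms handled by the _le/_be helpers.
 */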
1061d9bb58e5SYang Zhong 
1062d9bb58e5SYang Zhong #define MMUSUFFIX _mmu
1063d9bb58e5SYang Zhong 
1064d9bb58e5SYang Zhong #define DATA_SIZE 1
1065d9bb58e5SYang Zhong #include "softmmu_template.h"
1066d9bb58e5SYang Zhong 
1067d9bb58e5SYang Zhong #define DATA_SIZE 2
1068d9bb58e5SYang Zhong #include "softmmu_template.h"
1069d9bb58e5SYang Zhong 
1070d9bb58e5SYang Zhong #define DATA_SIZE 4
1071d9bb58e5SYang Zhong #include "softmmu_template.h"
1072d9bb58e5SYang Zhong 
1073d9bb58e5SYang Zhong #define DATA_SIZE 8
1074d9bb58e5SYang Zhong #include "softmmu_template.h"
1075d9bb58e5SYang Zhong 
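/*
 * Each inclusion above instantiates the out-of-line load/store path for one
 * access size.  The generated names are roughly helper_ret_ldub_mmu,
 * helper_le_lduw_mmu / helper_be_lduw_mmu, helper_le_ldul_mmu,
 * helper_le_ldq_mmu, and the matching helper_ret_stb_mmu, helper_le_stw_mmu,
 * etc., with signatures along the lines of
 *
 *   tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
 *                                       TCGMemOpIdx oi, uintptr_t retaddr);
 *
 * which the TCG backends call when the inline TLB lookup misses.
 */
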
1076d9bb58e5SYang Zhong /* First set of helpers allows passing in of OI and RETADDR.  This makes
1077d9bb58e5SYang Zhong    them callable from other helpers.  */
1078d9bb58e5SYang Zhong 
1079d9bb58e5SYang Zhong #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1080d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
1081d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
108234d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
108334d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
108434d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP                              \
108534d49937SPeter Maydell     do {                                                \
108634d49937SPeter Maydell         if (unlikely(ndi.active)) {                     \
108734d49937SPeter Maydell             memory_notdirty_write_complete(&ndi);       \
108834d49937SPeter Maydell         }                                               \
108934d49937SPeter Maydell     } while (0)
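
/*
 * Rough sketch (simplified, not the literal template) of what one
 * atomic_template.h inclusion expands to with the macros above, taking the
 * 4-byte little-endian compare-and-swap helper as an example:
 */
#if 0
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    NotDirtyInfo ndi;                                   /* ATOMIC_MMU_DECLS */
    uint32_t *haddr = atomic_mmu_lookup(env, addr, oi,  /* ATOMIC_MMU_LOOKUP */
                                        retaddr, &ndi);
    uint32_t ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);

    if (unlikely(ndi.active)) {                         /* ATOMIC_MMU_CLEANUP */
        memory_notdirty_write_complete(&ndi);
    }
    return ret;
}
#endif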
1090d9bb58e5SYang Zhong 
1091d9bb58e5SYang Zhong #define DATA_SIZE 1
1092d9bb58e5SYang Zhong #include "atomic_template.h"
1093d9bb58e5SYang Zhong 
1094d9bb58e5SYang Zhong #define DATA_SIZE 2
1095d9bb58e5SYang Zhong #include "atomic_template.h"
1096d9bb58e5SYang Zhong 
1097d9bb58e5SYang Zhong #define DATA_SIZE 4
1098d9bb58e5SYang Zhong #include "atomic_template.h"
1099d9bb58e5SYang Zhong 
1100d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1101d9bb58e5SYang Zhong #define DATA_SIZE 8
1102d9bb58e5SYang Zhong #include "atomic_template.h"
1103d9bb58e5SYang Zhong #endif
1104d9bb58e5SYang Zhong 
1105d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC128
1106d9bb58e5SYang Zhong #define DATA_SIZE 16
1107d9bb58e5SYang Zhong #include "atomic_template.h"
1108d9bb58e5SYang Zhong #endif
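
/*
 * CONFIG_ATOMIC64 and CONFIG_ATOMIC128 reflect whether the host can perform
 * 8- and 16-byte atomic operations natively; when it cannot, the helpers for
 * those sizes are simply not generated here and guest atomics of that width
 * fall back to the slow path that stops all other vCPUs
 * (cpu_loop_exit_atomic(), as in atomic_mmu_lookup() above).
 */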
1109d9bb58e5SYang Zhong 
1110d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers.  */
1111d9bb58e5SYang Zhong 
1112d9bb58e5SYang Zhong #undef EXTRA_ARGS
1113d9bb58e5SYang Zhong #undef ATOMIC_NAME
1114d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
1115d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
1116d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
111734d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
1118d9bb58e5SYang Zhong 
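/*
 * With retaddr no longer an argument (GETPC() is taken inside the helper
 * itself), the same template now produces the variants without the _mmu
 * suffix (roughly helper_atomic_cmpxchgl_le, helper_atomic_fetch_addl_le,
 * and so on), which TCG-generated code calls directly for guest atomic
 * operations.
 */
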
1119d9bb58e5SYang Zhong #define DATA_SIZE 1
1120d9bb58e5SYang Zhong #include "atomic_template.h"
1121d9bb58e5SYang Zhong 
1122d9bb58e5SYang Zhong #define DATA_SIZE 2
1123d9bb58e5SYang Zhong #include "atomic_template.h"
1124d9bb58e5SYang Zhong 
1125d9bb58e5SYang Zhong #define DATA_SIZE 4
1126d9bb58e5SYang Zhong #include "atomic_template.h"
1127d9bb58e5SYang Zhong 
1128d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1129d9bb58e5SYang Zhong #define DATA_SIZE 8
1130d9bb58e5SYang Zhong #include "atomic_template.h"
1131d9bb58e5SYang Zhong #endif
1132d9bb58e5SYang Zhong 
1133d9bb58e5SYang Zhong /* Code access functions.  */
1134d9bb58e5SYang Zhong 
1135d9bb58e5SYang Zhong #undef MMUSUFFIX
1136d9bb58e5SYang Zhong #define MMUSUFFIX _cmmu
1137d9bb58e5SYang Zhong #undef GETPC
1138d9bb58e5SYang Zhong #define GETPC() ((uintptr_t)0)
1139d9bb58e5SYang Zhong #define SOFTMMU_CODE_ACCESS
1140d9bb58e5SYang Zhong 
1141d9bb58e5SYang Zhong #define DATA_SIZE 1
1142d9bb58e5SYang Zhong #include "softmmu_template.h"
1143d9bb58e5SYang Zhong 
1144d9bb58e5SYang Zhong #define DATA_SIZE 2
1145d9bb58e5SYang Zhong #include "softmmu_template.h"
1146d9bb58e5SYang Zhong 
1147d9bb58e5SYang Zhong #define DATA_SIZE 4
1148d9bb58e5SYang Zhong #include "softmmu_template.h"
1149d9bb58e5SYang Zhong 
1150d9bb58e5SYang Zhong #define DATA_SIZE 8
1151d9bb58e5SYang Zhong #include "softmmu_template.h"
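
/*
 * These final inclusions generate the instruction-fetch ("code") variants,
 * roughly helper_ret_ldb_cmmu, helper_le_ldw_cmmu, helper_le_ldl_cmmu,
 * helper_le_ldq_cmmu and their big-endian counterparts, which back the
 * cpu_ld*_code() accessors used by the target translators.  GETPC() is
 * forced to 0 because a translation-time fetch has no host return address
 * within a translation block to attribute a fault to.
 */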
1152