/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env->tlb_c.dirty = ALL_MMUIDX_BITS;
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

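/*
 * Descriptive note (added): reset one mmu_idx worth of TLB state.  The
 * memset to -1 makes every comparator in the main and victim tables miss,
 * and the per-mmuidx large-page tracking and victim-eviction index are
 * cleared as well.  Called with tlb_c.lock held.
 */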
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

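/*
 * Descriptive note (added): flush the requested set of mmu indexes on this
 * vCPU.  data.host_int carries the asked-for bitmap; only indexes that are
 * also marked dirty in env->tlb_c.dirty are actually cleared, and requests
 * for already-clean indexes are counted as elided.  The per-CPU tb_jmp_cache
 * is cleared too, since its cached vaddr->TB links can go stale once the
 * translations behind them change.
 */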
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env->tlb_c.lock);

    all_dirty = env->tlb_c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env->tlb_c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env->tlb_c.elide_flush_count,
                       env->tlb_c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

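/*
 * Descriptive note (added): flush the TLB entries of @cpu for the mmu
 * indexes in @idxmap.  When invoked from a thread other than the target
 * vCPU, the flush is deferred as async work and concurrent requests are
 * coalesced via tlb_c.pending_flush; otherwise it runs immediately on the
 * calling thread.
 */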
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        CPUArchState *env = cpu->env_ptr;
        uint16_t pending, to_clean;

        qemu_spin_lock(&env->tlb_c.lock);
        pending = env->tlb_c.pending_flush;
        to_clean = idxmap & ~pending;
        env->tlb_c.pending_flush = pending | idxmap;
        qemu_spin_unlock(&env->tlb_c.lock);

        if (to_clean) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", to_clean);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(to_clean));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

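/* Descriptive note (added): return true if @tlb_entry maps @page for any
 * access kind (read, write or instruction fetch). */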
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
    }
}

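/*
 * Descriptive note (added): flush a single page from one mmu index, with
 * tlb_c.lock held.  If the page falls inside the large-page region recorded
 * for this index, individual entries covering it cannot be identified
 * cheaply, so the whole mmu index is flushed instead (see the comment above
 * tlb_add_large_page() below).
 */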
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        tlb_flush_entry_locked(tlb_entry(env, midx, page), page);
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

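/*
 * Worked example of the encoding checked above (added, illustrative values,
 * assuming TARGET_PAGE_BITS >= 12 and at least three MMU modes): a request
 * for page 0x12345000 with idxmap (1 << 0) | (1 << 2) travels as the single
 * target_ulong 0x12345005; the worker below recovers the page address with
 * "& TARGET_PAGE_MASK" and the mmuidx bitmap with "& ALL_MMUIDX_BITS".
 */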
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}
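/*
 * Worked example for the mask-widening loop above (added, illustrative
 * numbers, 32-bit target_ulong): with an existing region
 * 0x12340000/0xffff0000 (64KB) and a new 64KB page at vaddr 0x12350000,
 * lp_addr ^ vaddr == 0x00010000, which still intersects the 0xffff0000 mask,
 * so the mask is widened once to 0xfffe0000.  The recorded region becomes
 * 0x12340000/0xfffe0000 (128KB), covering both pages.
 */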

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Note that the tlb is no longer clean.  */
    env->tlb_c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
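/*
 * Illustrative caller's-eye view (added; not taken from any particular
 * target): a target's tlb_fill handler typically finishes a successful
 * translation with something like
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 *
 * passing the real mapping size instead when the translation covers a
 * larger or smaller region than TARGET_PAGE_SIZE.
 */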

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

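/*
 * Descriptive note (added): perform the slow-path load for an access that
 * cannot be satisfied from host RAM directly: either a genuine MMIO access,
 * or a TLB_RECHECK access where the translation must be re-validated at
 * sub-page granularity before deciding between RAM and I/O.  The @size-byte
 * value read is returned in a uint64_t.
 */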
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

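/*
 * Descriptive note (added): store counterpart of io_readx().  Performs the
 * slow-path write for an MMIO or TLB_RECHECK access, re-validating the
 * translation first when @recheck is set and falling back to a plain host
 * store if it turns out to be RAM after all.
 */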
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = tlb_addr_write(entry);
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            stn_p((void *)haddr, size, val);
            return;
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}
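/*
 * Typical use of probe_write() (added, illustrative, not tied to a specific
 * target): a helper that must not fault part-way through a multi-element
 * store can validate the destination page up front with
 *
 *     probe_write(env, addr, size, cpu_mmu_index(env, false), GETPC());
 *
 * and only then start modifying guest memory.
 */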

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
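/*
 * Descriptive note (added): each inclusion of softmmu_template.h above
 * expands, for the given DATA_SIZE, into the out-of-line load/store helpers
 * used by generated code (for example the 1-byte inclusion provides
 * helper_ret_ldub_mmu() and helper_ret_stb_mmu()); see softmmu_template.h
 * for the full set of names.
 */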

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)
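/*
 * Descriptive note (added, to the best of my reading of atomic_template.h):
 * the template uses ATOMIC_MMU_DECLS/ATOMIC_MMU_LOOKUP at the top of each
 * generated helper to obtain the host address via atomic_mmu_lookup(), and
 * ATOMIC_MMU_CLEANUP on the way out, so a pending notdirty write is
 * completed exactly once per atomic operation.
 */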

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"