xref: /openbmc/qemu/accel/tcg/cputlb.c (revision ea9025cb49027d9b3c4f48c56602351b9cf65ff1)
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

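/*
 * Per-vCPU TLB initialisation hook. At this revision it is a no-op
 * placeholder: the TLB arrays live inside CPUArchState and are
 * (presumably) zero-initialised along with it, so there is nothing
 * extra to set up here yet.
 */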
void tlb_init(CPUState *cpu)
{
}

/* flush_all_helper: run fn across all cpus
 *
 * Queue fn as asynchronous work on every vCPU other than the source.
 * Callers that need a synchronisation point additionally queue the
 * source vCPU's helper as "safe" work (see the *_all_cpus_synced
 * variants below), so that all queued work is finished before
 * execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

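/*
 * Total number of full TLB flushes, summed across all vCPUs. The per-CPU
 * counters are written from their own vCPU threads, so they are read here
 * with atomic_read() to avoid torn reads.
 */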
size_t tlb_flush_count(void)
{
    CPUState *cpu;
    size_t count = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        count += atomic_read(&env->tlb_flush_count);
    }
    return count;
}

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    tlb_debug("(count: %zu)\n", tlb_flush_count());

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

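/*
 * Trivial run_on_cpu wrapper so that a full flush can be queued on another
 * vCPU with async_run_on_cpu(); the data argument is unused.
 */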
static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

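/*
 * Flush the whole TLB of one vCPU. If the target vCPU is not the current
 * thread, the flush is deferred as async work; pending_tlb_flush is used to
 * avoid queueing a second flush while one is already outstanding.
 */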
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

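/*
 * Flush only the MMU indexes set in data.host_int: both the main and the
 * victim table of each selected index are wiped. The per-vCPU tb_jmp_cache
 * is also cleared, since it caches lookups by virtual address that may now
 * be stale.
 */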
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

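/*
 * Return true if the TLB entry maps the given page for any access type
 * (read, write or execute); used to decide whether an entry needs to be
 * invalidated for a per-page flush.
 */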
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_entry->addr_write, page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx,
                                       target_ulong page)
{
    int k;
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page);
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
        tlb_flush_vtlb_page(env, mmu_idx, addr);
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

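/*
 * Per-page, per-mmuidx flush. The page address and the mmuidx bitmap are
 * packed into the single target_ptr argument (which the BUILD_BUG_ON above
 * makes safe): the low TARGET_PAGE_BITS carry the bitmap, the rest the page.
 */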
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
            tlb_flush_vtlb_page(env, mmu_idx, addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

/* update the TLBs so that writes to code in the page at 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the physical page at 'ram_addr' are no
   longer tested for self-modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this, but for normal RAM the
 * most common is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Because we want other vCPUs to respond to changes straight away, we
 * update the te->addr_write field atomically. If the TLB entry has
 * been changed by the vCPU in the meantime we skip the update.
 *
 * As this function uses atomic accesses we also need to ensure
 * updates to tlb_entries follow the same access rules. We don't need
 * to worry about this for oversized guests as MTTCG is disabled for
 * them.
 */

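/*
 * Turn TLB_NOTDIRTY back on for any writable entry whose backing RAM lies
 * in [start, start + length). With an oversized guest (TCG_OVERSIZED_GUEST)
 * plain accesses are used since MTTCG is disabled; otherwise the flag is
 * set with a cmpxchg so a concurrent refill of the entry is not clobbered.
 */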
static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
#if TCG_OVERSIZED_GUEST
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
#else
    /* paired with atomic_mb_set in tlb_set_page_with_attrs */
    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
    uintptr_t addr = orig_addr;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += atomic_read(&tlb_entry->addend);
        if ((addr - start) < length) {
            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
        }
    }
#endif
}

/* For atomic correctness when running MTTCG we need to use the right
 * primitives when copying entries */
static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
                                   bool atomic_set)
{
#if TCG_OVERSIZED_GUEST
    *d = *s;
#else
    if (atomic_set) {
        d->addr_read = s->addr_read;
        d->addr_code = s->addr_code;
        atomic_set(&d->addend, atomic_read(&s->addend));
        /* Pairs with flag setting in tlb_reset_dirty_range */
        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
    } else {
        d->addr_read = s->addr_read;
        d->addr_write = atomic_read(&s->addr_write);
        d->addr_code = s->addr_code;
        d->addend = atomic_read(&s->addend);
    }
#endif
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU). As such care needs to be taken that we don't
 * dangerously race with another vCPU update. The only thing actually
 * updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

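/*
 * Drop TLB_NOTDIRTY from a single entry, but only if the entry still maps
 * exactly this (not-dirty) page; entries for other pages are left alone.
 */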
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB entries corresponding to virtual page vaddr so that
   writes no longer take the not-dirty slow path (the page is now dirty) */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size < TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        if (size > TARGET_PAGE_SIZE) {
            tlb_add_large_page(env, vaddr, size);
        }
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page(env, mmu_idx, vaddr_page);

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper(tv, te, true);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    /* Pairs with flag setting in tlb_reset_dirty_range */
    copy_tlb_helper(te, &tn, true);
    /* atomic_mb_set(&te->addr_write, write_address); */
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

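/*
 * Convert a host pointer that is known to point into guest RAM back into a
 * ram_addr_t; a pointer outside guest RAM is a fatal error here.
 */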
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

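/*
 * Slow-path load for an access whose TLB entry is tagged TLB_MMIO or
 * TLB_RECHECK: re-validate sub-page (TLB_RECHECK) mappings, then dispatch
 * the read to the MemoryRegion, taking the iothread lock if the region
 * requires it, and report any transaction failure to the CPU.
 */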
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        int index;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

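/*
 * Store counterpart of io_readx(): same TLB_RECHECK handling, then the
 * write is dispatched to the MemoryRegion under the same locking rules.
 */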
828d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
82904e3aabdSPeter Maydell                       int mmu_idx,
830d9bb58e5SYang Zhong                       uint64_t val, target_ulong addr,
83155df6fcfSPeter Maydell                       uintptr_t retaddr, bool recheck, int size)
832d9bb58e5SYang Zhong {
833d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
8342d54f194SPeter Maydell     hwaddr mr_offset;
8352d54f194SPeter Maydell     MemoryRegionSection *section;
8362d54f194SPeter Maydell     MemoryRegion *mr;
837d9bb58e5SYang Zhong     bool locked = false;
83804e3aabdSPeter Maydell     MemTxResult r;
839d9bb58e5SYang Zhong 
84055df6fcfSPeter Maydell     if (recheck) {
84155df6fcfSPeter Maydell         /*
84255df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
84355df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
84455df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
84555df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
84655df6fcfSPeter Maydell          */
84755df6fcfSPeter Maydell         int index;
84855df6fcfSPeter Maydell         target_ulong tlb_addr;
84955df6fcfSPeter Maydell 
85055df6fcfSPeter Maydell         tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
85155df6fcfSPeter Maydell 
85255df6fcfSPeter Maydell         index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
85355df6fcfSPeter Maydell         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
85455df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
85555df6fcfSPeter Maydell             /* RAM access */
85655df6fcfSPeter Maydell             uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;
85755df6fcfSPeter Maydell 
85855df6fcfSPeter Maydell             stn_p((void *)haddr, size, val);
85955df6fcfSPeter Maydell             return;
86055df6fcfSPeter Maydell         }
86155df6fcfSPeter Maydell         /* Fall through for handling IO accesses */
86255df6fcfSPeter Maydell     }
86355df6fcfSPeter Maydell 
8642d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
8652d54f194SPeter Maydell     mr = section->mr;
8662d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
867d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
868d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
869d9bb58e5SYang Zhong     }
870d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
871d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
872d9bb58e5SYang Zhong 
8738b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
874d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
875d9bb58e5SYang Zhong         locked = true;
876d9bb58e5SYang Zhong     }
8772d54f194SPeter Maydell     r = memory_region_dispatch_write(mr, mr_offset,
87804e3aabdSPeter Maydell                                      val, size, iotlbentry->attrs);
87904e3aabdSPeter Maydell     if (r != MEMTX_OK) {
8802d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
8812d54f194SPeter Maydell             section->offset_within_address_space -
8822d54f194SPeter Maydell             section->offset_within_region;
8832d54f194SPeter Maydell 
88404e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
88504e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
88604e3aabdSPeter Maydell     }
887d9bb58e5SYang Zhong     if (locked) {
888d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
889d9bb58e5SYang Zhong     }
890d9bb58e5SYang Zhong }
891d9bb58e5SYang Zhong 
892d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
893d9bb58e5SYang Zhong    back to the main tlb.  */
894d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
895d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
896d9bb58e5SYang Zhong {
897d9bb58e5SYang Zhong     size_t vidx;
898d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
899d9bb58e5SYang Zhong         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
900d9bb58e5SYang Zhong         target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
901d9bb58e5SYang Zhong 
902d9bb58e5SYang Zhong         if (cmp == page) {
903d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
904d9bb58e5SYang Zhong             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
905d9bb58e5SYang Zhong 
906d9bb58e5SYang Zhong             copy_tlb_helper(&tmptlb, tlb, false);
907d9bb58e5SYang Zhong             copy_tlb_helper(tlb, vtlb, true);
908d9bb58e5SYang Zhong             copy_tlb_helper(vtlb, &tmptlb, true);
909d9bb58e5SYang Zhong 
910d9bb58e5SYang Zhong             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
911d9bb58e5SYang Zhong             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
912d9bb58e5SYang Zhong             tmpio = *io; *io = *vio; *vio = tmpio;
913d9bb58e5SYang Zhong             return true;
914d9bb58e5SYang Zhong         }
915d9bb58e5SYang Zhong     }
916d9bb58e5SYang Zhong     return false;
917d9bb58e5SYang Zhong }
918d9bb58e5SYang Zhong 
919d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
920d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
921d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
922d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
923d9bb58e5SYang Zhong 
924f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */
925f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it
926f2553f04SKONRAD Frederic  * is actually a ram_addr_t (in system mode; the user mode emulation
927f2553f04SKONRAD Frederic  * version of this function returns a guest virtual address).
928f2553f04SKONRAD Frederic  */
929f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
930f2553f04SKONRAD Frederic {
9312d54f194SPeter Maydell     int mmu_idx, index;
932f2553f04SKONRAD Frederic     void *p;
933f2553f04SKONRAD Frederic 
934f2553f04SKONRAD Frederic     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
935f2553f04SKONRAD Frederic     mmu_idx = cpu_mmu_index(env, true);
936e4c967a7SPeter Maydell     if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) {
937b493ccf1SPeter Maydell         if (!VICTIM_TLB_HIT(addr_code, addr)) {
93898670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
93971b9a453SKONRAD Frederic         }
9403474c98aSPeter Maydell         assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
941f2553f04SKONRAD Frederic     }
94255df6fcfSPeter Maydell 
94355a7cb14SPeter Maydell     if (unlikely(env->tlb_table[mmu_idx][index].addr_code &
94455a7cb14SPeter Maydell                  (TLB_RECHECK | TLB_MMIO))) {
94555df6fcfSPeter Maydell         /*
94655a7cb14SPeter Maydell          * Return -1 if we can't translate and execute from an entire
94755a7cb14SPeter Maydell          * page of RAM here, which will cause us to execute by loading
94855a7cb14SPeter Maydell          * and translating one insn at a time, without caching:
94955a7cb14SPeter Maydell          *  - TLB_RECHECK: means the MMU protection covers a smaller range
95055a7cb14SPeter Maydell          *    than a target page, so we must redo the MMU check every insn
95155a7cb14SPeter Maydell          *  - TLB_MMIO: region is not backed by RAM
95255df6fcfSPeter Maydell          */
95320cb6ae4SPeter Maydell         return -1;
95455df6fcfSPeter Maydell     }
95555df6fcfSPeter Maydell 
956f2553f04SKONRAD Frederic     p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
957f2553f04SKONRAD Frederic     return qemu_ram_addr_from_host_nofail(p);
958f2553f04SKONRAD Frederic }
959f2553f04SKONRAD Frederic 
960d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted.
961d9bb58e5SYang Zhong  * If it is not permitted then an exception will be taken in the same
962d9bb58e5SYang Zhong  * way as if this were a real write access (and we will not return).
963d9bb58e5SYang Zhong  * Otherwise the function will return, and there will be a valid
964d9bb58e5SYang Zhong  * entry in the TLB for this access.
965d9bb58e5SYang Zhong  */
96698670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
967d9bb58e5SYang Zhong                  uintptr_t retaddr)
968d9bb58e5SYang Zhong {
969d9bb58e5SYang Zhong     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
970d9bb58e5SYang Zhong     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
971d9bb58e5SYang Zhong 
972334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
973d9bb58e5SYang Zhong         /* TLB entry is for a different page */
974d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
97598670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
97698670d47SLaurent Vivier                      mmu_idx, retaddr);
977d9bb58e5SYang Zhong         }
978d9bb58e5SYang Zhong     }
979d9bb58e5SYang Zhong }
980d9bb58e5SYang Zhong 
981d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
982d9bb58e5SYang Zhong  * operations, or io operations to proceed.  Return the host address.  */
983d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
98434d49937SPeter Maydell                                TCGMemOpIdx oi, uintptr_t retaddr,
98534d49937SPeter Maydell                                NotDirtyInfo *ndi)
986d9bb58e5SYang Zhong {
987d9bb58e5SYang Zhong     size_t mmu_idx = get_mmuidx(oi);
988d9bb58e5SYang Zhong     size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
989d9bb58e5SYang Zhong     CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
990d9bb58e5SYang Zhong     target_ulong tlb_addr = tlbe->addr_write;
991d9bb58e5SYang Zhong     TCGMemOp mop = get_memop(oi);
992d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
993d9bb58e5SYang Zhong     int s_bits = mop & MO_SIZE;
99434d49937SPeter Maydell     void *hostaddr;
995d9bb58e5SYang Zhong 
996d9bb58e5SYang Zhong     /* Adjust the given return address.  */
997d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
998d9bb58e5SYang Zhong 
999d9bb58e5SYang Zhong     /* Enforce guest-required alignment.  */
1000d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1001d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1002d9bb58e5SYang Zhong         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1003d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1004d9bb58e5SYang Zhong     }
1005d9bb58e5SYang Zhong 
1006d9bb58e5SYang Zhong     /* Enforce QEMU-required alignment.  */
1007d9bb58e5SYang Zhong     if (unlikely(addr & ((1 << s_bits) - 1))) {
1008d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1009d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1010d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1011d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1012d9bb58e5SYang Zhong         goto stop_the_world;
1013d9bb58e5SYang Zhong     }
1014d9bb58e5SYang Zhong 
1015d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
1016334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1017d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
101898670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
101998670d47SLaurent Vivier                      mmu_idx, retaddr);
1020d9bb58e5SYang Zhong         }
1021f52bfb12SDavid Hildenbrand         tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
1022d9bb58e5SYang Zhong     }
1023d9bb58e5SYang Zhong 
102455df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
102555df6fcfSPeter Maydell     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1026d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1027d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1028d9bb58e5SYang Zhong         goto stop_the_world;
1029d9bb58e5SYang Zhong     }
1030d9bb58e5SYang Zhong 
1031d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
103234d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
103398670d47SLaurent Vivier         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
103498670d47SLaurent Vivier                  mmu_idx, retaddr);
1035d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1036d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1037d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1038d9bb58e5SYang Zhong         goto stop_the_world;
1039d9bb58e5SYang Zhong     }
1040d9bb58e5SYang Zhong 
104134d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
104234d49937SPeter Maydell 
104334d49937SPeter Maydell     ndi->active = false;
104434d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
104534d49937SPeter Maydell         ndi->active = true;
104634d49937SPeter Maydell         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
104734d49937SPeter Maydell                                       qemu_ram_addr_from_host_nofail(hostaddr),
104834d49937SPeter Maydell                                       1 << s_bits);
104934d49937SPeter Maydell     }
105034d49937SPeter Maydell 
105134d49937SPeter Maydell     return hostaddr;
1052d9bb58e5SYang Zhong 
1053d9bb58e5SYang Zhong  stop_the_world:
1054d9bb58e5SYang Zhong     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1055d9bb58e5SYang Zhong }
1056d9bb58e5SYang Zhong 
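/*
 * Sketch of roughly what atomic_template.h expands to for a 32-bit
 * compare-and-swap once atomic_mmu_lookup() and the ATOMIC_MMU_* macros
 * defined further down are substituted.  The name is illustrative; the
 * real helper is generated by the DATA_SIZE == 4 inclusion below.
 */
static inline uint32_t example_atomic_cmpxchgl(CPUArchState *env,
                                               target_ulong addr,
                                               uint32_t cmpv, uint32_t newv,
                                               TCGMemOpIdx oi,
                                               uintptr_t retaddr)
{
    NotDirtyInfo ndi;                              /* ATOMIC_MMU_DECLS */
    uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, retaddr, &ndi);
    uint32_t ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);

    if (unlikely(ndi.active)) {                    /* ATOMIC_MMU_CLEANUP */
        memory_notdirty_write_complete(&ndi);
    }
    return ret;
}
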
1057d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN
1058d9bb58e5SYang Zhong # define TGT_BE(X)  (X)
1059d9bb58e5SYang Zhong # define TGT_LE(X)  BSWAP(X)
1060d9bb58e5SYang Zhong #else
1061d9bb58e5SYang Zhong # define TGT_BE(X)  BSWAP(X)
1062d9bb58e5SYang Zhong # define TGT_LE(X)  (X)
1063d9bb58e5SYang Zhong #endif
1064d9bb58e5SYang Zhong 
1065d9bb58e5SYang Zhong #define MMUSUFFIX _mmu
1066d9bb58e5SYang Zhong 
1067d9bb58e5SYang Zhong #define DATA_SIZE 1
1068d9bb58e5SYang Zhong #include "softmmu_template.h"
1069d9bb58e5SYang Zhong 
1070d9bb58e5SYang Zhong #define DATA_SIZE 2
1071d9bb58e5SYang Zhong #include "softmmu_template.h"
1072d9bb58e5SYang Zhong 
1073d9bb58e5SYang Zhong #define DATA_SIZE 4
1074d9bb58e5SYang Zhong #include "softmmu_template.h"
1075d9bb58e5SYang Zhong 
1076d9bb58e5SYang Zhong #define DATA_SIZE 8
1077d9bb58e5SYang Zhong #include "softmmu_template.h"
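
/*
 * The four inclusions above stamp out the out-of-line load/store helpers
 * for 1/2/4/8-byte data accesses (helper_ret_ldub_mmu(), helper_le_stl_mmu()
 * and friends, declared in tcg/tcg.h).  A hypothetical helper-to-helper
 * call of the little-endian 32-bit load:
 */
static inline uint32_t example_ldl_le(CPUArchState *env, target_ulong addr,
                                      int mmu_idx, uintptr_t retaddr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);

    return helper_le_ldul_mmu(env, addr, oi, retaddr);
}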
1078d9bb58e5SYang Zhong 
1079d9bb58e5SYang Zhong /* The first set of helpers allows passing in OI and RETADDR.  This makes
1080d9bb58e5SYang Zhong    them callable from other helpers.  */
1081d9bb58e5SYang Zhong 
1082d9bb58e5SYang Zhong #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1083d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
1084d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
108534d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
108634d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
108734d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP                              \
108834d49937SPeter Maydell     do {                                                \
108934d49937SPeter Maydell         if (unlikely(ndi.active)) {                     \
109034d49937SPeter Maydell             memory_notdirty_write_complete(&ndi);       \
109134d49937SPeter Maydell         }                                               \
109234d49937SPeter Maydell     } while (0)
1093d9bb58e5SYang Zhong 
1094d9bb58e5SYang Zhong #define DATA_SIZE 1
1095d9bb58e5SYang Zhong #include "atomic_template.h"
1096d9bb58e5SYang Zhong 
1097d9bb58e5SYang Zhong #define DATA_SIZE 2
1098d9bb58e5SYang Zhong #include "atomic_template.h"
1099d9bb58e5SYang Zhong 
1100d9bb58e5SYang Zhong #define DATA_SIZE 4
1101d9bb58e5SYang Zhong #include "atomic_template.h"
1102d9bb58e5SYang Zhong 
1103d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1104d9bb58e5SYang Zhong #define DATA_SIZE 8
1105d9bb58e5SYang Zhong #include "atomic_template.h"
1106d9bb58e5SYang Zhong #endif
1107d9bb58e5SYang Zhong 
1108d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC128
1109d9bb58e5SYang Zhong #define DATA_SIZE 16
1110d9bb58e5SYang Zhong #include "atomic_template.h"
1111d9bb58e5SYang Zhong #endif
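
/*
 * Usage sketch for the first set: because oi and retaddr are explicit
 * arguments, another out-of-line helper can call these and still have
 * faults reported against its own caller.  The helper name is assumed
 * from the ATOMIC_NAME expansion for the DATA_SIZE == 4 inclusion above.
 */
static inline uint32_t example_cmpxchgl_le_from_helper(CPUArchState *env,
                                                       target_ulong addr,
                                                       uint32_t cmpv,
                                                       uint32_t newv,
                                                       int mmu_idx,
                                                       uintptr_t retaddr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);

    return helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, retaddr);
}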
1112d9bb58e5SYang Zhong 
1113d9bb58e5SYang Zhong /* The second set of helpers is directly callable from TCG-generated code.  */
1114d9bb58e5SYang Zhong 
1115d9bb58e5SYang Zhong #undef EXTRA_ARGS
1116d9bb58e5SYang Zhong #undef ATOMIC_NAME
1117d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
1118d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
1119d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
112034d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
1121d9bb58e5SYang Zhong 
1122d9bb58e5SYang Zhong #define DATA_SIZE 1
1123d9bb58e5SYang Zhong #include "atomic_template.h"
1124d9bb58e5SYang Zhong 
1125d9bb58e5SYang Zhong #define DATA_SIZE 2
1126d9bb58e5SYang Zhong #include "atomic_template.h"
1127d9bb58e5SYang Zhong 
1128d9bb58e5SYang Zhong #define DATA_SIZE 4
1129d9bb58e5SYang Zhong #include "atomic_template.h"
1130d9bb58e5SYang Zhong 
1131d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1132d9bb58e5SYang Zhong #define DATA_SIZE 8
1133d9bb58e5SYang Zhong #include "atomic_template.h"
1134d9bb58e5SYang Zhong #endif
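
/*
 * Usage sketch for the second set: the DATA_SIZE == 4 inclusion above is
 * assumed to define helper_atomic_cmpxchgl_le(), which takes no retaddr
 * and relies on GETPC() internally, so it only attributes faults correctly
 * when invoked directly from TCG-generated code.  The wrapper below is
 * purely illustrative.
 */
static inline uint32_t example_cmpxchgl_le_from_tcg(CPUArchState *env,
                                                    target_ulong addr,
                                                    uint32_t cmpv,
                                                    uint32_t newv,
                                                    TCGMemOpIdx oi)
{
    return helper_atomic_cmpxchgl_le(env, addr, cmpv, newv, oi);
}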
1135d9bb58e5SYang Zhong 
1136d9bb58e5SYang Zhong /* Code access functions.  */
1137d9bb58e5SYang Zhong 
1138d9bb58e5SYang Zhong #undef MMUSUFFIX
1139d9bb58e5SYang Zhong #define MMUSUFFIX _cmmu
1140d9bb58e5SYang Zhong #undef GETPC
1141d9bb58e5SYang Zhong #define GETPC() ((uintptr_t)0)
1142d9bb58e5SYang Zhong #define SOFTMMU_CODE_ACCESS
1143d9bb58e5SYang Zhong 
1144d9bb58e5SYang Zhong #define DATA_SIZE 1
1145d9bb58e5SYang Zhong #include "softmmu_template.h"
1146d9bb58e5SYang Zhong 
1147d9bb58e5SYang Zhong #define DATA_SIZE 2
1148d9bb58e5SYang Zhong #include "softmmu_template.h"
1149d9bb58e5SYang Zhong 
1150d9bb58e5SYang Zhong #define DATA_SIZE 4
1151d9bb58e5SYang Zhong #include "softmmu_template.h"
1152d9bb58e5SYang Zhong 
1153d9bb58e5SYang Zhong #define DATA_SIZE 8
1154d9bb58e5SYang Zhong #include "softmmu_template.h"
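
/*
 * Sketch: the code-access inclusions above generate the _cmmu instruction
 * fetch path that backs accessors such as cpu_ldl_code().  GETPC() is
 * forced to 0 here so a TLB fill on this path is not attributed to a host
 * return address inside generated code.  Hypothetical fetch of one 32-bit
 * instruction word:
 */
static inline uint32_t example_fetch_insn_word(CPUArchState *env,
                                               target_ulong pc)
{
    return cpu_ldl_code(env, pc);
}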
1155