xref: /openbmc/qemu/accel/tcg/cputlb.c (revision dbea78a4d696e35d28a35db95cb29ff075626150)
1d9bb58e5SYang Zhong /*
2d9bb58e5SYang Zhong  *  Common CPU TLB handling
3d9bb58e5SYang Zhong  *
4d9bb58e5SYang Zhong  *  Copyright (c) 2003 Fabrice Bellard
5d9bb58e5SYang Zhong  *
6d9bb58e5SYang Zhong  * This library is free software; you can redistribute it and/or
7d9bb58e5SYang Zhong  * modify it under the terms of the GNU Lesser General Public
8d9bb58e5SYang Zhong  * License as published by the Free Software Foundation; either
9d9bb58e5SYang Zhong  * version 2 of the License, or (at your option) any later version.
10d9bb58e5SYang Zhong  *
11d9bb58e5SYang Zhong  * This library is distributed in the hope that it will be useful,
12d9bb58e5SYang Zhong  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13d9bb58e5SYang Zhong  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14d9bb58e5SYang Zhong  * Lesser General Public License for more details.
15d9bb58e5SYang Zhong  *
16d9bb58e5SYang Zhong  * You should have received a copy of the GNU Lesser General Public
17d9bb58e5SYang Zhong  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18d9bb58e5SYang Zhong  */
19d9bb58e5SYang Zhong 
20d9bb58e5SYang Zhong #include "qemu/osdep.h"
21d9bb58e5SYang Zhong #include "qemu/main-loop.h"
22d9bb58e5SYang Zhong #include "cpu.h"
23d9bb58e5SYang Zhong #include "exec/exec-all.h"
24d9bb58e5SYang Zhong #include "exec/memory.h"
25d9bb58e5SYang Zhong #include "exec/address-spaces.h"
26d9bb58e5SYang Zhong #include "exec/cpu_ldst.h"
27d9bb58e5SYang Zhong #include "exec/cputlb.h"
28d9bb58e5SYang Zhong #include "exec/memory-internal.h"
29d9bb58e5SYang Zhong #include "exec/ram_addr.h"
30d9bb58e5SYang Zhong #include "tcg/tcg.h"
31d9bb58e5SYang Zhong #include "qemu/error-report.h"
32d9bb58e5SYang Zhong #include "exec/log.h"
33d9bb58e5SYang Zhong #include "exec/helper-proto.h"
34d9bb58e5SYang Zhong #include "qemu/atomic.h"
35d9bb58e5SYang Zhong 
36d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
37d9bb58e5SYang Zhong /* #define DEBUG_TLB */
38d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */
39d9bb58e5SYang Zhong 
40d9bb58e5SYang Zhong #ifdef DEBUG_TLB
41d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1
42d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG
43d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 1
44d9bb58e5SYang Zhong # else
45d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 0
46d9bb58e5SYang Zhong # endif
47d9bb58e5SYang Zhong #else
48d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0
49d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0
50d9bb58e5SYang Zhong #endif
51d9bb58e5SYang Zhong 
52d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) do { \
53d9bb58e5SYang Zhong     if (DEBUG_TLB_LOG_GATE) { \
54d9bb58e5SYang Zhong         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
55d9bb58e5SYang Zhong                       ## __VA_ARGS__); \
56d9bb58e5SYang Zhong     } else if (DEBUG_TLB_GATE) { \
57d9bb58e5SYang Zhong         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
58d9bb58e5SYang Zhong     } \
59d9bb58e5SYang Zhong } while (0)
60d9bb58e5SYang Zhong 
61d9bb58e5SYang Zhong #define assert_cpu_is_self(this_cpu) do {                         \
62d9bb58e5SYang Zhong         if (DEBUG_TLB_GATE) {                                     \
63d9bb58e5SYang Zhong             g_assert(!(this_cpu)->created || qemu_cpu_is_self(this_cpu)); \
64d9bb58e5SYang Zhong         }                                                         \
65d9bb58e5SYang Zhong     } while (0)
66d9bb58e5SYang Zhong 
67d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a
68d9bb58e5SYang Zhong  * target_ulong even on 32 bit builds */
69d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
70d9bb58e5SYang Zhong 
71d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
72d9bb58e5SYang Zhong  */
73d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
74d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
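
/*
 * Illustrative sketch (not part of the original file): the "idxmap"
 * arguments taken by the *_by_mmuidx() flush functions below are
 * bitmasks with one bit per MMU mode.  A target with hypothetical
 * modes MMU_KERNEL_IDX and MMU_USER_IDX might build and use one as:
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *     tlb_flush_by_mmuidx(cpu, idxmap);
 *
 * ALL_MMUIDX_BITS is simply this mask with every mode's bit set.
 */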
75d9bb58e5SYang Zhong 
76d9bb58e5SYang Zhong /* flush_all_helper: run fn asynchronously on all cpus other than src
77d9bb58e5SYang Zhong  *
78d9bb58e5SYang Zhong  * Callers that need a synchronisation point additionally queue the
79d9bb58e5SYang Zhong  * src cpu's own helper as "safe" work (see the *_synced variants
80d9bb58e5SYang Zhong  * below), so that all queued work is finished before execution
81d9bb58e5SYang Zhong  * starts again.
82d9bb58e5SYang Zhong  */
83d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
84d9bb58e5SYang Zhong                              run_on_cpu_data d)
85d9bb58e5SYang Zhong {
86d9bb58e5SYang Zhong     CPUState *cpu;
87d9bb58e5SYang Zhong 
88d9bb58e5SYang Zhong     CPU_FOREACH(cpu) {
89d9bb58e5SYang Zhong         if (cpu != src) {
90d9bb58e5SYang Zhong             async_run_on_cpu(cpu, fn, d);
91d9bb58e5SYang Zhong         }
92d9bb58e5SYang Zhong     }
93d9bb58e5SYang Zhong }
94d9bb58e5SYang Zhong 
9583974cf4SEmilio G. Cota size_t tlb_flush_count(void)
9683974cf4SEmilio G. Cota {
9783974cf4SEmilio G. Cota     CPUState *cpu;
9883974cf4SEmilio G. Cota     size_t count = 0;
9983974cf4SEmilio G. Cota 
10083974cf4SEmilio G. Cota     CPU_FOREACH(cpu) {
10183974cf4SEmilio G. Cota         CPUArchState *env = cpu->env_ptr;
10283974cf4SEmilio G. Cota 
10383974cf4SEmilio G. Cota         count += atomic_read(&env->tlb_flush_count);
10483974cf4SEmilio G. Cota     }
10583974cf4SEmilio G. Cota     return count;
10683974cf4SEmilio G. Cota }
107d9bb58e5SYang Zhong 
108d9bb58e5SYang Zhong /* This is OK because CPU architectures generally permit an
109d9bb58e5SYang Zhong  * implementation to drop entries from the TLB at any time, so
110d9bb58e5SYang Zhong  * flushing more entries than required is only an efficiency issue,
111d9bb58e5SYang Zhong  * not a correctness issue.
112d9bb58e5SYang Zhong  */
113d9bb58e5SYang Zhong static void tlb_flush_nocheck(CPUState *cpu)
114d9bb58e5SYang Zhong {
115d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
116d9bb58e5SYang Zhong 
117d9bb58e5SYang Zhong     /* The QOM tests will trigger tlb_flushes without setting up TCG
118d9bb58e5SYang Zhong      * so we bug out here in that case.
119d9bb58e5SYang Zhong      */
120d9bb58e5SYang Zhong     if (!tcg_enabled()) {
121d9bb58e5SYang Zhong         return;
122d9bb58e5SYang Zhong     }
123d9bb58e5SYang Zhong 
124d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
12583974cf4SEmilio G. Cota     atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
12683974cf4SEmilio G. Cota     tlb_debug("(count: %zu)\n", tlb_flush_count());
127d9bb58e5SYang Zhong 
128d9bb58e5SYang Zhong     memset(env->tlb_table, -1, sizeof(env->tlb_table));
129d9bb58e5SYang Zhong     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
130f3ced3c5SEmilio G. Cota     cpu_tb_jmp_cache_clear(cpu);
131d9bb58e5SYang Zhong 
132d9bb58e5SYang Zhong     env->vtlb_index = 0;
133d9bb58e5SYang Zhong     env->tlb_flush_addr = -1;
134d9bb58e5SYang Zhong     env->tlb_flush_mask = 0;
135d9bb58e5SYang Zhong 
136d9bb58e5SYang Zhong     atomic_mb_set(&cpu->pending_tlb_flush, 0);
137d9bb58e5SYang Zhong }
138d9bb58e5SYang Zhong 
139d9bb58e5SYang Zhong static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
140d9bb58e5SYang Zhong {
141d9bb58e5SYang Zhong     tlb_flush_nocheck(cpu);
142d9bb58e5SYang Zhong }
143d9bb58e5SYang Zhong 
144d9bb58e5SYang Zhong void tlb_flush(CPUState *cpu)
145d9bb58e5SYang Zhong {
146d9bb58e5SYang Zhong     if (cpu->created && !qemu_cpu_is_self(cpu)) {
147d9bb58e5SYang Zhong         if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
148d9bb58e5SYang Zhong             atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
149d9bb58e5SYang Zhong             async_run_on_cpu(cpu, tlb_flush_global_async_work,
150d9bb58e5SYang Zhong                              RUN_ON_CPU_NULL);
151d9bb58e5SYang Zhong         }
152d9bb58e5SYang Zhong     } else {
153d9bb58e5SYang Zhong         tlb_flush_nocheck(cpu);
154d9bb58e5SYang Zhong     }
155d9bb58e5SYang Zhong }
156d9bb58e5SYang Zhong 
157d9bb58e5SYang Zhong void tlb_flush_all_cpus(CPUState *src_cpu)
158d9bb58e5SYang Zhong {
159d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_global_async_work;
160d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
161d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_NULL);
162d9bb58e5SYang Zhong }
163d9bb58e5SYang Zhong 
164d9bb58e5SYang Zhong void tlb_flush_all_cpus_synced(CPUState *src_cpu)
165d9bb58e5SYang Zhong {
166d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_global_async_work;
167d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
168d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
169d9bb58e5SYang Zhong }
170d9bb58e5SYang Zhong 
171d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
172d9bb58e5SYang Zhong {
173d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
174d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmask = data.host_int;
175d9bb58e5SYang Zhong     int mmu_idx;
176d9bb58e5SYang Zhong 
177d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
178d9bb58e5SYang Zhong 
179d9bb58e5SYang Zhong     tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
180d9bb58e5SYang Zhong 
181d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
182d9bb58e5SYang Zhong 
183d9bb58e5SYang Zhong         if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
184d9bb58e5SYang Zhong             tlb_debug("%d\n", mmu_idx);
185d9bb58e5SYang Zhong 
186d9bb58e5SYang Zhong             memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
187d9bb58e5SYang Zhong             memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
188d9bb58e5SYang Zhong         }
189d9bb58e5SYang Zhong     }
190d9bb58e5SYang Zhong 
191f3ced3c5SEmilio G. Cota     cpu_tb_jmp_cache_clear(cpu);
192d9bb58e5SYang Zhong 
193d9bb58e5SYang Zhong     tlb_debug("done\n");
194d9bb58e5SYang Zhong }
195d9bb58e5SYang Zhong 
196d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
197d9bb58e5SYang Zhong {
198d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
199d9bb58e5SYang Zhong 
200d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
201d9bb58e5SYang Zhong         uint16_t pending_flushes = idxmap;
202d9bb58e5SYang Zhong         pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);
203d9bb58e5SYang Zhong 
204d9bb58e5SYang Zhong         if (pending_flushes) {
205d9bb58e5SYang Zhong             tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);
206d9bb58e5SYang Zhong 
207d9bb58e5SYang Zhong             atomic_or(&cpu->pending_tlb_flush, pending_flushes);
208d9bb58e5SYang Zhong             async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
209d9bb58e5SYang Zhong                              RUN_ON_CPU_HOST_INT(pending_flushes));
210d9bb58e5SYang Zhong         }
211d9bb58e5SYang Zhong     } else {
212d9bb58e5SYang Zhong         tlb_flush_by_mmuidx_async_work(cpu,
213d9bb58e5SYang Zhong                                        RUN_ON_CPU_HOST_INT(idxmap));
214d9bb58e5SYang Zhong     }
215d9bb58e5SYang Zhong }
216d9bb58e5SYang Zhong 
217d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
218d9bb58e5SYang Zhong {
219d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
220d9bb58e5SYang Zhong 
221d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
222d9bb58e5SYang Zhong 
223d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
224d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
225d9bb58e5SYang Zhong }
226d9bb58e5SYang Zhong 
227d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
228d9bb58e5SYang Zhong                                                        uint16_t idxmap)
229d9bb58e5SYang Zhong {
230d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
231d9bb58e5SYang Zhong 
232d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
233d9bb58e5SYang Zhong 
234d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
235d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
236d9bb58e5SYang Zhong }
237d9bb58e5SYang Zhong 
23868fea038SRichard Henderson static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
23968fea038SRichard Henderson                                         target_ulong page)
240d9bb58e5SYang Zhong {
24168fea038SRichard Henderson     return tlb_hit_page(tlb_entry->addr_read, page) ||
24268fea038SRichard Henderson            tlb_hit_page(tlb_entry->addr_write, page) ||
24368fea038SRichard Henderson            tlb_hit_page(tlb_entry->addr_code, page);
24468fea038SRichard Henderson }
24568fea038SRichard Henderson 
24668fea038SRichard Henderson static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page)
24768fea038SRichard Henderson {
24868fea038SRichard Henderson     if (tlb_hit_page_anyprot(tlb_entry, page)) {
249d9bb58e5SYang Zhong         memset(tlb_entry, -1, sizeof(*tlb_entry));
250d9bb58e5SYang Zhong     }
251d9bb58e5SYang Zhong }
252d9bb58e5SYang Zhong 
25368fea038SRichard Henderson static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx,
25468fea038SRichard Henderson                                        target_ulong page)
25568fea038SRichard Henderson {
25668fea038SRichard Henderson     int k;
25768fea038SRichard Henderson     for (k = 0; k < CPU_VTLB_SIZE; k++) {
25868fea038SRichard Henderson         tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page);
25968fea038SRichard Henderson     }
26068fea038SRichard Henderson }
26168fea038SRichard Henderson 
262d9bb58e5SYang Zhong static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
263d9bb58e5SYang Zhong {
264d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
265d9bb58e5SYang Zhong     target_ulong addr = (target_ulong) data.target_ptr;
266d9bb58e5SYang Zhong     int i;
267d9bb58e5SYang Zhong     int mmu_idx;
268d9bb58e5SYang Zhong 
269d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
270d9bb58e5SYang Zhong 
271d9bb58e5SYang Zhong     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
272d9bb58e5SYang Zhong 
273d9bb58e5SYang Zhong     /* Check if we need to flush due to large pages.  */
274d9bb58e5SYang Zhong     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
275d9bb58e5SYang Zhong         tlb_debug("forcing full flush ("
276d9bb58e5SYang Zhong                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
277d9bb58e5SYang Zhong                   env->tlb_flush_addr, env->tlb_flush_mask);
278d9bb58e5SYang Zhong 
279d9bb58e5SYang Zhong         tlb_flush(cpu);
280d9bb58e5SYang Zhong         return;
281d9bb58e5SYang Zhong     }
282d9bb58e5SYang Zhong 
283d9bb58e5SYang Zhong     addr &= TARGET_PAGE_MASK;
284d9bb58e5SYang Zhong     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
285d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
286d9bb58e5SYang Zhong         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
28768fea038SRichard Henderson         tlb_flush_vtlb_page(env, mmu_idx, addr);
288d9bb58e5SYang Zhong     }
289d9bb58e5SYang Zhong 
290d9bb58e5SYang Zhong     tb_flush_jmp_cache(cpu, addr);
291d9bb58e5SYang Zhong }
292d9bb58e5SYang Zhong 
293d9bb58e5SYang Zhong void tlb_flush_page(CPUState *cpu, target_ulong addr)
294d9bb58e5SYang Zhong {
295d9bb58e5SYang Zhong     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
296d9bb58e5SYang Zhong 
297d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
298d9bb58e5SYang Zhong         async_run_on_cpu(cpu, tlb_flush_page_async_work,
299d9bb58e5SYang Zhong                          RUN_ON_CPU_TARGET_PTR(addr));
300d9bb58e5SYang Zhong     } else {
301d9bb58e5SYang Zhong         tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
302d9bb58e5SYang Zhong     }
303d9bb58e5SYang Zhong }
304d9bb58e5SYang Zhong 
305d9bb58e5SYang Zhong /* As we are going to hijack the bottom bits of the page address for a
306d9bb58e5SYang Zhong  * mmuidx bit mask we need to fail to build if we can't do that
307d9bb58e5SYang Zhong  */
308d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
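
/*
 * Illustrative sketch (not part of the original file): since a page
 * address always has its low TARGET_PAGE_BITS clear, the page-by-mmuidx
 * flushes below pack the mmuidx bitmap into those low bits so that one
 * run_on_cpu_data.target_ptr carries both values:
 *
 *     target_ulong packed = (addr & TARGET_PAGE_MASK) | idxmap;
 *
 * and, on the flush worker's side:
 *
 *     target_ulong addr = packed & TARGET_PAGE_MASK;
 *     unsigned long mmu_idx_bitmap = packed & ALL_MMUIDX_BITS;
 *
 * The build assert above guarantees the bitmap fits in the hijacked bits.
 */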
309d9bb58e5SYang Zhong 
310d9bb58e5SYang Zhong static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
311d9bb58e5SYang Zhong                                                 run_on_cpu_data data)
312d9bb58e5SYang Zhong {
313d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
314d9bb58e5SYang Zhong     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
315d9bb58e5SYang Zhong     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
316d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
317d9bb58e5SYang Zhong     int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
318d9bb58e5SYang Zhong     int mmu_idx;
319d9bb58e5SYang Zhong 
320d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
321d9bb58e5SYang Zhong 
322d9bb58e5SYang Zhong     tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
323d9bb58e5SYang Zhong               page, addr, mmu_idx_bitmap);
324d9bb58e5SYang Zhong 
325d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
326d9bb58e5SYang Zhong         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
327d9bb58e5SYang Zhong             tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
32868fea038SRichard Henderson             tlb_flush_vtlb_page(env, mmu_idx, addr);
329d9bb58e5SYang Zhong         }
330d9bb58e5SYang Zhong     }
331d9bb58e5SYang Zhong 
332d9bb58e5SYang Zhong     tb_flush_jmp_cache(cpu, addr);
333d9bb58e5SYang Zhong }
334d9bb58e5SYang Zhong 
335d9bb58e5SYang Zhong static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
336d9bb58e5SYang Zhong                                                           run_on_cpu_data data)
337d9bb58e5SYang Zhong {
338d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
339d9bb58e5SYang Zhong     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
340d9bb58e5SYang Zhong     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
341d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
342d9bb58e5SYang Zhong 
343d9bb58e5SYang Zhong     tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);
344d9bb58e5SYang Zhong 
345d9bb58e5SYang Zhong     /* Check if we need to flush due to large pages.  */
346d9bb58e5SYang Zhong     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
347d9bb58e5SYang Zhong         tlb_debug("forced full flush ("
348d9bb58e5SYang Zhong                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
349d9bb58e5SYang Zhong                   env->tlb_flush_addr, env->tlb_flush_mask);
350d9bb58e5SYang Zhong 
351d9bb58e5SYang Zhong         tlb_flush_by_mmuidx_async_work(cpu,
352d9bb58e5SYang Zhong                                        RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
353d9bb58e5SYang Zhong     } else {
354d9bb58e5SYang Zhong         tlb_flush_page_by_mmuidx_async_work(cpu, data);
355d9bb58e5SYang Zhong     }
356d9bb58e5SYang Zhong }
357d9bb58e5SYang Zhong 
358d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
359d9bb58e5SYang Zhong {
360d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
361d9bb58e5SYang Zhong 
362d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
363d9bb58e5SYang Zhong 
364d9bb58e5SYang Zhong     /* This should already be page aligned */
365d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
366d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
367d9bb58e5SYang Zhong 
368d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
369d9bb58e5SYang Zhong         async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
370d9bb58e5SYang Zhong                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
371d9bb58e5SYang Zhong     } else {
372d9bb58e5SYang Zhong         tlb_check_page_and_flush_by_mmuidx_async_work(
373d9bb58e5SYang Zhong             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
374d9bb58e5SYang Zhong     }
375d9bb58e5SYang Zhong }
376d9bb58e5SYang Zhong 
377d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
378d9bb58e5SYang Zhong                                        uint16_t idxmap)
379d9bb58e5SYang Zhong {
380d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
381d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
382d9bb58e5SYang Zhong 
383d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
384d9bb58e5SYang Zhong 
385d9bb58e5SYang Zhong     /* This should already be page aligned */
386d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
387d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
388d9bb58e5SYang Zhong 
389d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
390d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
391d9bb58e5SYang Zhong }
392d9bb58e5SYang Zhong 
393d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
394d9bb58e5SYang Zhong                                                             target_ulong addr,
395d9bb58e5SYang Zhong                                                             uint16_t idxmap)
396d9bb58e5SYang Zhong {
397d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
398d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
399d9bb58e5SYang Zhong 
400d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
401d9bb58e5SYang Zhong 
402d9bb58e5SYang Zhong     /* This should already be page aligned */
403d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
404d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
405d9bb58e5SYang Zhong 
406d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
407d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
408d9bb58e5SYang Zhong }
409d9bb58e5SYang Zhong 
410d9bb58e5SYang Zhong void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
411d9bb58e5SYang Zhong {
412d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_page_async_work;
413d9bb58e5SYang Zhong 
414d9bb58e5SYang Zhong     flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
415d9bb58e5SYang Zhong     fn(src, RUN_ON_CPU_TARGET_PTR(addr));
416d9bb58e5SYang Zhong }
417d9bb58e5SYang Zhong 
418d9bb58e5SYang Zhong void tlb_flush_page_all_cpus_synced(CPUState *src,
419d9bb58e5SYang Zhong                                                   target_ulong addr)
420d9bb58e5SYang Zhong {
421d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_page_async_work;
422d9bb58e5SYang Zhong 
423d9bb58e5SYang Zhong     flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
424d9bb58e5SYang Zhong     async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
425d9bb58e5SYang Zhong }
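
/*
 * Illustrative sketch (not part of the original file): a target front
 * end picks between these variants according to the architectural
 * semantics of its invalidate operation.  For example, a hypothetical
 * helper for a broadcast page invalidate whose completion the guest can
 * later rely on (e.g. via a barrier) might do:
 *
 *     tlb_flush_page_all_cpus_synced(ENV_GET_CPU(env), page_addr);
 *
 * whereas a purely local, immediate invalidate would simply call
 * tlb_flush_page(ENV_GET_CPU(env), page_addr).
 */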
426d9bb58e5SYang Zhong 
427d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the virtual page 'addr'
428d9bb58e5SYang Zhong    can be detected */
429d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
430d9bb58e5SYang Zhong {
431d9bb58e5SYang Zhong     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
432d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
433d9bb58e5SYang Zhong }
434d9bb58e5SYang Zhong 
435d9bb58e5SYang Zhong /* update the TLB so that writes in the physical page 'ram_addr' are no longer
436d9bb58e5SYang Zhong    tested for self-modifying code */
437d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
438d9bb58e5SYang Zhong {
439d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
440d9bb58e5SYang Zhong }
441d9bb58e5SYang Zhong 
442d9bb58e5SYang Zhong 
443d9bb58e5SYang Zhong /*
444d9bb58e5SYang Zhong  * Dirty write flag handling
445d9bb58e5SYang Zhong  *
446d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
447d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
448d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
449d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
450d9bb58e5SYang Zhong  * most usual is detecting writes to code regions which may invalidate
451d9bb58e5SYang Zhong  * generated code.
452d9bb58e5SYang Zhong  *
453d9bb58e5SYang Zhong  * Because we want other vCPUs to respond to changes straight away we
454d9bb58e5SYang Zhong  * update the te->addr_write field atomically. If the TLB entry has
455d9bb58e5SYang Zhong  * been changed by the vCPU in the mean time we skip the update.
456d9bb58e5SYang Zhong  *
457d9bb58e5SYang Zhong  * As this function uses atomic accesses we also need to ensure
458d9bb58e5SYang Zhong  * updates to tlb_entries follow the same access rules. We don't need
459d9bb58e5SYang Zhong  * to worry about this for oversized guests as MTTCG is disabled for
460d9bb58e5SYang Zhong  * them.
461d9bb58e5SYang Zhong  */
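
/*
 * Illustrative sketch (not part of the original file): conceptually the
 * generated store fast path compares the access address against the
 * whole addr_write word, so any low flag bit (TLB_NOTDIRTY, TLB_MMIO,
 * TLB_RECHECK, TLB_INVALID_MASK) forces a mismatch and a slow-path call:
 *
 *     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
 *     if ((addr & TARGET_PAGE_MASK) == tlb_addr) {
 *         // fast path: host store at addr + entry->addend
 *     } else {
 *         // flag bit set or genuine miss: call the store helper
 *     }
 *
 * This is simplified; the real generated code also folds alignment bits
 * into the comparison.
 */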
462d9bb58e5SYang Zhong 
463d9bb58e5SYang Zhong static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
464d9bb58e5SYang Zhong                            uintptr_t length)
465d9bb58e5SYang Zhong {
466d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
467d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
468d9bb58e5SYang Zhong 
469d9bb58e5SYang Zhong     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
470d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
471d9bb58e5SYang Zhong         addr += tlb_entry->addend;
472d9bb58e5SYang Zhong         if ((addr - start) < length) {
473d9bb58e5SYang Zhong             tlb_entry->addr_write |= TLB_NOTDIRTY;
474d9bb58e5SYang Zhong         }
475d9bb58e5SYang Zhong     }
476d9bb58e5SYang Zhong #else
477d9bb58e5SYang Zhong     /* paired with atomic_mb_set in tlb_set_page_with_attrs */
478d9bb58e5SYang Zhong     uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
479d9bb58e5SYang Zhong     uintptr_t addr = orig_addr;
480d9bb58e5SYang Zhong 
481d9bb58e5SYang Zhong     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
482d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
483d9bb58e5SYang Zhong         addr += atomic_read(&tlb_entry->addend);
484d9bb58e5SYang Zhong         if ((addr - start) < length) {
485d9bb58e5SYang Zhong             uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
486d9bb58e5SYang Zhong             atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
487d9bb58e5SYang Zhong         }
488d9bb58e5SYang Zhong     }
489d9bb58e5SYang Zhong #endif
490d9bb58e5SYang Zhong }
491d9bb58e5SYang Zhong 
492d9bb58e5SYang Zhong /* For atomic correctness when running MTTCG we need to use the right
493d9bb58e5SYang Zhong  * primitives when copying entries */
494d9bb58e5SYang Zhong static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
495d9bb58e5SYang Zhong                                    bool atomic_set)
496d9bb58e5SYang Zhong {
497d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
498d9bb58e5SYang Zhong     *d = *s;
499d9bb58e5SYang Zhong #else
500d9bb58e5SYang Zhong     if (atomic_set) {
501d9bb58e5SYang Zhong         d->addr_read = s->addr_read;
502d9bb58e5SYang Zhong         d->addr_code = s->addr_code;
503d9bb58e5SYang Zhong         atomic_set(&d->addend, atomic_read(&s->addend));
504d9bb58e5SYang Zhong         /* Pairs with flag setting in tlb_reset_dirty_range */
505d9bb58e5SYang Zhong         atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
506d9bb58e5SYang Zhong     } else {
507d9bb58e5SYang Zhong         d->addr_read = s->addr_read;
508d9bb58e5SYang Zhong         d->addr_write = atomic_read(&s->addr_write);
509d9bb58e5SYang Zhong         d->addr_code = s->addr_code;
510d9bb58e5SYang Zhong         d->addend = atomic_read(&s->addend);
511d9bb58e5SYang Zhong     }
512d9bb58e5SYang Zhong #endif
513d9bb58e5SYang Zhong }
514d9bb58e5SYang Zhong 
515d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
516d9bb58e5SYang Zhong  * the target vCPU). As such care needs to be taken that we don't
517d9bb58e5SYang Zhong  * dangerously race with another vCPU update. The only thing actually
518d9bb58e5SYang Zhong  * updated is the target TLB entry ->addr_write flags.
519d9bb58e5SYang Zhong  */
520d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
521d9bb58e5SYang Zhong {
522d9bb58e5SYang Zhong     CPUArchState *env;
523d9bb58e5SYang Zhong 
524d9bb58e5SYang Zhong     int mmu_idx;
525d9bb58e5SYang Zhong 
526d9bb58e5SYang Zhong     env = cpu->env_ptr;
527d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
528d9bb58e5SYang Zhong         unsigned int i;
529d9bb58e5SYang Zhong 
530d9bb58e5SYang Zhong         for (i = 0; i < CPU_TLB_SIZE; i++) {
531d9bb58e5SYang Zhong             tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
532d9bb58e5SYang Zhong                                   start1, length);
533d9bb58e5SYang Zhong         }
534d9bb58e5SYang Zhong 
535d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
536d9bb58e5SYang Zhong             tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
537d9bb58e5SYang Zhong                                   start1, length);
538d9bb58e5SYang Zhong         }
539d9bb58e5SYang Zhong     }
540d9bb58e5SYang Zhong }
541d9bb58e5SYang Zhong 
542d9bb58e5SYang Zhong static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
543d9bb58e5SYang Zhong {
544d9bb58e5SYang Zhong     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
545d9bb58e5SYang Zhong         tlb_entry->addr_write = vaddr;
546d9bb58e5SYang Zhong     }
547d9bb58e5SYang Zhong }
548d9bb58e5SYang Zhong 
549d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr so that writes to it
550d9bb58e5SYang Zhong    no longer need to be trapped for dirty tracking (clear TLB_NOTDIRTY) */
551d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
552d9bb58e5SYang Zhong {
553d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
554d9bb58e5SYang Zhong     int i;
555d9bb58e5SYang Zhong     int mmu_idx;
556d9bb58e5SYang Zhong 
557d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
558d9bb58e5SYang Zhong 
559d9bb58e5SYang Zhong     vaddr &= TARGET_PAGE_MASK;
560d9bb58e5SYang Zhong     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
561d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
562d9bb58e5SYang Zhong         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
563d9bb58e5SYang Zhong     }
564d9bb58e5SYang Zhong 
565d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
566d9bb58e5SYang Zhong         int k;
567d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
568d9bb58e5SYang Zhong             tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
569d9bb58e5SYang Zhong         }
570d9bb58e5SYang Zhong     }
571d9bb58e5SYang Zhong }
572d9bb58e5SYang Zhong 
573d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
574d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
575d9bb58e5SYang Zhong static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
576d9bb58e5SYang Zhong                                target_ulong size)
577d9bb58e5SYang Zhong {
578d9bb58e5SYang Zhong     target_ulong mask = ~(size - 1);
579d9bb58e5SYang Zhong 
580d9bb58e5SYang Zhong     if (env->tlb_flush_addr == (target_ulong)-1) {
581d9bb58e5SYang Zhong         env->tlb_flush_addr = vaddr & mask;
582d9bb58e5SYang Zhong         env->tlb_flush_mask = mask;
583d9bb58e5SYang Zhong         return;
584d9bb58e5SYang Zhong     }
585d9bb58e5SYang Zhong     /* Extend the existing region to include the new page.
586d9bb58e5SYang Zhong        This is a compromise between unnecessary flushes and the cost
587d9bb58e5SYang Zhong        of maintaining a full variable size TLB.  */
588d9bb58e5SYang Zhong     mask &= env->tlb_flush_mask;
589d9bb58e5SYang Zhong     while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
590d9bb58e5SYang Zhong         mask <<= 1;
591d9bb58e5SYang Zhong     }
592d9bb58e5SYang Zhong     env->tlb_flush_addr &= mask;
593d9bb58e5SYang Zhong     env->tlb_flush_mask = mask;
594d9bb58e5SYang Zhong }
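
/*
 * Worked example (not part of the original file), assuming 32-bit target
 * addresses: after a 2MB page at 0x40200000 is entered we have
 * tlb_flush_addr == 0x40200000 and tlb_flush_mask == 0xffe00000.  Adding
 * a second 2MB page at 0x40800000 widens the mask until both addresses
 * agree under it:
 *
 *     0x40200000 ^ 0x40800000 == 0x00a00000
 *     mask: 0xffe00000 -> 0xffc00000 -> 0xff800000 -> 0xff000000
 *
 * leaving tlb_flush_addr == 0x40000000 and tlb_flush_mask == 0xff000000,
 * i.e. one 16MB region whose invalidation now forces a full flush.
 */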
595d9bb58e5SYang Zhong 
596d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address
597d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
598d9bb58e5SYang Zhong  * supplied size only selects large-page tracking and TLB_RECHECK handling.
599d9bb58e5SYang Zhong  *
600d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
601d9bb58e5SYang Zhong  * critical section.
602d9bb58e5SYang Zhong  */
603d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
604d9bb58e5SYang Zhong                              hwaddr paddr, MemTxAttrs attrs, int prot,
605d9bb58e5SYang Zhong                              int mmu_idx, target_ulong size)
606d9bb58e5SYang Zhong {
607d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
608d9bb58e5SYang Zhong     MemoryRegionSection *section;
609d9bb58e5SYang Zhong     unsigned int index;
610d9bb58e5SYang Zhong     target_ulong address;
611d9bb58e5SYang Zhong     target_ulong code_address;
612d9bb58e5SYang Zhong     uintptr_t addend;
61368fea038SRichard Henderson     CPUTLBEntry *te, tn;
61455df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
61555df6fcfSPeter Maydell     target_ulong vaddr_page;
616d9bb58e5SYang Zhong     int asidx = cpu_asidx_from_attrs(cpu, attrs);
617d9bb58e5SYang Zhong 
618d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
61955df6fcfSPeter Maydell 
62055df6fcfSPeter Maydell     if (size < TARGET_PAGE_SIZE) {
62155df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
62255df6fcfSPeter Maydell     } else {
62355df6fcfSPeter Maydell         if (size > TARGET_PAGE_SIZE) {
624d9bb58e5SYang Zhong             tlb_add_large_page(env, vaddr, size);
625d9bb58e5SYang Zhong         }
626d9bb58e5SYang Zhong         sz = size;
62755df6fcfSPeter Maydell     }
62855df6fcfSPeter Maydell     vaddr_page = vaddr & TARGET_PAGE_MASK;
62955df6fcfSPeter Maydell     paddr_page = paddr & TARGET_PAGE_MASK;
63055df6fcfSPeter Maydell 
63155df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
63255df6fcfSPeter Maydell                                                 &xlat, &sz, attrs, &prot);
633d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
634d9bb58e5SYang Zhong 
635d9bb58e5SYang Zhong     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
636d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
637d9bb58e5SYang Zhong               vaddr, paddr, prot, mmu_idx);
638d9bb58e5SYang Zhong 
63955df6fcfSPeter Maydell     address = vaddr_page;
64055df6fcfSPeter Maydell     if (size < TARGET_PAGE_SIZE) {
64155df6fcfSPeter Maydell         /*
64255df6fcfSPeter Maydell          * Slow-path the TLB entries; we will repeat the MMU check and TLB
64355df6fcfSPeter Maydell          * fill on every access.
64455df6fcfSPeter Maydell          */
64555df6fcfSPeter Maydell         address |= TLB_RECHECK;
64655df6fcfSPeter Maydell     }
64755df6fcfSPeter Maydell     if (!memory_region_is_ram(section->mr) &&
64855df6fcfSPeter Maydell         !memory_region_is_romd(section->mr)) {
649d9bb58e5SYang Zhong         /* IO memory case */
650d9bb58e5SYang Zhong         address |= TLB_MMIO;
651d9bb58e5SYang Zhong         addend = 0;
652d9bb58e5SYang Zhong     } else {
653d9bb58e5SYang Zhong         /* TLB_MMIO for rom/romd handled below */
654d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
655d9bb58e5SYang Zhong     }
656d9bb58e5SYang Zhong 
65768fea038SRichard Henderson     /* Make sure there's no cached translation for the new page.  */
65868fea038SRichard Henderson     tlb_flush_vtlb_page(env, mmu_idx, vaddr_page);
65968fea038SRichard Henderson 
660d9bb58e5SYang Zhong     code_address = address;
66155df6fcfSPeter Maydell     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
66255df6fcfSPeter Maydell                                             paddr_page, xlat, prot, &address);
663d9bb58e5SYang Zhong 
66455df6fcfSPeter Maydell     index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
665d9bb58e5SYang Zhong     te = &env->tlb_table[mmu_idx][index];
666d9bb58e5SYang Zhong 
66768fea038SRichard Henderson     /*
66868fea038SRichard Henderson      * Only evict the old entry to the victim tlb if it's for a
66968fea038SRichard Henderson      * different page; otherwise just overwrite the stale data.
67068fea038SRichard Henderson      */
67168fea038SRichard Henderson     if (!tlb_hit_page_anyprot(te, vaddr_page)) {
67268fea038SRichard Henderson         unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
67368fea038SRichard Henderson         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
67468fea038SRichard Henderson 
67568fea038SRichard Henderson         /* Evict the old entry into the victim tlb.  */
676d9bb58e5SYang Zhong         copy_tlb_helper(tv, te, true);
677d9bb58e5SYang Zhong         env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
67868fea038SRichard Henderson     }
679d9bb58e5SYang Zhong 
680d9bb58e5SYang Zhong     /* refill the tlb */
681ace41090SPeter Maydell     /*
682ace41090SPeter Maydell      * At this point iotlb contains a physical section number in the lower
683ace41090SPeter Maydell      * TARGET_PAGE_BITS, and either
684ace41090SPeter Maydell      *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
685ace41090SPeter Maydell      *  + the offset within section->mr of the page base (otherwise)
68655df6fcfSPeter Maydell      * We subtract the vaddr_page (which is page aligned and thus won't
687ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
688ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
689ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
690ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
691ace41090SPeter Maydell      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
692ace41090SPeter Maydell      */
69355df6fcfSPeter Maydell     env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
694d9bb58e5SYang Zhong     env->iotlb[mmu_idx][index].attrs = attrs;
695d9bb58e5SYang Zhong 
696d9bb58e5SYang Zhong     /* Now calculate the new entry */
69755df6fcfSPeter Maydell     tn.addend = addend - vaddr_page;
698d9bb58e5SYang Zhong     if (prot & PAGE_READ) {
699d9bb58e5SYang Zhong         tn.addr_read = address;
700d9bb58e5SYang Zhong     } else {
701d9bb58e5SYang Zhong         tn.addr_read = -1;
702d9bb58e5SYang Zhong     }
703d9bb58e5SYang Zhong 
704d9bb58e5SYang Zhong     if (prot & PAGE_EXEC) {
705d9bb58e5SYang Zhong         tn.addr_code = code_address;
706d9bb58e5SYang Zhong     } else {
707d9bb58e5SYang Zhong         tn.addr_code = -1;
708d9bb58e5SYang Zhong     }
709d9bb58e5SYang Zhong 
710d9bb58e5SYang Zhong     tn.addr_write = -1;
711d9bb58e5SYang Zhong     if (prot & PAGE_WRITE) {
712d9bb58e5SYang Zhong         if ((memory_region_is_ram(section->mr) && section->readonly)
713d9bb58e5SYang Zhong             || memory_region_is_romd(section->mr)) {
714d9bb58e5SYang Zhong             /* Write access calls the I/O callback.  */
715d9bb58e5SYang Zhong             tn.addr_write = address | TLB_MMIO;
716d9bb58e5SYang Zhong         } else if (memory_region_is_ram(section->mr)
717d9bb58e5SYang Zhong                    && cpu_physical_memory_is_clean(
718d9bb58e5SYang Zhong                        memory_region_get_ram_addr(section->mr) + xlat)) {
719d9bb58e5SYang Zhong             tn.addr_write = address | TLB_NOTDIRTY;
720d9bb58e5SYang Zhong         } else {
721d9bb58e5SYang Zhong             tn.addr_write = address;
722d9bb58e5SYang Zhong         }
723f52bfb12SDavid Hildenbrand         if (prot & PAGE_WRITE_INV) {
724f52bfb12SDavid Hildenbrand             tn.addr_write |= TLB_INVALID_MASK;
725f52bfb12SDavid Hildenbrand         }
726d9bb58e5SYang Zhong     }
727d9bb58e5SYang Zhong 
728d9bb58e5SYang Zhong     /* Pairs with flag setting in tlb_reset_dirty_range */
729d9bb58e5SYang Zhong     copy_tlb_helper(te, &tn, true);
730d9bb58e5SYang Zhong     /* atomic_mb_set(&te->addr_write, write_address); */
731d9bb58e5SYang Zhong }
732d9bb58e5SYang Zhong 
733d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory
734d9bb58e5SYang Zhong  * transaction attributes to be used.
735d9bb58e5SYang Zhong  */
736d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr,
737d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
738d9bb58e5SYang Zhong                   int mmu_idx, target_ulong size)
739d9bb58e5SYang Zhong {
740d9bb58e5SYang Zhong     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
741d9bb58e5SYang Zhong                             prot, mmu_idx, size);
742d9bb58e5SYang Zhong }
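
/*
 * Illustrative sketch (not part of the original file): a target's
 * tlb_fill() implementation typically lands here after walking its page
 * tables.  With hypothetical walk results phys_addr, page_size and prot,
 * the call looks like:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             phys_addr & TARGET_PAGE_MASK,
 *                             MEMTXATTRS_UNSPECIFIED, prot,
 *                             mmu_idx, page_size);
 *
 * A page_size larger than TARGET_PAGE_SIZE engages the large-page
 * tracking above; a smaller one marks the entry TLB_RECHECK.
 */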
743d9bb58e5SYang Zhong 
744d9bb58e5SYang Zhong static void report_bad_exec(CPUState *cpu, target_ulong addr)
745d9bb58e5SYang Zhong {
746d9bb58e5SYang Zhong     /* Accidentally executing outside RAM or ROM is quite common for
747d9bb58e5SYang Zhong      * several user-error situations, so report it in a way that
748d9bb58e5SYang Zhong      * makes it clear that this isn't a QEMU bug and provide suggestions
749d9bb58e5SYang Zhong      * about what a user could do to fix things.
750d9bb58e5SYang Zhong      */
751d9bb58e5SYang Zhong     error_report("Trying to execute code outside RAM or ROM at 0x"
752d9bb58e5SYang Zhong                  TARGET_FMT_lx, addr);
753d9bb58e5SYang Zhong     error_printf("This usually means one of the following happened:\n\n"
754d9bb58e5SYang Zhong                  "(1) You told QEMU to execute a kernel for the wrong machine "
755d9bb58e5SYang Zhong                  "type, and it crashed on startup (eg trying to run a "
756d9bb58e5SYang Zhong                  "raspberry pi kernel on a versatilepb QEMU machine)\n"
757d9bb58e5SYang Zhong                  "(2) You didn't give QEMU a kernel or BIOS filename at all, "
758d9bb58e5SYang Zhong                  "and QEMU executed a ROM full of no-op instructions until "
759d9bb58e5SYang Zhong                  "it fell off the end\n"
760d9bb58e5SYang Zhong                  "(3) Your guest kernel has a bug and crashed by jumping "
761d9bb58e5SYang Zhong                  "off into nowhere\n\n"
762d9bb58e5SYang Zhong                  "This is almost always one of the first two, so check your "
763d9bb58e5SYang Zhong                  "command line and that you are using the right type of kernel "
764d9bb58e5SYang Zhong                  "for this machine.\n"
765d9bb58e5SYang Zhong                  "If you think option (3) is likely then you can try debugging "
766d9bb58e5SYang Zhong                  "your guest with the -d debug options; in particular "
767d9bb58e5SYang Zhong                  "-d guest_errors will cause the log to include a dump of the "
768d9bb58e5SYang Zhong                  "guest register state at this point.\n\n"
769d9bb58e5SYang Zhong                  "Execution cannot continue; stopping here.\n\n");
770d9bb58e5SYang Zhong 
771d9bb58e5SYang Zhong     /* Report also to the logs, with more detail including register dump */
772d9bb58e5SYang Zhong     qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
773d9bb58e5SYang Zhong                   "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
774d9bb58e5SYang Zhong     log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
775d9bb58e5SYang Zhong }
776d9bb58e5SYang Zhong 
777d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
778d9bb58e5SYang Zhong {
779d9bb58e5SYang Zhong     ram_addr_t ram_addr;
780d9bb58e5SYang Zhong 
781d9bb58e5SYang Zhong     ram_addr = qemu_ram_addr_from_host(ptr);
782d9bb58e5SYang Zhong     if (ram_addr == RAM_ADDR_INVALID) {
783d9bb58e5SYang Zhong         error_report("Bad ram pointer %p", ptr);
784d9bb58e5SYang Zhong         abort();
785d9bb58e5SYang Zhong     }
786d9bb58e5SYang Zhong     return ram_addr;
787d9bb58e5SYang Zhong }
788d9bb58e5SYang Zhong 
789d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
79004e3aabdSPeter Maydell                          int mmu_idx,
79155df6fcfSPeter Maydell                          target_ulong addr, uintptr_t retaddr,
792*dbea78a4SPeter Maydell                          bool recheck, MMUAccessType access_type, int size)
793d9bb58e5SYang Zhong {
794d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
7952d54f194SPeter Maydell     hwaddr mr_offset;
7962d54f194SPeter Maydell     MemoryRegionSection *section;
7972d54f194SPeter Maydell     MemoryRegion *mr;
798d9bb58e5SYang Zhong     uint64_t val;
799d9bb58e5SYang Zhong     bool locked = false;
80004e3aabdSPeter Maydell     MemTxResult r;
801d9bb58e5SYang Zhong 
80255df6fcfSPeter Maydell     if (recheck) {
80355df6fcfSPeter Maydell         /*
80455df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
80555df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
80655df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
80755df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
80855df6fcfSPeter Maydell          */
80955df6fcfSPeter Maydell         int index;
81055df6fcfSPeter Maydell         target_ulong tlb_addr;
81155df6fcfSPeter Maydell 
81255df6fcfSPeter Maydell         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
81355df6fcfSPeter Maydell 
81455df6fcfSPeter Maydell         index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
81555df6fcfSPeter Maydell         tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
81655df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
81755df6fcfSPeter Maydell             /* RAM access */
81855df6fcfSPeter Maydell             uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;
81955df6fcfSPeter Maydell 
82055df6fcfSPeter Maydell             return ldn_p((void *)haddr, size);
82155df6fcfSPeter Maydell         }
82255df6fcfSPeter Maydell         /* Fall through for handling IO accesses */
82355df6fcfSPeter Maydell     }
82455df6fcfSPeter Maydell 
8252d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
8262d54f194SPeter Maydell     mr = section->mr;
8272d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
828d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
829d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
830d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
831d9bb58e5SYang Zhong     }
832d9bb58e5SYang Zhong 
833d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
834*dbea78a4SPeter Maydell     cpu->mem_io_access_type = access_type;
835d9bb58e5SYang Zhong 
8368b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
837d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
838d9bb58e5SYang Zhong         locked = true;
839d9bb58e5SYang Zhong     }
8402d54f194SPeter Maydell     r = memory_region_dispatch_read(mr, mr_offset,
84104e3aabdSPeter Maydell                                     &val, size, iotlbentry->attrs);
84204e3aabdSPeter Maydell     if (r != MEMTX_OK) {
8432d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
8442d54f194SPeter Maydell             section->offset_within_address_space -
8452d54f194SPeter Maydell             section->offset_within_region;
8462d54f194SPeter Maydell 
847*dbea78a4SPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
84804e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
84904e3aabdSPeter Maydell     }
850d9bb58e5SYang Zhong     if (locked) {
851d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
852d9bb58e5SYang Zhong     }
853d9bb58e5SYang Zhong 
854d9bb58e5SYang Zhong     return val;
855d9bb58e5SYang Zhong }
856d9bb58e5SYang Zhong 
857d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
85804e3aabdSPeter Maydell                       int mmu_idx,
859d9bb58e5SYang Zhong                       uint64_t val, target_ulong addr,
86055df6fcfSPeter Maydell                       uintptr_t retaddr, bool recheck, int size)
861d9bb58e5SYang Zhong {
862d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
8632d54f194SPeter Maydell     hwaddr mr_offset;
8642d54f194SPeter Maydell     MemoryRegionSection *section;
8652d54f194SPeter Maydell     MemoryRegion *mr;
866d9bb58e5SYang Zhong     bool locked = false;
86704e3aabdSPeter Maydell     MemTxResult r;
868d9bb58e5SYang Zhong 
86955df6fcfSPeter Maydell     if (recheck) {
87055df6fcfSPeter Maydell         /*
87155df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
87255df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
87355df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
87455df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
87555df6fcfSPeter Maydell          */
87655df6fcfSPeter Maydell         int index;
87755df6fcfSPeter Maydell         target_ulong tlb_addr;
87855df6fcfSPeter Maydell 
87955df6fcfSPeter Maydell         tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
88055df6fcfSPeter Maydell 
88155df6fcfSPeter Maydell         index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
88255df6fcfSPeter Maydell         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
88355df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
88455df6fcfSPeter Maydell             /* RAM access */
88555df6fcfSPeter Maydell             uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;
88655df6fcfSPeter Maydell 
88755df6fcfSPeter Maydell             stn_p((void *)haddr, size, val);
88855df6fcfSPeter Maydell             return;
88955df6fcfSPeter Maydell         }
89055df6fcfSPeter Maydell         /* Fall through for handling IO accesses */
89155df6fcfSPeter Maydell     }
89255df6fcfSPeter Maydell 
8932d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
8942d54f194SPeter Maydell     mr = section->mr;
8952d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
896d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
897d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
898d9bb58e5SYang Zhong     }
899d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
900d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
901d9bb58e5SYang Zhong 
9028b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
903d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
904d9bb58e5SYang Zhong         locked = true;
905d9bb58e5SYang Zhong     }
9062d54f194SPeter Maydell     r = memory_region_dispatch_write(mr, mr_offset,
90704e3aabdSPeter Maydell                                      val, size, iotlbentry->attrs);
90804e3aabdSPeter Maydell     if (r != MEMTX_OK) {
9092d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
9102d54f194SPeter Maydell             section->offset_within_address_space -
9112d54f194SPeter Maydell             section->offset_within_region;
9122d54f194SPeter Maydell 
91304e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
91404e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
91504e3aabdSPeter Maydell     }
916d9bb58e5SYang Zhong     if (locked) {
917d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
918d9bb58e5SYang Zhong     }
919d9bb58e5SYang Zhong }
920d9bb58e5SYang Zhong 
921d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
922d9bb58e5SYang Zhong    back to the main tlb.  */
923d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
924d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
925d9bb58e5SYang Zhong {
926d9bb58e5SYang Zhong     size_t vidx;
927d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
928d9bb58e5SYang Zhong         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
929d9bb58e5SYang Zhong         target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
930d9bb58e5SYang Zhong 
931d9bb58e5SYang Zhong         if (cmp == page) {
932d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
933d9bb58e5SYang Zhong             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
934d9bb58e5SYang Zhong 
935d9bb58e5SYang Zhong             copy_tlb_helper(&tmptlb, tlb, false);
936d9bb58e5SYang Zhong             copy_tlb_helper(tlb, vtlb, true);
937d9bb58e5SYang Zhong             copy_tlb_helper(vtlb, &tmptlb, true);
938d9bb58e5SYang Zhong 
939d9bb58e5SYang Zhong             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
940d9bb58e5SYang Zhong             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
941d9bb58e5SYang Zhong             tmpio = *io; *io = *vio; *vio = tmpio;
942d9bb58e5SYang Zhong             return true;
943d9bb58e5SYang Zhong         }
944d9bb58e5SYang Zhong     }
945d9bb58e5SYang Zhong     return false;
946d9bb58e5SYang Zhong }
947d9bb58e5SYang Zhong 
948d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
949d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
950d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
951d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
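
/*
 * Illustrative sketch (not part of the original file): the softmmu
 * load/store helpers consult the victim TLB on a miss before paying for
 * a full tlb_fill(), e.g. for a data load of 'size' bytes:
 *
 *     if (!tlb_hit(env->tlb_table[mmu_idx][index].addr_read, addr)) {
 *         if (!VICTIM_TLB_HIT(addr_read, addr)) {
 *             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_LOAD,
 *                      mmu_idx, retaddr);
 *         }
 *     }
 *
 * get_page_addr_code() below follows the same pattern for addr_code.
 */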
952d9bb58e5SYang Zhong 
953f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */
954f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it
955f2553f04SKONRAD Frederic  * is actually a ram_addr_t (in system mode; the user mode emulation
956f2553f04SKONRAD Frederic  * version of this function returns a guest virtual address).
957f2553f04SKONRAD Frederic  */
958f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
959f2553f04SKONRAD Frederic {
9602d54f194SPeter Maydell     int mmu_idx, index;
961f2553f04SKONRAD Frederic     void *p;
962f2553f04SKONRAD Frederic     MemoryRegion *mr;
9632d54f194SPeter Maydell     MemoryRegionSection *section;
964f2553f04SKONRAD Frederic     CPUState *cpu = ENV_GET_CPU(env);
965f2553f04SKONRAD Frederic     CPUIOTLBEntry *iotlbentry;
9662d54f194SPeter Maydell     hwaddr physaddr, mr_offset;
967f2553f04SKONRAD Frederic 
968f2553f04SKONRAD Frederic     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
969f2553f04SKONRAD Frederic     mmu_idx = cpu_mmu_index(env, true);
970e4c967a7SPeter Maydell     if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) {
971b493ccf1SPeter Maydell         if (!VICTIM_TLB_HIT(addr_code, addr)) {
97298670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
97371b9a453SKONRAD Frederic         }
9743474c98aSPeter Maydell         assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
975f2553f04SKONRAD Frederic     }
97655df6fcfSPeter Maydell 
9773474c98aSPeter Maydell     if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
97855df6fcfSPeter Maydell         /*
97955df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
98055df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
98155df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
98255df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
98355df6fcfSPeter Maydell          */
98455df6fcfSPeter Maydell         int index;
98555df6fcfSPeter Maydell         target_ulong tlb_addr;
98655df6fcfSPeter Maydell 
98755df6fcfSPeter Maydell         tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0);
98855df6fcfSPeter Maydell 
98955df6fcfSPeter Maydell         index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
99055df6fcfSPeter Maydell         tlb_addr = env->tlb_table[mmu_idx][index].addr_code;
99155df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
99255df6fcfSPeter Maydell             /* RAM access. We can't handle this, so for now just stop */
99355df6fcfSPeter Maydell             cpu_abort(cpu, "Unable to handle guest executing from RAM within "
99455df6fcfSPeter Maydell                       "a small MPU region at 0x" TARGET_FMT_lx, addr);
99555df6fcfSPeter Maydell         }
99655df6fcfSPeter Maydell         /*
99755df6fcfSPeter Maydell          * Fall through to handle IO accesses (which will almost certainly
99855df6fcfSPeter Maydell          * also result in failure)
99955df6fcfSPeter Maydell          */
100055df6fcfSPeter Maydell     }
100155df6fcfSPeter Maydell 
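    /*
     * At this point the TLB entry for the page is valid; resolve its
     * iotlb entry to the backing MemoryRegionSection so we can tell
     * whether the page is something we can execute from.
     */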
1002f2553f04SKONRAD Frederic     iotlbentry = &env->iotlb[mmu_idx][index];
10032d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
10042d54f194SPeter Maydell     mr = section->mr;
1005f2553f04SKONRAD Frederic     if (memory_region_is_unassigned(mr)) {
1006c9356746SKONRAD Frederic         qemu_mutex_lock_iothread();
1007c9356746SKONRAD Frederic         if (memory_region_request_mmio_ptr(mr, addr)) {
1008c9356746SKONRAD Frederic             qemu_mutex_unlock_iothread();
1009c9356746SKONRAD Frederic             /* A MemoryRegion may have been added, so re-run
1010c9356746SKONRAD Frederic              * get_page_addr_code().
1011c9356746SKONRAD Frederic              */
1012c9356746SKONRAD Frederic             return get_page_addr_code(env, addr);
1013c9356746SKONRAD Frederic         }
1014c9356746SKONRAD Frederic         qemu_mutex_unlock_iothread();
1015c9356746SKONRAD Frederic 
101604e3aabdSPeter Maydell         /* Give the new-style cpu_transaction_failed() hook first chance
101704e3aabdSPeter Maydell          * to handle this.
101804e3aabdSPeter Maydell          * This is not the ideal place to detect and generate CPU
101904e3aabdSPeter Maydell          * exceptions for instruction fetch failure (for instance
102004e3aabdSPeter Maydell          * we don't know the length of the access that the CPU would
102104e3aabdSPeter Maydell          * use, and it would be better to go ahead and try the access
102204e3aabdSPeter Maydell          * and use the MemTXResult it produced). However it is the
102304e3aabdSPeter Maydell          * simplest place we have currently available for the check.
102404e3aabdSPeter Maydell          */
10252d54f194SPeter Maydell         mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
10262d54f194SPeter Maydell         physaddr = mr_offset +
10272d54f194SPeter Maydell             section->offset_within_address_space -
10282d54f194SPeter Maydell             section->offset_within_region;
102904e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
103004e3aabdSPeter Maydell                                iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
103104e3aabdSPeter Maydell 
1032f2553f04SKONRAD Frederic         cpu_unassigned_access(cpu, addr, false, true, 0, 4);
1033f2553f04SKONRAD Frederic         /* The CPU's unassigned access hook might have longjumped out
1034f2553f04SKONRAD Frederic          * with an exception. If it didn't (or there was no hook) then
1035f2553f04SKONRAD Frederic          * we can't proceed further.
1036f2553f04SKONRAD Frederic          */
1037f2553f04SKONRAD Frederic         report_bad_exec(cpu, addr);
1038f2553f04SKONRAD Frederic         exit(1);
1039f2553f04SKONRAD Frederic     }
1040f2553f04SKONRAD Frederic     p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
1041f2553f04SKONRAD Frederic     return qemu_ram_addr_from_host_nofail(p);
1042f2553f04SKONRAD Frederic }
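/*
 * A typical caller (an illustrative sketch; the real lookup lives in the
 * translation-block handling code) turns a guest PC into the page address
 * that keys the TB hash:
 *
 *     tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
 *
 * Any guest exception raised by the fill above therefore unwinds before a
 * new TB is generated for that PC.
 */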
1043f2553f04SKONRAD Frederic 
1044d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted.
1045d9bb58e5SYang Zhong  * If it is not permitted then an exception will be taken in the same
1046d9bb58e5SYang Zhong  * way as if this were a real write access (and we will not return).
1047d9bb58e5SYang Zhong  * Otherwise the function will return, and there will be a valid
1048d9bb58e5SYang Zhong  * entry in the TLB for this access.
1049d9bb58e5SYang Zhong  */
105098670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1051d9bb58e5SYang Zhong                  uintptr_t retaddr)
1052d9bb58e5SYang Zhong {
1053d9bb58e5SYang Zhong     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1054d9bb58e5SYang Zhong     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
1055d9bb58e5SYang Zhong 
1056334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1057d9bb58e5SYang Zhong         /* TLB entry is for a different page */
1058d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
105998670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
106098670d47SLaurent Vivier                      mmu_idx, retaddr);
1061d9bb58e5SYang Zhong         }
1062d9bb58e5SYang Zhong     }
1063d9bb58e5SYang Zhong }
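/*
 * For instance, a target helper that needs a store confined to one page to
 * either fully succeed or fault before any byte is written could do (an
 * illustrative sketch, not a helper defined in this file):
 *
 *     probe_write(env, addr, size, cpu_mmu_index(env, false), GETPC());
 *     ... perform the store piecewise, knowing the page is writable ...
 */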
1064d9bb58e5SYang Zhong 
1065d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1066d9bb58e5SYang Zhong  * or I/O accesses to proceed.  Return the host address.  */
1067d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
106834d49937SPeter Maydell                                TCGMemOpIdx oi, uintptr_t retaddr,
106934d49937SPeter Maydell                                NotDirtyInfo *ndi)
1070d9bb58e5SYang Zhong {
1071d9bb58e5SYang Zhong     size_t mmu_idx = get_mmuidx(oi);
1072d9bb58e5SYang Zhong     size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1073d9bb58e5SYang Zhong     CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
1074d9bb58e5SYang Zhong     target_ulong tlb_addr = tlbe->addr_write;
1075d9bb58e5SYang Zhong     TCGMemOp mop = get_memop(oi);
1076d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
1077d9bb58e5SYang Zhong     int s_bits = mop & MO_SIZE;
107834d49937SPeter Maydell     void *hostaddr;
1079d9bb58e5SYang Zhong 
1080d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1081d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1082d9bb58e5SYang Zhong 
1083d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1084d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1085d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1086d9bb58e5SYang Zhong         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1087d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1088d9bb58e5SYang Zhong     }
1089d9bb58e5SYang Zhong 
1090d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
1091d9bb58e5SYang Zhong     if (unlikely(addr & ((1 << s_bits) - 1))) {
1092d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1093d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1094d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1095d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1096d9bb58e5SYang Zhong         goto stop_the_world;
1097d9bb58e5SYang Zhong     }
1098d9bb58e5SYang Zhong 
1099d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
1100334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1101d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
110298670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
110398670d47SLaurent Vivier                      mmu_idx, retaddr);
1104d9bb58e5SYang Zhong         }
1105f52bfb12SDavid Hildenbrand         tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
1106d9bb58e5SYang Zhong     }
1107d9bb58e5SYang Zhong 
110855df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
110955df6fcfSPeter Maydell     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1110d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1111d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1112d9bb58e5SYang Zhong         goto stop_the_world;
1113d9bb58e5SYang Zhong     }
1114d9bb58e5SYang Zhong 
1115d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
111634d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
111798670d47SLaurent Vivier         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
111898670d47SLaurent Vivier                  mmu_idx, retaddr);
1119d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1120d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1121d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1122d9bb58e5SYang Zhong         goto stop_the_world;
1123d9bb58e5SYang Zhong     }
1124d9bb58e5SYang Zhong 
112534d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
112634d49937SPeter Maydell 
112734d49937SPeter Maydell     ndi->active = false;
112834d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
112934d49937SPeter Maydell         ndi->active = true;
113034d49937SPeter Maydell         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
113134d49937SPeter Maydell                                       qemu_ram_addr_from_host_nofail(hostaddr),
113234d49937SPeter Maydell                                       1 << s_bits);
113334d49937SPeter Maydell     }
113434d49937SPeter Maydell 
113534d49937SPeter Maydell     return hostaddr;
1136d9bb58e5SYang Zhong 
1137d9bb58e5SYang Zhong  stop_the_world:
1138d9bb58e5SYang Zhong     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1139d9bb58e5SYang Zhong }
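/*
 * The NotDirtyInfo filled in above is completed by the caller: whenever
 * ndi->active was set, memory_notdirty_write_complete() must run once the
 * store has been performed, which is what ATOMIC_MMU_CLEANUP below
 * arranges.
 */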
1140d9bb58e5SYang Zhong 
1141d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN
1142d9bb58e5SYang Zhong # define TGT_BE(X)  (X)
1143d9bb58e5SYang Zhong # define TGT_LE(X)  BSWAP(X)
1144d9bb58e5SYang Zhong #else
1145d9bb58e5SYang Zhong # define TGT_BE(X)  BSWAP(X)
1146d9bb58e5SYang Zhong # define TGT_LE(X)  (X)
1147d9bb58e5SYang Zhong #endif
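/*
 * For example, for a 4-byte access on a big-endian target TGT_LE(0x11223344)
 * byte-swaps to 0x44332211 while TGT_BE() leaves the value unchanged; on a
 * little-endian target the roles are reversed.
 */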
1148d9bb58e5SYang Zhong 
1149d9bb58e5SYang Zhong #define MMUSUFFIX _mmu
1150d9bb58e5SYang Zhong 
1151d9bb58e5SYang Zhong #define DATA_SIZE 1
1152d9bb58e5SYang Zhong #include "softmmu_template.h"
1153d9bb58e5SYang Zhong 
1154d9bb58e5SYang Zhong #define DATA_SIZE 2
1155d9bb58e5SYang Zhong #include "softmmu_template.h"
1156d9bb58e5SYang Zhong 
1157d9bb58e5SYang Zhong #define DATA_SIZE 4
1158d9bb58e5SYang Zhong #include "softmmu_template.h"
1159d9bb58e5SYang Zhong 
1160d9bb58e5SYang Zhong #define DATA_SIZE 8
1161d9bb58e5SYang Zhong #include "softmmu_template.h"
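/*
 * Each inclusion above instantiates the out-of-line load/store helpers for
 * one access size, e.g. helper_ret_ldub_mmu() and helper_ret_stb_mmu() for
 * DATA_SIZE 1, plus the _le_/_be_ variants for the wider sizes.
 */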
1162d9bb58e5SYang Zhong 
1163d9bb58e5SYang Zhong /* The first set of helpers allows the OI and RETADDR to be passed in
1164d9bb58e5SYang Zhong    explicitly.  This makes them callable from other helpers.  */
1165d9bb58e5SYang Zhong 
1166d9bb58e5SYang Zhong #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1167d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
1168d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
116934d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
117034d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
117134d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP                              \
117234d49937SPeter Maydell     do {                                                \
117334d49937SPeter Maydell         if (unlikely(ndi.active)) {                     \
117434d49937SPeter Maydell             memory_notdirty_write_complete(&ndi);       \
117534d49937SPeter Maydell         }                                               \
117634d49937SPeter Maydell     } while (0)
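/*
 * Each helper generated by atomic_template.h is roughly of the shape
 * (a sketch; the real template varies the operation and handles
 * size-specific details):
 *
 *     ATOMIC_MMU_DECLS;
 *     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
 *     DATA_TYPE ret = atomic_xchg__nocheck(haddr, val);
 *     ATOMIC_MMU_CLEANUP;
 *     return ret;
 *
 * so the lookup/cleanup pair brackets every atomic operation.
 */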
1177d9bb58e5SYang Zhong 
1178d9bb58e5SYang Zhong #define DATA_SIZE 1
1179d9bb58e5SYang Zhong #include "atomic_template.h"
1180d9bb58e5SYang Zhong 
1181d9bb58e5SYang Zhong #define DATA_SIZE 2
1182d9bb58e5SYang Zhong #include "atomic_template.h"
1183d9bb58e5SYang Zhong 
1184d9bb58e5SYang Zhong #define DATA_SIZE 4
1185d9bb58e5SYang Zhong #include "atomic_template.h"
1186d9bb58e5SYang Zhong 
1187d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1188d9bb58e5SYang Zhong #define DATA_SIZE 8
1189d9bb58e5SYang Zhong #include "atomic_template.h"
1190d9bb58e5SYang Zhong #endif
1191d9bb58e5SYang Zhong 
1192d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC128
1193d9bb58e5SYang Zhong #define DATA_SIZE 16
1194d9bb58e5SYang Zhong #include "atomic_template.h"
1195d9bb58e5SYang Zhong #endif
1196d9bb58e5SYang Zhong 
1197d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers.  */
1198d9bb58e5SYang Zhong 
1199d9bb58e5SYang Zhong #undef EXTRA_ARGS
1200d9bb58e5SYang Zhong #undef ATOMIC_NAME
1201d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
1202d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
1203d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
120434d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
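/*
 * These expansions produce the helper names TCG calls directly, e.g.
 * helper_atomic_cmpxchgl_le(); they recover the return address themselves
 * via GETPC() instead of taking a retaddr argument.
 */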
1205d9bb58e5SYang Zhong 
1206d9bb58e5SYang Zhong #define DATA_SIZE 1
1207d9bb58e5SYang Zhong #include "atomic_template.h"
1208d9bb58e5SYang Zhong 
1209d9bb58e5SYang Zhong #define DATA_SIZE 2
1210d9bb58e5SYang Zhong #include "atomic_template.h"
1211d9bb58e5SYang Zhong 
1212d9bb58e5SYang Zhong #define DATA_SIZE 4
1213d9bb58e5SYang Zhong #include "atomic_template.h"
1214d9bb58e5SYang Zhong 
1215d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1216d9bb58e5SYang Zhong #define DATA_SIZE 8
1217d9bb58e5SYang Zhong #include "atomic_template.h"
1218d9bb58e5SYang Zhong #endif
1219d9bb58e5SYang Zhong 
1220d9bb58e5SYang Zhong /* Code access functions.  */
1221d9bb58e5SYang Zhong 
1222d9bb58e5SYang Zhong #undef MMUSUFFIX
1223d9bb58e5SYang Zhong #define MMUSUFFIX _cmmu
1224d9bb58e5SYang Zhong #undef GETPC
1225d9bb58e5SYang Zhong #define GETPC() ((uintptr_t)0)
1226d9bb58e5SYang Zhong #define SOFTMMU_CODE_ACCESS
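/*
 * With SOFTMMU_CODE_ACCESS defined, the inclusions below generate the
 * _cmmu variants used for instruction fetch (reached e.g. via the
 * cpu_ldl_code() family).  GETPC() is forced to 0 because these are called
 * from the translators rather than from generated code, so there is no
 * host return address to unwind.
 */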
1227d9bb58e5SYang Zhong 
1228d9bb58e5SYang Zhong #define DATA_SIZE 1
1229d9bb58e5SYang Zhong #include "softmmu_template.h"
1230d9bb58e5SYang Zhong 
1231d9bb58e5SYang Zhong #define DATA_SIZE 2
1232d9bb58e5SYang Zhong #include "softmmu_template.h"
1233d9bb58e5SYang Zhong 
1234d9bb58e5SYang Zhong #define DATA_SIZE 4
1235d9bb58e5SYang Zhong #include "softmmu_template.h"
1236d9bb58e5SYang Zhong 
1237d9bb58e5SYang Zhong #define DATA_SIZE 8
1238d9bb58e5SYang Zhong #include "softmmu_template.h"