xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 55df6fcf5476b44bc1b95554e686ab3e91d725c5)
1d9bb58e5SYang Zhong /*
2d9bb58e5SYang Zhong  *  Common CPU TLB handling
3d9bb58e5SYang Zhong  *
4d9bb58e5SYang Zhong  *  Copyright (c) 2003 Fabrice Bellard
5d9bb58e5SYang Zhong  *
6d9bb58e5SYang Zhong  * This library is free software; you can redistribute it and/or
7d9bb58e5SYang Zhong  * modify it under the terms of the GNU Lesser General Public
8d9bb58e5SYang Zhong  * License as published by the Free Software Foundation; either
9d9bb58e5SYang Zhong  * version 2 of the License, or (at your option) any later version.
10d9bb58e5SYang Zhong  *
11d9bb58e5SYang Zhong  * This library is distributed in the hope that it will be useful,
12d9bb58e5SYang Zhong  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13d9bb58e5SYang Zhong  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14d9bb58e5SYang Zhong  * Lesser General Public License for more details.
15d9bb58e5SYang Zhong  *
16d9bb58e5SYang Zhong  * You should have received a copy of the GNU Lesser General Public
17d9bb58e5SYang Zhong  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18d9bb58e5SYang Zhong  */
19d9bb58e5SYang Zhong 
20d9bb58e5SYang Zhong #include "qemu/osdep.h"
21d9bb58e5SYang Zhong #include "qemu/main-loop.h"
22d9bb58e5SYang Zhong #include "cpu.h"
23d9bb58e5SYang Zhong #include "exec/exec-all.h"
24d9bb58e5SYang Zhong #include "exec/memory.h"
25d9bb58e5SYang Zhong #include "exec/address-spaces.h"
26d9bb58e5SYang Zhong #include "exec/cpu_ldst.h"
27d9bb58e5SYang Zhong #include "exec/cputlb.h"
28d9bb58e5SYang Zhong #include "exec/memory-internal.h"
29d9bb58e5SYang Zhong #include "exec/ram_addr.h"
30d9bb58e5SYang Zhong #include "tcg/tcg.h"
31d9bb58e5SYang Zhong #include "qemu/error-report.h"
32d9bb58e5SYang Zhong #include "exec/log.h"
33d9bb58e5SYang Zhong #include "exec/helper-proto.h"
34d9bb58e5SYang Zhong #include "qemu/atomic.h"
35d9bb58e5SYang Zhong 
36d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
37d9bb58e5SYang Zhong /* #define DEBUG_TLB */
38d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */
39d9bb58e5SYang Zhong 
40d9bb58e5SYang Zhong #ifdef DEBUG_TLB
41d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1
42d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG
43d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 1
44d9bb58e5SYang Zhong # else
45d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 0
46d9bb58e5SYang Zhong # endif
47d9bb58e5SYang Zhong #else
48d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0
49d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0
50d9bb58e5SYang Zhong #endif
51d9bb58e5SYang Zhong 
52d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) do { \
53d9bb58e5SYang Zhong     if (DEBUG_TLB_LOG_GATE) { \
54d9bb58e5SYang Zhong         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
55d9bb58e5SYang Zhong                       ## __VA_ARGS__); \
56d9bb58e5SYang Zhong     } else if (DEBUG_TLB_GATE) { \
57d9bb58e5SYang Zhong         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
58d9bb58e5SYang Zhong     } \
59d9bb58e5SYang Zhong } while (0)
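/*
 * Typical use, as in the flush helpers below:
 *
 *     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
 *
 * The arguments are still type-checked, but the whole call is optimised
 * away unless DEBUG_TLB is defined.
 */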
60d9bb58e5SYang Zhong 
61d9bb58e5SYang Zhong #define assert_cpu_is_self(this_cpu) do {                         \
62d9bb58e5SYang Zhong         if (DEBUG_TLB_GATE) {                                     \
63d9bb58e5SYang Zhong             g_assert(!(this_cpu)->created || qemu_cpu_is_self(this_cpu)); \
64d9bb58e5SYang Zhong         }                                                         \
65d9bb58e5SYang Zhong     } while (0)
66d9bb58e5SYang Zhong 
67d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a
68d9bb58e5SYang Zhong  * target_ulong even on 32 bit builds */
69d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
70d9bb58e5SYang Zhong 
71d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
73d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
74d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
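/*
 * The idxmap arguments taken by the tlb_flush_*_by_mmuidx() functions
 * below are ORs of (1 << mmu_idx) bits within this mask; for example,
 * flushing MMU indexes 0 and 2 on the current vCPU would be:
 *
 *     tlb_flush_by_mmuidx(cpu, (1 << 0) | (1 << 2));
 */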
75d9bb58e5SYang Zhong 
76d9bb58e5SYang Zhong /* flush_all_helper: run fn asynchronously on all cpus other than src
77d9bb58e5SYang Zhong  *
78d9bb58e5SYang Zhong  * The source cpu's own flush is not queued here: callers either run fn
79d9bb58e5SYang Zhong  * on src directly (the *_all_cpus variants) or queue it as "safe" work
80d9bb58e5SYang Zhong  * (the *_all_cpus_synced variants), the latter creating a synchronisation
81d9bb58e5SYang Zhong  * point where all queued work is finished before execution starts again.
82d9bb58e5SYang Zhong  */
83d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
84d9bb58e5SYang Zhong                              run_on_cpu_data d)
85d9bb58e5SYang Zhong {
86d9bb58e5SYang Zhong     CPUState *cpu;
87d9bb58e5SYang Zhong 
88d9bb58e5SYang Zhong     CPU_FOREACH(cpu) {
89d9bb58e5SYang Zhong         if (cpu != src) {
90d9bb58e5SYang Zhong             async_run_on_cpu(cpu, fn, d);
91d9bb58e5SYang Zhong         }
92d9bb58e5SYang Zhong     }
93d9bb58e5SYang Zhong }
94d9bb58e5SYang Zhong 
9583974cf4SEmilio G. Cota size_t tlb_flush_count(void)
9683974cf4SEmilio G. Cota {
9783974cf4SEmilio G. Cota     CPUState *cpu;
9883974cf4SEmilio G. Cota     size_t count = 0;
9983974cf4SEmilio G. Cota 
10083974cf4SEmilio G. Cota     CPU_FOREACH(cpu) {
10183974cf4SEmilio G. Cota         CPUArchState *env = cpu->env_ptr;
10283974cf4SEmilio G. Cota 
10383974cf4SEmilio G. Cota         count += atomic_read(&env->tlb_flush_count);
10483974cf4SEmilio G. Cota     }
10583974cf4SEmilio G. Cota     return count;
10683974cf4SEmilio G. Cota }
107d9bb58e5SYang Zhong 
108d9bb58e5SYang Zhong /* This is OK because CPU architectures generally permit an
109d9bb58e5SYang Zhong  * implementation to drop entries from the TLB at any time, so
110d9bb58e5SYang Zhong  * flushing more entries than required is only an efficiency issue,
111d9bb58e5SYang Zhong  * not a correctness issue.
112d9bb58e5SYang Zhong  */
113d9bb58e5SYang Zhong static void tlb_flush_nocheck(CPUState *cpu)
114d9bb58e5SYang Zhong {
115d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
116d9bb58e5SYang Zhong 
117d9bb58e5SYang Zhong     /* The QOM tests will trigger tlb_flushes without setting up TCG
118d9bb58e5SYang Zhong      * so we bail out early here in that case.
119d9bb58e5SYang Zhong      */
120d9bb58e5SYang Zhong     if (!tcg_enabled()) {
121d9bb58e5SYang Zhong         return;
122d9bb58e5SYang Zhong     }
123d9bb58e5SYang Zhong 
124d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
12583974cf4SEmilio G. Cota     atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
12683974cf4SEmilio G. Cota     tlb_debug("(count: %zu)\n", tlb_flush_count());
127d9bb58e5SYang Zhong 
128d9bb58e5SYang Zhong     memset(env->tlb_table, -1, sizeof(env->tlb_table));
129d9bb58e5SYang Zhong     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
130f3ced3c5SEmilio G. Cota     cpu_tb_jmp_cache_clear(cpu);
131d9bb58e5SYang Zhong 
132d9bb58e5SYang Zhong     env->vtlb_index = 0;
133d9bb58e5SYang Zhong     env->tlb_flush_addr = -1;
134d9bb58e5SYang Zhong     env->tlb_flush_mask = 0;
135d9bb58e5SYang Zhong 
136d9bb58e5SYang Zhong     atomic_mb_set(&cpu->pending_tlb_flush, 0);
137d9bb58e5SYang Zhong }
138d9bb58e5SYang Zhong 
139d9bb58e5SYang Zhong static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
140d9bb58e5SYang Zhong {
141d9bb58e5SYang Zhong     tlb_flush_nocheck(cpu);
142d9bb58e5SYang Zhong }
143d9bb58e5SYang Zhong 
144d9bb58e5SYang Zhong void tlb_flush(CPUState *cpu)
145d9bb58e5SYang Zhong {
146d9bb58e5SYang Zhong     if (cpu->created && !qemu_cpu_is_self(cpu)) {
147d9bb58e5SYang Zhong         if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
148d9bb58e5SYang Zhong             atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
149d9bb58e5SYang Zhong             async_run_on_cpu(cpu, tlb_flush_global_async_work,
150d9bb58e5SYang Zhong                              RUN_ON_CPU_NULL);
151d9bb58e5SYang Zhong         }
152d9bb58e5SYang Zhong     } else {
153d9bb58e5SYang Zhong         tlb_flush_nocheck(cpu);
154d9bb58e5SYang Zhong     }
155d9bb58e5SYang Zhong }
156d9bb58e5SYang Zhong 
157d9bb58e5SYang Zhong void tlb_flush_all_cpus(CPUState *src_cpu)
158d9bb58e5SYang Zhong {
159d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_global_async_work;
160d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
161d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_NULL);
162d9bb58e5SYang Zhong }
163d9bb58e5SYang Zhong 
164d9bb58e5SYang Zhong void tlb_flush_all_cpus_synced(CPUState *src_cpu)
165d9bb58e5SYang Zhong {
166d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_global_async_work;
167d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
168d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
169d9bb58e5SYang Zhong }
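/*
 * As an illustration (hypothetical target code, not part of this file),
 * a helper emulating a broadcast TLB-invalidate instruction would
 * typically use the synced variant:
 *
 *     tlb_flush_all_cpus_synced(ENV_GET_CPU(env));
 *
 * so the issuing vCPU's own flush is deferred as "safe" work, creating
 * the synchronisation point described above flush_all_helper().
 */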
170d9bb58e5SYang Zhong 
171d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
172d9bb58e5SYang Zhong {
173d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
174d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmask = data.host_int;
175d9bb58e5SYang Zhong     int mmu_idx;
176d9bb58e5SYang Zhong 
177d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
178d9bb58e5SYang Zhong 
179d9bb58e5SYang Zhong     tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
180d9bb58e5SYang Zhong 
181d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
182d9bb58e5SYang Zhong 
183d9bb58e5SYang Zhong         if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
184d9bb58e5SYang Zhong             tlb_debug("%d\n", mmu_idx);
185d9bb58e5SYang Zhong 
186d9bb58e5SYang Zhong             memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
187d9bb58e5SYang Zhong             memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
188d9bb58e5SYang Zhong         }
189d9bb58e5SYang Zhong     }
190d9bb58e5SYang Zhong 
191f3ced3c5SEmilio G. Cota     cpu_tb_jmp_cache_clear(cpu);
192d9bb58e5SYang Zhong 
193d9bb58e5SYang Zhong     tlb_debug("done\n");
194d9bb58e5SYang Zhong }
195d9bb58e5SYang Zhong 
196d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
197d9bb58e5SYang Zhong {
198d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
199d9bb58e5SYang Zhong 
200d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
201d9bb58e5SYang Zhong         uint16_t pending_flushes = idxmap;
202d9bb58e5SYang Zhong         pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);
203d9bb58e5SYang Zhong 
204d9bb58e5SYang Zhong         if (pending_flushes) {
205d9bb58e5SYang Zhong             tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);
206d9bb58e5SYang Zhong 
207d9bb58e5SYang Zhong             atomic_or(&cpu->pending_tlb_flush, pending_flushes);
208d9bb58e5SYang Zhong             async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
209d9bb58e5SYang Zhong                              RUN_ON_CPU_HOST_INT(pending_flushes));
210d9bb58e5SYang Zhong         }
211d9bb58e5SYang Zhong     } else {
212d9bb58e5SYang Zhong         tlb_flush_by_mmuidx_async_work(cpu,
213d9bb58e5SYang Zhong                                        RUN_ON_CPU_HOST_INT(idxmap));
214d9bb58e5SYang Zhong     }
215d9bb58e5SYang Zhong }
216d9bb58e5SYang Zhong 
217d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
218d9bb58e5SYang Zhong {
219d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
220d9bb58e5SYang Zhong 
221d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
222d9bb58e5SYang Zhong 
223d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
224d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
225d9bb58e5SYang Zhong }
226d9bb58e5SYang Zhong 
227d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
228d9bb58e5SYang Zhong                                                        uint16_t idxmap)
229d9bb58e5SYang Zhong {
230d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
231d9bb58e5SYang Zhong 
232d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
233d9bb58e5SYang Zhong 
234d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
235d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
236d9bb58e5SYang Zhong }
237d9bb58e5SYang Zhong 
238d9bb58e5SYang Zhong 
239d9bb58e5SYang Zhong 
240d9bb58e5SYang Zhong static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
241d9bb58e5SYang Zhong {
242d9bb58e5SYang Zhong     if (addr == (tlb_entry->addr_read &
243d9bb58e5SYang Zhong                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
244d9bb58e5SYang Zhong         addr == (tlb_entry->addr_write &
245d9bb58e5SYang Zhong                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
246d9bb58e5SYang Zhong         addr == (tlb_entry->addr_code &
247d9bb58e5SYang Zhong                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
248d9bb58e5SYang Zhong         memset(tlb_entry, -1, sizeof(*tlb_entry));
249d9bb58e5SYang Zhong     }
250d9bb58e5SYang Zhong }
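/*
 * Note the masking above: an entry whose comparator is, for example,
 * (addr | TLB_NOTDIRTY) still matches a page-aligned addr, because
 * TLB_NOTDIRTY is cleared by (TARGET_PAGE_MASK | TLB_INVALID_MASK)
 * before the comparison, while an entry that is already invalid keeps
 * its TLB_INVALID_MASK bit set and therefore never matches.
 */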
251d9bb58e5SYang Zhong 
252d9bb58e5SYang Zhong static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
253d9bb58e5SYang Zhong {
254d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
255d9bb58e5SYang Zhong     target_ulong addr = (target_ulong) data.target_ptr;
256d9bb58e5SYang Zhong     int i;
257d9bb58e5SYang Zhong     int mmu_idx;
258d9bb58e5SYang Zhong 
259d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
260d9bb58e5SYang Zhong 
261d9bb58e5SYang Zhong     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
262d9bb58e5SYang Zhong 
263d9bb58e5SYang Zhong     /* Check if we need to flush due to large pages.  */
264d9bb58e5SYang Zhong     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
265d9bb58e5SYang Zhong         tlb_debug("forcing full flush ("
266d9bb58e5SYang Zhong                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
267d9bb58e5SYang Zhong                   env->tlb_flush_addr, env->tlb_flush_mask);
268d9bb58e5SYang Zhong 
269d9bb58e5SYang Zhong         tlb_flush(cpu);
270d9bb58e5SYang Zhong         return;
271d9bb58e5SYang Zhong     }
272d9bb58e5SYang Zhong 
273d9bb58e5SYang Zhong     addr &= TARGET_PAGE_MASK;
274d9bb58e5SYang Zhong     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
275d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
276d9bb58e5SYang Zhong         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
277d9bb58e5SYang Zhong     }
278d9bb58e5SYang Zhong 
279d9bb58e5SYang Zhong     /* check whether there are entries that need to be flushed in the vtlb */
280d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
281d9bb58e5SYang Zhong         int k;
282d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
283d9bb58e5SYang Zhong             tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
284d9bb58e5SYang Zhong         }
285d9bb58e5SYang Zhong     }
286d9bb58e5SYang Zhong 
287d9bb58e5SYang Zhong     tb_flush_jmp_cache(cpu, addr);
288d9bb58e5SYang Zhong }
289d9bb58e5SYang Zhong 
290d9bb58e5SYang Zhong void tlb_flush_page(CPUState *cpu, target_ulong addr)
291d9bb58e5SYang Zhong {
292d9bb58e5SYang Zhong     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
293d9bb58e5SYang Zhong 
294d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
295d9bb58e5SYang Zhong         async_run_on_cpu(cpu, tlb_flush_page_async_work,
296d9bb58e5SYang Zhong                          RUN_ON_CPU_TARGET_PTR(addr));
297d9bb58e5SYang Zhong     } else {
298d9bb58e5SYang Zhong         tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
299d9bb58e5SYang Zhong     }
300d9bb58e5SYang Zhong }
301d9bb58e5SYang Zhong 
302d9bb58e5SYang Zhong /* As we are going to hijack the bottom bits of the page address to carry
303d9bb58e5SYang Zhong  * an mmuidx bitmask, fail the build if that bitmask cannot fit below
304d9bb58e5SYang Zhong  * TARGET_PAGE_BITS_MIN. */
305d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
306d9bb58e5SYang Zhong 
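/*
 * The combined value decoded below is built by the callers as
 *
 *     addr_and_mmu_idx = (addr & TARGET_PAGE_MASK) | idxmap;
 *
 * e.g. with 4K pages, flushing page 0x12345000 for MMU indexes 0 and 1
 * is encoded as 0x12345003.
 */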
307d9bb58e5SYang Zhong static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
308d9bb58e5SYang Zhong                                                 run_on_cpu_data data)
309d9bb58e5SYang Zhong {
310d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
311d9bb58e5SYang Zhong     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
312d9bb58e5SYang Zhong     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
313d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
314d9bb58e5SYang Zhong     int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
315d9bb58e5SYang Zhong     int mmu_idx;
316d9bb58e5SYang Zhong     int i;
317d9bb58e5SYang Zhong 
318d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
319d9bb58e5SYang Zhong 
320d9bb58e5SYang Zhong     tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
321d9bb58e5SYang Zhong               page, addr, mmu_idx_bitmap);
322d9bb58e5SYang Zhong 
323d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
324d9bb58e5SYang Zhong         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
325d9bb58e5SYang Zhong             tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
326d9bb58e5SYang Zhong 
327d9bb58e5SYang Zhong             /* check whether there are vtlb entries that need to be flushed */
328d9bb58e5SYang Zhong             for (i = 0; i < CPU_VTLB_SIZE; i++) {
329d9bb58e5SYang Zhong                 tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
330d9bb58e5SYang Zhong             }
331d9bb58e5SYang Zhong         }
332d9bb58e5SYang Zhong     }
333d9bb58e5SYang Zhong 
334d9bb58e5SYang Zhong     tb_flush_jmp_cache(cpu, addr);
335d9bb58e5SYang Zhong }
336d9bb58e5SYang Zhong 
337d9bb58e5SYang Zhong static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
338d9bb58e5SYang Zhong                                                           run_on_cpu_data data)
339d9bb58e5SYang Zhong {
340d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
341d9bb58e5SYang Zhong     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
342d9bb58e5SYang Zhong     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
343d9bb58e5SYang Zhong     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
344d9bb58e5SYang Zhong 
345d9bb58e5SYang Zhong     tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);
346d9bb58e5SYang Zhong 
347d9bb58e5SYang Zhong     /* Check if we need to flush due to large pages.  */
348d9bb58e5SYang Zhong     if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
349d9bb58e5SYang Zhong         tlb_debug("forced full flush ("
350d9bb58e5SYang Zhong                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
351d9bb58e5SYang Zhong                   env->tlb_flush_addr, env->tlb_flush_mask);
352d9bb58e5SYang Zhong 
353d9bb58e5SYang Zhong         tlb_flush_by_mmuidx_async_work(cpu,
354d9bb58e5SYang Zhong                                        RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
355d9bb58e5SYang Zhong     } else {
356d9bb58e5SYang Zhong         tlb_flush_page_by_mmuidx_async_work(cpu, data);
357d9bb58e5SYang Zhong     }
358d9bb58e5SYang Zhong }
359d9bb58e5SYang Zhong 
360d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
361d9bb58e5SYang Zhong {
362d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
363d9bb58e5SYang Zhong 
364d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
365d9bb58e5SYang Zhong 
366d9bb58e5SYang Zhong     /* This should already be page aligned */
367d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
368d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
369d9bb58e5SYang Zhong 
370d9bb58e5SYang Zhong     if (!qemu_cpu_is_self(cpu)) {
371d9bb58e5SYang Zhong         async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
372d9bb58e5SYang Zhong                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
373d9bb58e5SYang Zhong     } else {
374d9bb58e5SYang Zhong         tlb_check_page_and_flush_by_mmuidx_async_work(
375d9bb58e5SYang Zhong             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
376d9bb58e5SYang Zhong     }
377d9bb58e5SYang Zhong }
378d9bb58e5SYang Zhong 
379d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
380d9bb58e5SYang Zhong                                        uint16_t idxmap)
381d9bb58e5SYang Zhong {
382d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
383d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
384d9bb58e5SYang Zhong 
385d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
386d9bb58e5SYang Zhong 
387d9bb58e5SYang Zhong     /* This should already be page aligned */
388d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
389d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
390d9bb58e5SYang Zhong 
391d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
392d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
393d9bb58e5SYang Zhong }
394d9bb58e5SYang Zhong 
395d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
396d9bb58e5SYang Zhong                                                             target_ulong addr,
397d9bb58e5SYang Zhong                                                             uint16_t idxmap)
398d9bb58e5SYang Zhong {
399d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
400d9bb58e5SYang Zhong     target_ulong addr_and_mmu_idx;
401d9bb58e5SYang Zhong 
402d9bb58e5SYang Zhong     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
403d9bb58e5SYang Zhong 
404d9bb58e5SYang Zhong     /* This should already be page aligned */
405d9bb58e5SYang Zhong     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
406d9bb58e5SYang Zhong     addr_and_mmu_idx |= idxmap;
407d9bb58e5SYang Zhong 
408d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
409d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
410d9bb58e5SYang Zhong }
411d9bb58e5SYang Zhong 
412d9bb58e5SYang Zhong void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
413d9bb58e5SYang Zhong {
414d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_page_async_work;
415d9bb58e5SYang Zhong 
416d9bb58e5SYang Zhong     flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
417d9bb58e5SYang Zhong     fn(src, RUN_ON_CPU_TARGET_PTR(addr));
418d9bb58e5SYang Zhong }
419d9bb58e5SYang Zhong 
420d9bb58e5SYang Zhong void tlb_flush_page_all_cpus_synced(CPUState *src,
421d9bb58e5SYang Zhong                                                   target_ulong addr)
422d9bb58e5SYang Zhong {
423d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_page_async_work;
424d9bb58e5SYang Zhong 
425d9bb58e5SYang Zhong     flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
426d9bb58e5SYang Zhong     async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
427d9bb58e5SYang Zhong }
428d9bb58e5SYang Zhong 
429d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the RAM page 'ram_addr'
430d9bb58e5SYang Zhong    can be detected */
431d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
432d9bb58e5SYang Zhong {
433d9bb58e5SYang Zhong     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
434d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
435d9bb58e5SYang Zhong }
436d9bb58e5SYang Zhong 
437d9bb58e5SYang Zhong /* update the TLB so that writes in the physical page 'ram_addr' are no
438d9bb58e5SYang Zhong    longer tested for self-modifying code */
439d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
440d9bb58e5SYang Zhong {
441d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
442d9bb58e5SYang Zhong }
443d9bb58e5SYang Zhong 
444d9bb58e5SYang Zhong 
445d9bb58e5SYang Zhong /*
446d9bb58e5SYang Zhong  * Dirty write flag handling
447d9bb58e5SYang Zhong  *
448d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
449d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
450d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
451d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
452d9bb58e5SYang Zhong  * most usual is detecting writes to code regions which may invalidate
453d9bb58e5SYang Zhong  * generated code.
454d9bb58e5SYang Zhong  *
455d9bb58e5SYang Zhong  * Because we want other vCPUs to respond to changes straight away we
456d9bb58e5SYang Zhong  * update the te->addr_write field atomically. If the TLB entry has
457d9bb58e5SYang Zhong  * been changed by the vCPU in the meantime we skip the update.
458d9bb58e5SYang Zhong  *
459d9bb58e5SYang Zhong  * As this function uses atomic accesses we also need to ensure
460d9bb58e5SYang Zhong  * updates to tlb_entries follow the same access rules. We don't need
461d9bb58e5SYang Zhong  * to worry about this for oversized guests as MTTCG is disabled for
462d9bb58e5SYang Zhong  * them.
463d9bb58e5SYang Zhong  */
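/*
 * Concretely: a store to a not-yet-dirtied RAM page finds addr_write ==
 * (page_vaddr | TLB_NOTDIRTY).  The low flag bit makes the inline TLB
 * comparison in generated code fail, so the store takes the slow path,
 * which can invalidate any TBs on the page before the write and then
 * drop the TLB_NOTDIRTY trap via tlb_set_dirty() below.
 */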
464d9bb58e5SYang Zhong 
465d9bb58e5SYang Zhong static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
466d9bb58e5SYang Zhong                            uintptr_t length)
467d9bb58e5SYang Zhong {
468d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
469d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
470d9bb58e5SYang Zhong 
471d9bb58e5SYang Zhong     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
472d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
473d9bb58e5SYang Zhong         addr += tlb_entry->addend;
474d9bb58e5SYang Zhong         if ((addr - start) < length) {
475d9bb58e5SYang Zhong             tlb_entry->addr_write |= TLB_NOTDIRTY;
476d9bb58e5SYang Zhong         }
477d9bb58e5SYang Zhong     }
478d9bb58e5SYang Zhong #else
479d9bb58e5SYang Zhong     /* paired with atomic_mb_set in tlb_set_page_with_attrs */
480d9bb58e5SYang Zhong     uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
481d9bb58e5SYang Zhong     uintptr_t addr = orig_addr;
482d9bb58e5SYang Zhong 
483d9bb58e5SYang Zhong     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
484d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
485d9bb58e5SYang Zhong         addr += atomic_read(&tlb_entry->addend);
486d9bb58e5SYang Zhong         if ((addr - start) < length) {
487d9bb58e5SYang Zhong             uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
488d9bb58e5SYang Zhong             atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
489d9bb58e5SYang Zhong         }
490d9bb58e5SYang Zhong     }
491d9bb58e5SYang Zhong #endif
492d9bb58e5SYang Zhong }
493d9bb58e5SYang Zhong 
494d9bb58e5SYang Zhong /* For atomic correctness when running MTTCG we need to use the right
495d9bb58e5SYang Zhong  * primitives when copying entries */
496d9bb58e5SYang Zhong static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
497d9bb58e5SYang Zhong                                    bool atomic_set)
498d9bb58e5SYang Zhong {
499d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
500d9bb58e5SYang Zhong     *d = *s;
501d9bb58e5SYang Zhong #else
502d9bb58e5SYang Zhong     if (atomic_set) {
503d9bb58e5SYang Zhong         d->addr_read = s->addr_read;
504d9bb58e5SYang Zhong         d->addr_code = s->addr_code;
505d9bb58e5SYang Zhong         atomic_set(&d->addend, atomic_read(&s->addend));
506d9bb58e5SYang Zhong         /* Pairs with flag setting in tlb_reset_dirty_range */
507d9bb58e5SYang Zhong         atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
508d9bb58e5SYang Zhong     } else {
509d9bb58e5SYang Zhong         d->addr_read = s->addr_read;
510d9bb58e5SYang Zhong         d->addr_write = atomic_read(&s->addr_write);
511d9bb58e5SYang Zhong         d->addr_code = s->addr_code;
512d9bb58e5SYang Zhong         d->addend = atomic_read(&s->addend);
513d9bb58e5SYang Zhong     }
514d9bb58e5SYang Zhong #endif
515d9bb58e5SYang Zhong }
516d9bb58e5SYang Zhong 
517d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
518d9bb58e5SYang Zhong  * the target vCPU). As such care needs to be taken that we don't
519d9bb58e5SYang Zhong  * dangerously race with another vCPU update. The only thing actually
520d9bb58e5SYang Zhong  * updated is the target TLB entry ->addr_write flags.
521d9bb58e5SYang Zhong  */
522d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
523d9bb58e5SYang Zhong {
524d9bb58e5SYang Zhong     CPUArchState *env;
525d9bb58e5SYang Zhong 
526d9bb58e5SYang Zhong     int mmu_idx;
527d9bb58e5SYang Zhong 
528d9bb58e5SYang Zhong     env = cpu->env_ptr;
529d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
530d9bb58e5SYang Zhong         unsigned int i;
531d9bb58e5SYang Zhong 
532d9bb58e5SYang Zhong         for (i = 0; i < CPU_TLB_SIZE; i++) {
533d9bb58e5SYang Zhong             tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
534d9bb58e5SYang Zhong                                   start1, length);
535d9bb58e5SYang Zhong         }
536d9bb58e5SYang Zhong 
537d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
538d9bb58e5SYang Zhong             tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
539d9bb58e5SYang Zhong                                   start1, length);
540d9bb58e5SYang Zhong         }
541d9bb58e5SYang Zhong     }
542d9bb58e5SYang Zhong }
543d9bb58e5SYang Zhong 
544d9bb58e5SYang Zhong static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
545d9bb58e5SYang Zhong {
546d9bb58e5SYang Zhong     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
547d9bb58e5SYang Zhong         tlb_entry->addr_write = vaddr;
548d9bb58e5SYang Zhong     }
549d9bb58e5SYang Zhong }
550d9bb58e5SYang Zhong 
551d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr
552d9bb58e5SYang Zhong    so that it is no longer dirty */
553d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
554d9bb58e5SYang Zhong {
555d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
556d9bb58e5SYang Zhong     int i;
557d9bb58e5SYang Zhong     int mmu_idx;
558d9bb58e5SYang Zhong 
559d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
560d9bb58e5SYang Zhong 
561d9bb58e5SYang Zhong     vaddr &= TARGET_PAGE_MASK;
562d9bb58e5SYang Zhong     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
563d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
564d9bb58e5SYang Zhong         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
565d9bb58e5SYang Zhong     }
566d9bb58e5SYang Zhong 
567d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
568d9bb58e5SYang Zhong         int k;
569d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
570d9bb58e5SYang Zhong             tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
571d9bb58e5SYang Zhong         }
572d9bb58e5SYang Zhong     }
573d9bb58e5SYang Zhong }
574d9bb58e5SYang Zhong 
575d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
576d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
577d9bb58e5SYang Zhong static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
578d9bb58e5SYang Zhong                                target_ulong size)
579d9bb58e5SYang Zhong {
580d9bb58e5SYang Zhong     target_ulong mask = ~(size - 1);
581d9bb58e5SYang Zhong 
582d9bb58e5SYang Zhong     if (env->tlb_flush_addr == (target_ulong)-1) {
583d9bb58e5SYang Zhong         env->tlb_flush_addr = vaddr & mask;
584d9bb58e5SYang Zhong         env->tlb_flush_mask = mask;
585d9bb58e5SYang Zhong         return;
586d9bb58e5SYang Zhong     }
587d9bb58e5SYang Zhong     /* Extend the existing region to include the new page.
588d9bb58e5SYang Zhong        This is a compromise between unnecessary flushes and the cost
589d9bb58e5SYang Zhong        of maintaining a full variable size TLB.  */
590d9bb58e5SYang Zhong     mask &= env->tlb_flush_mask;
591d9bb58e5SYang Zhong     while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
592d9bb58e5SYang Zhong         mask <<= 1;
593d9bb58e5SYang Zhong     }
594d9bb58e5SYang Zhong     env->tlb_flush_addr &= mask;
595d9bb58e5SYang Zhong     env->tlb_flush_mask = mask;
596d9bb58e5SYang Zhong }
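/*
 * Worked example with 32-bit addresses: after a 64K page at 0x10000000,
 * the tracked region is addr=0x10000000 mask=0xffff0000.  Adding a
 * second 64K page at 0x10030000 widens the mask until both addresses
 * agree (0xffff0000 -> 0xfffe0000 -> 0xfffc0000), so the region becomes
 * 0x10000000..0x1003ffff; flushing any page inside it now triggers a
 * full TLB flush (see tlb_flush_page_async_work() above).
 */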
597d9bb58e5SYang Zhong 
598d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address
599d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
600d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
601d9bb58e5SYang Zhong  *
602d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
603d9bb58e5SYang Zhong  * critical section.
604d9bb58e5SYang Zhong  */
605d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
606d9bb58e5SYang Zhong                              hwaddr paddr, MemTxAttrs attrs, int prot,
607d9bb58e5SYang Zhong                              int mmu_idx, target_ulong size)
608d9bb58e5SYang Zhong {
609d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
610d9bb58e5SYang Zhong     MemoryRegionSection *section;
611d9bb58e5SYang Zhong     unsigned int index;
612d9bb58e5SYang Zhong     target_ulong address;
613d9bb58e5SYang Zhong     target_ulong code_address;
614d9bb58e5SYang Zhong     uintptr_t addend;
615d9bb58e5SYang Zhong     CPUTLBEntry *te, *tv, tn;
616*55df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
617*55df6fcfSPeter Maydell     target_ulong vaddr_page;
618d9bb58e5SYang Zhong     unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
619d9bb58e5SYang Zhong     int asidx = cpu_asidx_from_attrs(cpu, attrs);
620d9bb58e5SYang Zhong 
621d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
622*55df6fcfSPeter Maydell 
623*55df6fcfSPeter Maydell     if (size < TARGET_PAGE_SIZE) {
624*55df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
625*55df6fcfSPeter Maydell     } else {
626*55df6fcfSPeter Maydell         if (size > TARGET_PAGE_SIZE) {
627d9bb58e5SYang Zhong             tlb_add_large_page(env, vaddr, size);
628d9bb58e5SYang Zhong         }
629d9bb58e5SYang Zhong         sz = size;
630*55df6fcfSPeter Maydell     }
631*55df6fcfSPeter Maydell     vaddr_page = vaddr & TARGET_PAGE_MASK;
632*55df6fcfSPeter Maydell     paddr_page = paddr & TARGET_PAGE_MASK;
633*55df6fcfSPeter Maydell 
634*55df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
635*55df6fcfSPeter Maydell                                                 &xlat, &sz, attrs, &prot);
636d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
637d9bb58e5SYang Zhong 
638d9bb58e5SYang Zhong     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
639d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
640d9bb58e5SYang Zhong               vaddr, paddr, prot, mmu_idx);
641d9bb58e5SYang Zhong 
642*55df6fcfSPeter Maydell     address = vaddr_page;
643*55df6fcfSPeter Maydell     if (size < TARGET_PAGE_SIZE) {
644*55df6fcfSPeter Maydell         /*
645*55df6fcfSPeter Maydell          * Slow-path the TLB entries; we will repeat the MMU check and TLB
646*55df6fcfSPeter Maydell          * fill on every access.
647*55df6fcfSPeter Maydell          */
648*55df6fcfSPeter Maydell         address |= TLB_RECHECK;
649*55df6fcfSPeter Maydell     }
650*55df6fcfSPeter Maydell     if (!memory_region_is_ram(section->mr) &&
651*55df6fcfSPeter Maydell         !memory_region_is_romd(section->mr)) {
652d9bb58e5SYang Zhong         /* IO memory case */
653d9bb58e5SYang Zhong         address |= TLB_MMIO;
654d9bb58e5SYang Zhong         addend = 0;
655d9bb58e5SYang Zhong     } else {
656d9bb58e5SYang Zhong         /* TLB_MMIO for rom/romd handled below */
657d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
658d9bb58e5SYang Zhong     }
659d9bb58e5SYang Zhong 
660d9bb58e5SYang Zhong     code_address = address;
661*55df6fcfSPeter Maydell     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
662*55df6fcfSPeter Maydell                                             paddr_page, xlat, prot, &address);
663d9bb58e5SYang Zhong 
664*55df6fcfSPeter Maydell     index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
665d9bb58e5SYang Zhong     te = &env->tlb_table[mmu_idx][index];
666d9bb58e5SYang Zhong     /* do not discard the translation in te, evict it into a victim tlb */
667d9bb58e5SYang Zhong     tv = &env->tlb_v_table[mmu_idx][vidx];
668d9bb58e5SYang Zhong 
669d9bb58e5SYang Zhong     /* addr_write can race with tlb_reset_dirty_range */
670d9bb58e5SYang Zhong     copy_tlb_helper(tv, te, true);
671d9bb58e5SYang Zhong 
672d9bb58e5SYang Zhong     env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
673d9bb58e5SYang Zhong 
674d9bb58e5SYang Zhong     /* refill the tlb */
675ace41090SPeter Maydell     /*
676ace41090SPeter Maydell      * At this point iotlb contains a physical section number in the lower
677ace41090SPeter Maydell      * TARGET_PAGE_BITS, and either
678ace41090SPeter Maydell      *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
679ace41090SPeter Maydell      *  + the offset within section->mr of the page base (otherwise)
680*55df6fcfSPeter Maydell      * We subtract the vaddr_page (which is page aligned and thus won't
681ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
682ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
683ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
684ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
685ace41090SPeter Maydell      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
686ace41090SPeter Maydell      */
687*55df6fcfSPeter Maydell     env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
688d9bb58e5SYang Zhong     env->iotlb[mmu_idx][index].attrs = attrs;
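    /*
     * io_readx()/io_writex() below undo this by computing
     *
     *     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
     *
     * where addr is the full, non-page-aligned virtual address of the
     * access.
     */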
689d9bb58e5SYang Zhong 
690d9bb58e5SYang Zhong     /* Now calculate the new entry */
691*55df6fcfSPeter Maydell     tn.addend = addend - vaddr_page;
692d9bb58e5SYang Zhong     if (prot & PAGE_READ) {
693d9bb58e5SYang Zhong         tn.addr_read = address;
694d9bb58e5SYang Zhong     } else {
695d9bb58e5SYang Zhong         tn.addr_read = -1;
696d9bb58e5SYang Zhong     }
697d9bb58e5SYang Zhong 
698d9bb58e5SYang Zhong     if (prot & PAGE_EXEC) {
699d9bb58e5SYang Zhong         tn.addr_code = code_address;
700d9bb58e5SYang Zhong     } else {
701d9bb58e5SYang Zhong         tn.addr_code = -1;
702d9bb58e5SYang Zhong     }
703d9bb58e5SYang Zhong 
704d9bb58e5SYang Zhong     tn.addr_write = -1;
705d9bb58e5SYang Zhong     if (prot & PAGE_WRITE) {
706d9bb58e5SYang Zhong         if ((memory_region_is_ram(section->mr) && section->readonly)
707d9bb58e5SYang Zhong             || memory_region_is_romd(section->mr)) {
708d9bb58e5SYang Zhong             /* Write access calls the I/O callback.  */
709d9bb58e5SYang Zhong             tn.addr_write = address | TLB_MMIO;
710d9bb58e5SYang Zhong         } else if (memory_region_is_ram(section->mr)
711d9bb58e5SYang Zhong                    && cpu_physical_memory_is_clean(
712d9bb58e5SYang Zhong                        memory_region_get_ram_addr(section->mr) + xlat)) {
713d9bb58e5SYang Zhong             tn.addr_write = address | TLB_NOTDIRTY;
714d9bb58e5SYang Zhong         } else {
715d9bb58e5SYang Zhong             tn.addr_write = address;
716d9bb58e5SYang Zhong         }
717f52bfb12SDavid Hildenbrand         if (prot & PAGE_WRITE_INV) {
718f52bfb12SDavid Hildenbrand             tn.addr_write |= TLB_INVALID_MASK;
719f52bfb12SDavid Hildenbrand         }
720d9bb58e5SYang Zhong     }
721d9bb58e5SYang Zhong 
722d9bb58e5SYang Zhong     /* Pairs with flag setting in tlb_reset_dirty_range */
723d9bb58e5SYang Zhong     copy_tlb_helper(te, &tn, true);
724d9bb58e5SYang Zhong     /* atomic_mb_set(&te->addr_write, write_address); */
725d9bb58e5SYang Zhong }
726d9bb58e5SYang Zhong 
727d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory
728d9bb58e5SYang Zhong  * transaction attributes to be used.
729d9bb58e5SYang Zhong  */
730d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr,
731d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
732d9bb58e5SYang Zhong                   int mmu_idx, target_ulong size)
733d9bb58e5SYang Zhong {
734d9bb58e5SYang Zhong     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
735d9bb58e5SYang Zhong                             prot, mmu_idx, size);
736d9bb58e5SYang Zhong }
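/*
 * For illustration only (hypothetical target code, not part of this
 * file): a target's tlb_fill() walks its page tables and then installs
 * the mapping with something like
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             MEMTXATTRS_UNSPECIFIED, prot,
 *                             mmu_idx, TARGET_PAGE_SIZE);
 *
 * before returning so the access that missed can be retried.
 */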
737d9bb58e5SYang Zhong 
738d9bb58e5SYang Zhong static void report_bad_exec(CPUState *cpu, target_ulong addr)
739d9bb58e5SYang Zhong {
740d9bb58e5SYang Zhong     /* Accidentally executing outside RAM or ROM is quite common for
741d9bb58e5SYang Zhong      * several user-error situations, so report it in a way that
742d9bb58e5SYang Zhong      * makes it clear that this isn't a QEMU bug and provide suggestions
743d9bb58e5SYang Zhong      * about what a user could do to fix things.
744d9bb58e5SYang Zhong      */
745d9bb58e5SYang Zhong     error_report("Trying to execute code outside RAM or ROM at 0x"
746d9bb58e5SYang Zhong                  TARGET_FMT_lx, addr);
747d9bb58e5SYang Zhong     error_printf("This usually means one of the following happened:\n\n"
748d9bb58e5SYang Zhong                  "(1) You told QEMU to execute a kernel for the wrong machine "
749d9bb58e5SYang Zhong                  "type, and it crashed on startup (eg trying to run a "
750d9bb58e5SYang Zhong                  "raspberry pi kernel on a versatilepb QEMU machine)\n"
751d9bb58e5SYang Zhong                  "(2) You didn't give QEMU a kernel or BIOS filename at all, "
752d9bb58e5SYang Zhong                  "and QEMU executed a ROM full of no-op instructions until "
753d9bb58e5SYang Zhong                  "it fell off the end\n"
754d9bb58e5SYang Zhong                  "(3) Your guest kernel has a bug and crashed by jumping "
755d9bb58e5SYang Zhong                  "off into nowhere\n\n"
756d9bb58e5SYang Zhong                  "This is almost always one of the first two, so check your "
757d9bb58e5SYang Zhong                  "command line and that you are using the right type of kernel "
758d9bb58e5SYang Zhong                  "for this machine.\n"
759d9bb58e5SYang Zhong                  "If you think option (3) is likely then you can try debugging "
760d9bb58e5SYang Zhong                  "your guest with the -d debug options; in particular "
761d9bb58e5SYang Zhong                  "-d guest_errors will cause the log to include a dump of the "
762d9bb58e5SYang Zhong                  "guest register state at this point.\n\n"
763d9bb58e5SYang Zhong                  "Execution cannot continue; stopping here.\n\n");
764d9bb58e5SYang Zhong 
765d9bb58e5SYang Zhong     /* Report also to the logs, with more detail including register dump */
766d9bb58e5SYang Zhong     qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
767d9bb58e5SYang Zhong                   "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
768d9bb58e5SYang Zhong     log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
769d9bb58e5SYang Zhong }
770d9bb58e5SYang Zhong 
771d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
772d9bb58e5SYang Zhong {
773d9bb58e5SYang Zhong     ram_addr_t ram_addr;
774d9bb58e5SYang Zhong 
775d9bb58e5SYang Zhong     ram_addr = qemu_ram_addr_from_host(ptr);
776d9bb58e5SYang Zhong     if (ram_addr == RAM_ADDR_INVALID) {
777d9bb58e5SYang Zhong         error_report("Bad ram pointer %p", ptr);
778d9bb58e5SYang Zhong         abort();
779d9bb58e5SYang Zhong     }
780d9bb58e5SYang Zhong     return ram_addr;
781d9bb58e5SYang Zhong }
782d9bb58e5SYang Zhong 
783d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
78404e3aabdSPeter Maydell                          int mmu_idx,
785*55df6fcfSPeter Maydell                          target_ulong addr, uintptr_t retaddr,
786*55df6fcfSPeter Maydell                          bool recheck, int size)
787d9bb58e5SYang Zhong {
788d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
7892d54f194SPeter Maydell     hwaddr mr_offset;
7902d54f194SPeter Maydell     MemoryRegionSection *section;
7912d54f194SPeter Maydell     MemoryRegion *mr;
792d9bb58e5SYang Zhong     uint64_t val;
793d9bb58e5SYang Zhong     bool locked = false;
79404e3aabdSPeter Maydell     MemTxResult r;
795d9bb58e5SYang Zhong 
796*55df6fcfSPeter Maydell     if (recheck) {
797*55df6fcfSPeter Maydell         /*
798*55df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
799*55df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
800*55df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
801*55df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
802*55df6fcfSPeter Maydell          */
803*55df6fcfSPeter Maydell         int index;
804*55df6fcfSPeter Maydell         target_ulong tlb_addr;
805*55df6fcfSPeter Maydell 
806*55df6fcfSPeter Maydell         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
807*55df6fcfSPeter Maydell 
808*55df6fcfSPeter Maydell         index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
809*55df6fcfSPeter Maydell         tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
810*55df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
811*55df6fcfSPeter Maydell             /* RAM access */
812*55df6fcfSPeter Maydell             uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;
813*55df6fcfSPeter Maydell 
814*55df6fcfSPeter Maydell             return ldn_p((void *)haddr, size);
815*55df6fcfSPeter Maydell         }
816*55df6fcfSPeter Maydell         /* Fall through for handling IO accesses */
817*55df6fcfSPeter Maydell     }
818*55df6fcfSPeter Maydell 
8192d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
8202d54f194SPeter Maydell     mr = section->mr;
8212d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
822d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
823d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
824d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
825d9bb58e5SYang Zhong     }
826d9bb58e5SYang Zhong 
827d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
828d9bb58e5SYang Zhong 
8298b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
830d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
831d9bb58e5SYang Zhong         locked = true;
832d9bb58e5SYang Zhong     }
8332d54f194SPeter Maydell     r = memory_region_dispatch_read(mr, mr_offset,
83404e3aabdSPeter Maydell                                     &val, size, iotlbentry->attrs);
83504e3aabdSPeter Maydell     if (r != MEMTX_OK) {
8362d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
8372d54f194SPeter Maydell             section->offset_within_address_space -
8382d54f194SPeter Maydell             section->offset_within_region;
8392d54f194SPeter Maydell 
84004e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
84104e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
84204e3aabdSPeter Maydell     }
843d9bb58e5SYang Zhong     if (locked) {
844d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
845d9bb58e5SYang Zhong     }
846d9bb58e5SYang Zhong 
847d9bb58e5SYang Zhong     return val;
848d9bb58e5SYang Zhong }
849d9bb58e5SYang Zhong 
850d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
85104e3aabdSPeter Maydell                       int mmu_idx,
852d9bb58e5SYang Zhong                       uint64_t val, target_ulong addr,
853*55df6fcfSPeter Maydell                       uintptr_t retaddr, bool recheck, int size)
854d9bb58e5SYang Zhong {
855d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
8562d54f194SPeter Maydell     hwaddr mr_offset;
8572d54f194SPeter Maydell     MemoryRegionSection *section;
8582d54f194SPeter Maydell     MemoryRegion *mr;
859d9bb58e5SYang Zhong     bool locked = false;
86004e3aabdSPeter Maydell     MemTxResult r;
861d9bb58e5SYang Zhong 
862*55df6fcfSPeter Maydell     if (recheck) {
863*55df6fcfSPeter Maydell         /*
864*55df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
865*55df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
866*55df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
867*55df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
868*55df6fcfSPeter Maydell          */
869*55df6fcfSPeter Maydell         int index;
870*55df6fcfSPeter Maydell         target_ulong tlb_addr;
871*55df6fcfSPeter Maydell 
872*55df6fcfSPeter Maydell         tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
873*55df6fcfSPeter Maydell 
874*55df6fcfSPeter Maydell         index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
875*55df6fcfSPeter Maydell         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
876*55df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
877*55df6fcfSPeter Maydell             /* RAM access */
878*55df6fcfSPeter Maydell             uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;
879*55df6fcfSPeter Maydell 
880*55df6fcfSPeter Maydell             stn_p((void *)haddr, size, val);
881*55df6fcfSPeter Maydell             return;
882*55df6fcfSPeter Maydell         }
883*55df6fcfSPeter Maydell         /* Fall through for handling IO accesses */
884*55df6fcfSPeter Maydell     }
885*55df6fcfSPeter Maydell 
8862d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
8872d54f194SPeter Maydell     mr = section->mr;
8882d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
889d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
890d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
891d9bb58e5SYang Zhong     }
892d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
893d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
894d9bb58e5SYang Zhong 
8958b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
896d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
897d9bb58e5SYang Zhong         locked = true;
898d9bb58e5SYang Zhong     }
8992d54f194SPeter Maydell     r = memory_region_dispatch_write(mr, mr_offset,
90004e3aabdSPeter Maydell                                      val, size, iotlbentry->attrs);
90104e3aabdSPeter Maydell     if (r != MEMTX_OK) {
9022d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
9032d54f194SPeter Maydell             section->offset_within_address_space -
9042d54f194SPeter Maydell             section->offset_within_region;
9052d54f194SPeter Maydell 
90604e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
90704e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
90804e3aabdSPeter Maydell     }
909d9bb58e5SYang Zhong     if (locked) {
910d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
911d9bb58e5SYang Zhong     }
912d9bb58e5SYang Zhong }
913d9bb58e5SYang Zhong 
914d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
915d9bb58e5SYang Zhong    back to the main tlb.  */
916d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
917d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
918d9bb58e5SYang Zhong {
919d9bb58e5SYang Zhong     size_t vidx;
920d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
921d9bb58e5SYang Zhong         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
922d9bb58e5SYang Zhong         target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
923d9bb58e5SYang Zhong 
924d9bb58e5SYang Zhong         if (cmp == page) {
925d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
926d9bb58e5SYang Zhong             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
927d9bb58e5SYang Zhong 
928d9bb58e5SYang Zhong             copy_tlb_helper(&tmptlb, tlb, false);
929d9bb58e5SYang Zhong             copy_tlb_helper(tlb, vtlb, true);
930d9bb58e5SYang Zhong             copy_tlb_helper(vtlb, &tmptlb, true);
931d9bb58e5SYang Zhong 
932d9bb58e5SYang Zhong             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
933d9bb58e5SYang Zhong             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
934d9bb58e5SYang Zhong             tmpio = *io; *io = *vio; *vio = tmpio;
935d9bb58e5SYang Zhong             return true;
936d9bb58e5SYang Zhong         }
937d9bb58e5SYang Zhong     }
938d9bb58e5SYang Zhong     return false;
939d9bb58e5SYang Zhong }
940d9bb58e5SYang Zhong 
941d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
942d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
943d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
944d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
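/*
 * For example, VICTIM_TLB_HIT(addr_write, addr) expands to
 *
 *     victim_tlb_hit(env, mmu_idx, index,
 *                    offsetof(CPUTLBEntry, addr_write),
 *                    (addr) & TARGET_PAGE_MASK)
 *
 * i.e. victim entries are matched on whichever comparator (addr_read,
 * addr_write or addr_code) the caller cares about.
 */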
945d9bb58e5SYang Zhong 
946f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */
947f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it
948f2553f04SKONRAD Frederic  * is actually a ram_addr_t (in system mode; the user mode emulation
949f2553f04SKONRAD Frederic  * version of this function returns a guest virtual address).
950f2553f04SKONRAD Frederic  */
951f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
952f2553f04SKONRAD Frederic {
9532d54f194SPeter Maydell     int mmu_idx, index;
954f2553f04SKONRAD Frederic     void *p;
955f2553f04SKONRAD Frederic     MemoryRegion *mr;
9562d54f194SPeter Maydell     MemoryRegionSection *section;
957f2553f04SKONRAD Frederic     CPUState *cpu = ENV_GET_CPU(env);
958f2553f04SKONRAD Frederic     CPUIOTLBEntry *iotlbentry;
9592d54f194SPeter Maydell     hwaddr physaddr, mr_offset;
960f2553f04SKONRAD Frederic 
961f2553f04SKONRAD Frederic     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
962f2553f04SKONRAD Frederic     mmu_idx = cpu_mmu_index(env, true);
963f2553f04SKONRAD Frederic     if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
96471b9a453SKONRAD Frederic                  (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
96571b9a453SKONRAD Frederic         if (!VICTIM_TLB_HIT(addr_code, addr)) {
96698670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
96771b9a453SKONRAD Frederic         }
968f2553f04SKONRAD Frederic     }
969*55df6fcfSPeter Maydell 
970*55df6fcfSPeter Maydell     if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
971*55df6fcfSPeter Maydell         /*
972*55df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
973*55df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
974*55df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
975*55df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
976*55df6fcfSPeter Maydell          */
977*55df6fcfSPeter Maydell         int index;
978*55df6fcfSPeter Maydell         target_ulong tlb_addr;
979*55df6fcfSPeter Maydell 
980*55df6fcfSPeter Maydell         tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0);
981*55df6fcfSPeter Maydell 
982*55df6fcfSPeter Maydell         index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
983*55df6fcfSPeter Maydell         tlb_addr = env->tlb_table[mmu_idx][index].addr_code;
984*55df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
985*55df6fcfSPeter Maydell             /* RAM access. We can't handle this, so for now just stop */
986*55df6fcfSPeter Maydell             cpu_abort(cpu, "Unable to handle guest executing from RAM within "
987*55df6fcfSPeter Maydell                       "a small MPU region at 0x" TARGET_FMT_lx, addr);
988*55df6fcfSPeter Maydell         }
989*55df6fcfSPeter Maydell         /*
990*55df6fcfSPeter Maydell          * Fall through to handle IO accesses (which will almost certainly
991*55df6fcfSPeter Maydell          * also result in failure)
992*55df6fcfSPeter Maydell          */
993*55df6fcfSPeter Maydell     }
994*55df6fcfSPeter Maydell 
995f2553f04SKONRAD Frederic     iotlbentry = &env->iotlb[mmu_idx][index];
9962d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
9972d54f194SPeter Maydell     mr = section->mr;
998f2553f04SKONRAD Frederic     if (memory_region_is_unassigned(mr)) {
999c9356746SKONRAD Frederic         qemu_mutex_lock_iothread();
1000c9356746SKONRAD Frederic         if (memory_region_request_mmio_ptr(mr, addr)) {
1001c9356746SKONRAD Frederic             qemu_mutex_unlock_iothread();
1002c9356746SKONRAD Frederic             /* A MemoryRegion may have been added, so re-run
1003c9356746SKONRAD Frederic              * get_page_addr_code().
1004c9356746SKONRAD Frederic              */
1005c9356746SKONRAD Frederic             return get_page_addr_code(env, addr);
1006c9356746SKONRAD Frederic         }
1007c9356746SKONRAD Frederic         qemu_mutex_unlock_iothread();
1008c9356746SKONRAD Frederic 
100904e3aabdSPeter Maydell         /* Give the new-style cpu_transaction_failed() hook first chance
101004e3aabdSPeter Maydell          * to handle this.
101104e3aabdSPeter Maydell          * This is not the ideal place to detect and generate CPU
101204e3aabdSPeter Maydell          * exceptions for instruction fetch failure (for instance we
101304e3aabdSPeter Maydell          * don't know the length of the access that the CPU would use,
101404e3aabdSPeter Maydell          * and it would be better to go ahead, try the access and use
101504e3aabdSPeter Maydell          * the MemTxResult it produces).  However, it is the simplest
101604e3aabdSPeter Maydell          * place currently available for the check.
101704e3aabdSPeter Maydell          */
10182d54f194SPeter Maydell         mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
10192d54f194SPeter Maydell         physaddr = mr_offset +
10202d54f194SPeter Maydell             section->offset_within_address_space -
10212d54f194SPeter Maydell             section->offset_within_region;
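        /* mr_offset above is the offset of the access within the
         * MemoryRegion backing this page; adding the section's offset
         * within the address space and subtracting its offset within
         * the region converts it into the full physical address passed
         * to the cpu_transaction_failed() hook below.
         */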
102204e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
102304e3aabdSPeter Maydell                                iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
102404e3aabdSPeter Maydell 
1025f2553f04SKONRAD Frederic         cpu_unassigned_access(cpu, addr, false, true, 0, 4);
1026f2553f04SKONRAD Frederic         /* The CPU's unassigned access hook might have longjumped out
1027f2553f04SKONRAD Frederic          * with an exception. If it didn't (or there was no hook) then
1028f2553f04SKONRAD Frederic          * we can't proceed further.
1029f2553f04SKONRAD Frederic          */
1030f2553f04SKONRAD Frederic         report_bad_exec(cpu, addr);
1031f2553f04SKONRAD Frederic         exit(1);
1032f2553f04SKONRAD Frederic     }
1033f2553f04SKONRAD Frederic     p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
1034f2553f04SKONRAD Frederic     return qemu_ram_addr_from_host_nofail(p);
1035f2553f04SKONRAD Frederic }
1036f2553f04SKONRAD Frederic 
1037d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted.
1038d9bb58e5SYang Zhong  * If it is not permitted then an exception will be taken in the same
1039d9bb58e5SYang Zhong  * way as if this were a real write access (and we will not return).
1040d9bb58e5SYang Zhong  * Otherwise the function will return, and there will be a valid
1041d9bb58e5SYang Zhong  * entry in the TLB for this access.
1042d9bb58e5SYang Zhong  */
104398670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1044d9bb58e5SYang Zhong                  uintptr_t retaddr)
1045d9bb58e5SYang Zhong {
1046d9bb58e5SYang Zhong     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1047d9bb58e5SYang Zhong     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
1048d9bb58e5SYang Zhong 
1049d9bb58e5SYang Zhong     if ((addr & TARGET_PAGE_MASK)
1050d9bb58e5SYang Zhong         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1051d9bb58e5SYang Zhong         /* TLB entry is for a different page */
1052d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
105398670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
105498670d47SLaurent Vivier                      mmu_idx, retaddr);
1055d9bb58e5SYang Zhong         }
1056d9bb58e5SYang Zhong     }
1057d9bb58e5SYang Zhong }
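
/* A hypothetical use, for illustration only (the helper name is made
 * up): a target helper that must not fault part-way through a
 * multi-word store, and that knows the access does not cross a page
 * boundary, can validate the destination up front:
 *
 *     void helper_store_pair(CPUArchState *env, target_ulong addr,
 *                            uint64_t lo, uint64_t hi)
 *     {
 *         int mmu_idx = cpu_mmu_index(env, false);
 *
 *         probe_write(env, addr, 16, mmu_idx, GETPC());
 *         .. either an exception was raised above, or the TLB now
 *            holds a valid write entry for this page and the stores
 *            that follow cannot fault on it ..
 *     }
 */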
1058d9bb58e5SYang Zhong 
1059d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1060d9bb58e5SYang Zhong  * or I/O operations to proceed.  Return the host address.  */
1061d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
106234d49937SPeter Maydell                                TCGMemOpIdx oi, uintptr_t retaddr,
106334d49937SPeter Maydell                                NotDirtyInfo *ndi)
1064d9bb58e5SYang Zhong {
1065d9bb58e5SYang Zhong     size_t mmu_idx = get_mmuidx(oi);
1066d9bb58e5SYang Zhong     size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1067d9bb58e5SYang Zhong     CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
1068d9bb58e5SYang Zhong     target_ulong tlb_addr = tlbe->addr_write;
1069d9bb58e5SYang Zhong     TCGMemOp mop = get_memop(oi);
1070d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
1071d9bb58e5SYang Zhong     int s_bits = mop & MO_SIZE;
107234d49937SPeter Maydell     void *hostaddr;
1073d9bb58e5SYang Zhong 
1074d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1075d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1076d9bb58e5SYang Zhong 
1077d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1078d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1079d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1080d9bb58e5SYang Zhong         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1081d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1082d9bb58e5SYang Zhong     }
1083d9bb58e5SYang Zhong 
1084d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
1085d9bb58e5SYang Zhong     if (unlikely(addr & ((1 << s_bits) - 1))) {
1086d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1087d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1088d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1089d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1090d9bb58e5SYang Zhong         goto stop_the_world;
1091d9bb58e5SYang Zhong     }
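    /* For example, for a 4-byte access MO_SIZE gives s_bits == 2, so any
     * addr with (addr & 3) != 0 is rejected here via stop_the_world; if
     * the guest ABI also demanded natural alignment, a_bits would be 2
     * as well and the cpu_unaligned_access() call above would already
     * have raised a guest-visible alignment fault.
     */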
1092d9bb58e5SYang Zhong 
1093d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
1094d9bb58e5SYang Zhong     if ((addr & TARGET_PAGE_MASK)
1095d9bb58e5SYang Zhong         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1096d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
109798670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
109898670d47SLaurent Vivier                      mmu_idx, retaddr);
1099d9bb58e5SYang Zhong         }
1100f52bfb12SDavid Hildenbrand         tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
1101d9bb58e5SYang Zhong     }
1102d9bb58e5SYang Zhong 
1103*55df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
1104*55df6fcfSPeter Maydell     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1105d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1106d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1107d9bb58e5SYang Zhong         goto stop_the_world;
1108d9bb58e5SYang Zhong     }
1109d9bb58e5SYang Zhong 
1110d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
111134d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
111298670d47SLaurent Vivier         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
111398670d47SLaurent Vivier                  mmu_idx, retaddr);
1114d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1115d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1116d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1117d9bb58e5SYang Zhong         goto stop_the_world;
1118d9bb58e5SYang Zhong     }
1119d9bb58e5SYang Zhong 
112034d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
112134d49937SPeter Maydell 
112234d49937SPeter Maydell     ndi->active = false;
112334d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
112434d49937SPeter Maydell         ndi->active = true;
112534d49937SPeter Maydell         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
112634d49937SPeter Maydell                                       qemu_ram_addr_from_host_nofail(hostaddr),
112734d49937SPeter Maydell                                       1 << s_bits);
112834d49937SPeter Maydell     }
112934d49937SPeter Maydell 
113034d49937SPeter Maydell     return hostaddr;
1131d9bb58e5SYang Zhong 
1132d9bb58e5SYang Zhong  stop_the_world:
1133d9bb58e5SYang Zhong     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1134d9bb58e5SYang Zhong }
1135d9bb58e5SYang Zhong 
1136d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN
1137d9bb58e5SYang Zhong # define TGT_BE(X)  (X)
1138d9bb58e5SYang Zhong # define TGT_LE(X)  BSWAP(X)
1139d9bb58e5SYang Zhong #else
1140d9bb58e5SYang Zhong # define TGT_BE(X)  BSWAP(X)
1141d9bb58e5SYang Zhong # define TGT_LE(X)  (X)
1142d9bb58e5SYang Zhong #endif
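/* For example, on a little-endian target TGT_LE(x) is just x and
 * TGT_BE(0x11223344) evaluates to BSWAP(0x11223344) == 0x44332211 for a
 * 32-bit value; on a big-endian target the two macros swap roles.  The
 * size-templated helpers below use them to byte-swap values whose
 * requested endianness differs from the target's.
 */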
1143d9bb58e5SYang Zhong 
1144d9bb58e5SYang Zhong #define MMUSUFFIX _mmu
1145d9bb58e5SYang Zhong 
1146d9bb58e5SYang Zhong #define DATA_SIZE 1
1147d9bb58e5SYang Zhong #include "softmmu_template.h"
1148d9bb58e5SYang Zhong 
1149d9bb58e5SYang Zhong #define DATA_SIZE 2
1150d9bb58e5SYang Zhong #include "softmmu_template.h"
1151d9bb58e5SYang Zhong 
1152d9bb58e5SYang Zhong #define DATA_SIZE 4
1153d9bb58e5SYang Zhong #include "softmmu_template.h"
1154d9bb58e5SYang Zhong 
1155d9bb58e5SYang Zhong #define DATA_SIZE 8
1156d9bb58e5SYang Zhong #include "softmmu_template.h"
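/* Each inclusion above expands softmmu_template.h once per access size
 * (1, 2, 4 and 8 bytes) with MMUSUFFIX set to _mmu, generating the
 * softmmu load and store helpers that translated code calls when a
 * guest memory access cannot be completed inline.
 */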
1157d9bb58e5SYang Zhong 
1158d9bb58e5SYang Zhong /* The first set of helpers allows OI and RETADDR to be passed in.  This
1159d9bb58e5SYang Zhong    makes them callable from other helpers.  */
1160d9bb58e5SYang Zhong 
1161d9bb58e5SYang Zhong #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1162d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
1163d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
116434d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
116534d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
116634d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP                              \
116734d49937SPeter Maydell     do {                                                \
116834d49937SPeter Maydell         if (unlikely(ndi.active)) {                     \
116934d49937SPeter Maydell             memory_notdirty_write_complete(&ndi);       \
117034d49937SPeter Maydell         }                                               \
117134d49937SPeter Maydell     } while (0)
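/* With the definitions above, each inclusion of atomic_template.h below
 * emits the "_mmu" flavour of the atomic helpers for one access size:
 * ATOMIC_NAME() pastes "atomic_", the operation name, the template's
 * size SUFFIX and endianness END, and "_mmu", and the helpers take the
 * explicit (oi, retaddr) arguments declared in EXTRA_ARGS so that other
 * helpers can call them directly.  ATOMIC_MMU_LOOKUP resolves the guest
 * address via atomic_mmu_lookup() and ATOMIC_MMU_CLEANUP completes any
 * pending notdirty write tracking that the lookup started.
 */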
1172d9bb58e5SYang Zhong 
1173d9bb58e5SYang Zhong #define DATA_SIZE 1
1174d9bb58e5SYang Zhong #include "atomic_template.h"
1175d9bb58e5SYang Zhong 
1176d9bb58e5SYang Zhong #define DATA_SIZE 2
1177d9bb58e5SYang Zhong #include "atomic_template.h"
1178d9bb58e5SYang Zhong 
1179d9bb58e5SYang Zhong #define DATA_SIZE 4
1180d9bb58e5SYang Zhong #include "atomic_template.h"
1181d9bb58e5SYang Zhong 
1182d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1183d9bb58e5SYang Zhong #define DATA_SIZE 8
1184d9bb58e5SYang Zhong #include "atomic_template.h"
1185d9bb58e5SYang Zhong #endif
1186d9bb58e5SYang Zhong 
1187d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC128
1188d9bb58e5SYang Zhong #define DATA_SIZE 16
1189d9bb58e5SYang Zhong #include "atomic_template.h"
1190d9bb58e5SYang Zhong #endif
1191d9bb58e5SYang Zhong 
1192d9bb58e5SYang Zhong /* The second set of helpers is callable directly from TCG-generated code.  */
1193d9bb58e5SYang Zhong 
1194d9bb58e5SYang Zhong #undef EXTRA_ARGS
1195d9bb58e5SYang Zhong #undef ATOMIC_NAME
1196d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
1197d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
1198d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
119934d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
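/* Compared with the first set, ATOMIC_NAME here omits the "_mmu" suffix
 * and EXTRA_ARGS no longer includes retaddr: these are the entry points
 * invoked directly from TCG-generated code, so the return address is
 * recovered with GETPC() at the lookup instead of being passed in.
 */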
1200d9bb58e5SYang Zhong 
1201d9bb58e5SYang Zhong #define DATA_SIZE 1
1202d9bb58e5SYang Zhong #include "atomic_template.h"
1203d9bb58e5SYang Zhong 
1204d9bb58e5SYang Zhong #define DATA_SIZE 2
1205d9bb58e5SYang Zhong #include "atomic_template.h"
1206d9bb58e5SYang Zhong 
1207d9bb58e5SYang Zhong #define DATA_SIZE 4
1208d9bb58e5SYang Zhong #include "atomic_template.h"
1209d9bb58e5SYang Zhong 
1210d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1211d9bb58e5SYang Zhong #define DATA_SIZE 8
1212d9bb58e5SYang Zhong #include "atomic_template.h"
1213d9bb58e5SYang Zhong #endif
1214d9bb58e5SYang Zhong 
1215d9bb58e5SYang Zhong /* Code access functions.  */
1216d9bb58e5SYang Zhong 
1217d9bb58e5SYang Zhong #undef MMUSUFFIX
1218d9bb58e5SYang Zhong #define MMUSUFFIX _cmmu
1219d9bb58e5SYang Zhong #undef GETPC
1220d9bb58e5SYang Zhong #define GETPC() ((uintptr_t)0)
1221d9bb58e5SYang Zhong #define SOFTMMU_CODE_ACCESS
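/* The softmmu template is expanded once more with MMUSUFFIX set to
 * _cmmu and SOFTMMU_CODE_ACCESS selecting the template's code-fetch
 * variant, producing the instruction-fetch versions of the load
 * helpers.  GETPC() is redefined to 0 here because a code fetch has no
 * translated-code return address to unwind.
 */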
1222d9bb58e5SYang Zhong 
1223d9bb58e5SYang Zhong #define DATA_SIZE 1
1224d9bb58e5SYang Zhong #include "softmmu_template.h"
1225d9bb58e5SYang Zhong 
1226d9bb58e5SYang Zhong #define DATA_SIZE 2
1227d9bb58e5SYang Zhong #include "softmmu_template.h"
1228d9bb58e5SYang Zhong 
1229d9bb58e5SYang Zhong #define DATA_SIZE 4
1230d9bb58e5SYang Zhong #include "softmmu_template.h"
1231d9bb58e5SYang Zhong 
1232d9bb58e5SYang Zhong #define DATA_SIZE 8
1233d9bb58e5SYang Zhong #include "softmmu_template.h"
1234