xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 60a2ad7d86e7379e6669806bedaa6cfdf4f2c2f4)
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

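/*
 * Debug-only sanity check: code that manipulates a vCPU's TLB structures
 * is expected to run on that vCPU's own thread (or before the vCPU has
 * been created).  Effectively compiled out unless DEBUG_TLB is defined.
 */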
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

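/* Initialize the TLB state for one vCPU; currently just the lock that
 * guards updates to its TLB tables. */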
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);
}

/* flush_all_helper: run fn across all cpus
 *
 * fn is queued as asynchronous work on every vCPU other than src.
 * Callers that need a synchronisation point additionally queue work on
 * src itself, either directly or as "safe" work (see the
 * *_all_cpus_synced variants below).
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

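/*
 * Total number of full TLB flushes performed by all vCPUs.  The per-CPU
 * counters are read without taking any lock, so the sum is only
 * approximate; it is intended for statistics.
 */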
size_t tlb_flush_count(void)
{
    CPUState *cpu;
    size_t count = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        count += atomic_read(&env->tlb_flush_count);
    }
    return count;
}

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    assert_cpu_is_self(cpu);
    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    tlb_debug("(count: %zu)\n", tlb_flush_count());

    /*
     * tlb_table/tlb_v_table updates from any thread must hold tlb_c.lock.
     * However, updates from the owner thread (as is the case here; see the
     * above assert_cpu_is_self) do not need atomic_set because all reads
     * that do not hold the lock are performed by the same owner thread.
     */
    qemu_spin_lock(&env->tlb_c.lock);
    env->tlb_c.pending_flush = 0;
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

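/*
 * Flush the entire TLB of a vCPU.  When called from a thread other than
 * the vCPU's own, the flush is queued as asynchronous work and
 * tlb_c.pending_flush is used to coalesce requests, so that at most one
 * full flush is outstanding at a time.
 */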
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        CPUArchState *env = cpu->env_ptr;
        uint16_t pending;

        qemu_spin_lock(&env->tlb_c.lock);
        pending = env->tlb_c.pending_flush;
        env->tlb_c.pending_flush = ALL_MMUIDX_BITS;
        qemu_spin_unlock(&env->tlb_c.lock);

        if (pending != ALL_MMUIDX_BITS) {
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

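/*
 * Flush only the TLB and victim-TLB tables for the MMU indexes whose bits
 * are set in data.host_int.  Runs on the vCPU's own thread, holding
 * tlb_c.lock while the tables are cleared.
 */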
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    qemu_spin_lock(&env->tlb_c.lock);
    env->tlb_c.pending_flush &= ~mmu_idx_bitmask;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        CPUArchState *env = cpu->env_ptr;
        uint16_t pending, to_clean;

        qemu_spin_lock(&env->tlb_c.lock);
        pending = env->tlb_c.pending_flush;
        to_clean = idxmap & ~pending;
        env->tlb_c.pending_flush = pending | idxmap;
        qemu_spin_unlock(&env->tlb_c.lock);

        if (to_clean) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", to_clean);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(to_clean));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                         uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
    }
}

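/*
 * Flush a single virtual page from every MMU mode of one vCPU, including
 * the victim TLB.  If the page lies inside the recorded large-page range,
 * fall back to a full flush.
 */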
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
        tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

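/* Flush one virtual page from a vCPU's TLB; cross-vCPU requests are queued
 * as asynchronous work on the target vCPU's thread. */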
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for an
 * mmuidx bit mask, we must fail the build if that is not possible.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

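/*
 * Worker for a single-page flush restricted to a set of MMU indexes;
 * data.target_ptr carries the page-aligned address with the mmuidx bitmap
 * packed into its low bits (see QEMU_BUILD_BUG_ON above).
 */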
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("flush page addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
            tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

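/*
 * As above, but first check whether the page falls within the recorded
 * large-page range; if it does, widen the operation to a flush of all the
 * requested MMU indexes.
 */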
static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

/* update the TLBs so that writes to code in the RAM page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the RAM page 'ram_addr' are no longer
   tested for self-modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size < TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        if (size > TARGET_PAGE_SIZE) {
            tlb_add_large_page(env, vaddr, size);
        }
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

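/*
 * Perform a load that the TLB has routed to the I/O path (TLB_MMIO) or
 * that needs the MMU check repeated (TLB_RECHECK).  The access is
 * dispatched to the MemoryRegion covering the page, taking the iothread
 * lock if the region requires it, and transaction failures are reported
 * via cpu_transaction_failed().
 */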
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

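/*
 * Store counterpart of io_readx(): handle a write routed to the I/O path
 * or needing a repeated MMU check, with the same locking and
 * transaction-failure handling.
 */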
837d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
83804e3aabdSPeter Maydell                       int mmu_idx,
839d9bb58e5SYang Zhong                       uint64_t val, target_ulong addr,
84055df6fcfSPeter Maydell                       uintptr_t retaddr, bool recheck, int size)
841d9bb58e5SYang Zhong {
842d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
8432d54f194SPeter Maydell     hwaddr mr_offset;
8442d54f194SPeter Maydell     MemoryRegionSection *section;
8452d54f194SPeter Maydell     MemoryRegion *mr;
846d9bb58e5SYang Zhong     bool locked = false;
84704e3aabdSPeter Maydell     MemTxResult r;
848d9bb58e5SYang Zhong 
84955df6fcfSPeter Maydell     if (recheck) {
85055df6fcfSPeter Maydell         /*
85155df6fcfSPeter Maydell          * This is a TLB_RECHECK access, where the MMU protection
85255df6fcfSPeter Maydell          * covers a smaller range than a target page, and we must
85355df6fcfSPeter Maydell          * repeat the MMU check here. This tlb_fill() call might
85455df6fcfSPeter Maydell          * longjump out if this access should cause a guest exception.
85555df6fcfSPeter Maydell          */
856383beda9SRichard Henderson         CPUTLBEntry *entry;
85755df6fcfSPeter Maydell         target_ulong tlb_addr;
85855df6fcfSPeter Maydell 
85955df6fcfSPeter Maydell         tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
86055df6fcfSPeter Maydell 
861383beda9SRichard Henderson         entry = tlb_entry(env, mmu_idx, addr);
862403f290cSEmilio G. Cota         tlb_addr = tlb_addr_write(entry);
86355df6fcfSPeter Maydell         if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
86455df6fcfSPeter Maydell             /* RAM access */
865383beda9SRichard Henderson             uintptr_t haddr = addr + entry->addend;
86655df6fcfSPeter Maydell 
86755df6fcfSPeter Maydell             stn_p((void *)haddr, size, val);
86855df6fcfSPeter Maydell             return;
86955df6fcfSPeter Maydell         }
87055df6fcfSPeter Maydell         /* Fall through for handling IO accesses */
87155df6fcfSPeter Maydell     }
87255df6fcfSPeter Maydell 
8732d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
8742d54f194SPeter Maydell     mr = section->mr;
8752d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
876d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
877d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
878d9bb58e5SYang Zhong     }
879d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
880d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
881d9bb58e5SYang Zhong 
8828b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
883d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
884d9bb58e5SYang Zhong         locked = true;
885d9bb58e5SYang Zhong     }
8862d54f194SPeter Maydell     r = memory_region_dispatch_write(mr, mr_offset,
88704e3aabdSPeter Maydell                                      val, size, iotlbentry->attrs);
88804e3aabdSPeter Maydell     if (r != MEMTX_OK) {
8892d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
8902d54f194SPeter Maydell             section->offset_within_address_space -
8912d54f194SPeter Maydell             section->offset_within_region;
8922d54f194SPeter Maydell 
89304e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
89404e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
89504e3aabdSPeter Maydell     }
896d9bb58e5SYang Zhong     if (locked) {
897d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
898d9bb58e5SYang Zhong     }
899d9bb58e5SYang Zhong }
900d9bb58e5SYang Zhong 
901d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
902d9bb58e5SYang Zhong    back to the main tlb.  */
903d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
904d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
905d9bb58e5SYang Zhong {
906d9bb58e5SYang Zhong     size_t vidx;
90771aec354SEmilio G. Cota 
90871aec354SEmilio G. Cota     assert_cpu_is_self(ENV_GET_CPU(env));
909d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
910d9bb58e5SYang Zhong         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
911403f290cSEmilio G. Cota         target_ulong cmp;
912403f290cSEmilio G. Cota 
913403f290cSEmilio G. Cota         /* elt_ofs might correspond to .addr_write, so use atomic_read */
914403f290cSEmilio G. Cota #if TCG_OVERSIZED_GUEST
915403f290cSEmilio G. Cota         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
916403f290cSEmilio G. Cota #else
917403f290cSEmilio G. Cota         cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
918403f290cSEmilio G. Cota #endif
919d9bb58e5SYang Zhong 
920d9bb58e5SYang Zhong         if (cmp == page) {
921d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
922d9bb58e5SYang Zhong             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
923d9bb58e5SYang Zhong 
92453d28455SRichard Henderson             qemu_spin_lock(&env->tlb_c.lock);
92571aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
92671aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
92771aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
92853d28455SRichard Henderson             qemu_spin_unlock(&env->tlb_c.lock);
929d9bb58e5SYang Zhong 
930d9bb58e5SYang Zhong             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
931d9bb58e5SYang Zhong             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
932d9bb58e5SYang Zhong             tmpio = *io; *io = *vio; *vio = tmpio;
933d9bb58e5SYang Zhong             return true;
934d9bb58e5SYang Zhong         }
935d9bb58e5SYang Zhong     }
936d9bb58e5SYang Zhong     return false;
937d9bb58e5SYang Zhong }
938d9bb58e5SYang Zhong 
939d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
940d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
941d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
942d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
943d9bb58e5SYang Zhong 
944f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */
945f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it
946f2553f04SKONRAD Frederic  * is actually a ram_addr_t (in system mode; the user mode emulation
947f2553f04SKONRAD Frederic  * version of this function returns a guest virtual address).
948f2553f04SKONRAD Frederic  */
949f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
950f2553f04SKONRAD Frederic {
951383beda9SRichard Henderson     uintptr_t mmu_idx = cpu_mmu_index(env, true);
952383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
953383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
954f2553f04SKONRAD Frederic     void *p;
955f2553f04SKONRAD Frederic 
956383beda9SRichard Henderson     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
957b493ccf1SPeter Maydell         if (!VICTIM_TLB_HIT(addr_code, addr)) {
95898670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
95971b9a453SKONRAD Frederic         }
960383beda9SRichard Henderson         assert(tlb_hit(entry->addr_code, addr));
961f2553f04SKONRAD Frederic     }
96255df6fcfSPeter Maydell 
963383beda9SRichard Henderson     if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
96455df6fcfSPeter Maydell         /*
96555a7cb14SPeter Maydell          * Return -1 if we can't translate and execute from an entire
96655a7cb14SPeter Maydell          * page of RAM here, which will cause us to execute by loading
96755a7cb14SPeter Maydell          * and translating one insn at a time, without caching:
96855a7cb14SPeter Maydell          *  - TLB_RECHECK: means the MMU protection covers a smaller range
96955a7cb14SPeter Maydell          *    than a target page, so we must redo the MMU check every insn
97055a7cb14SPeter Maydell          *  - TLB_MMIO: region is not backed by RAM
97155df6fcfSPeter Maydell          */
97220cb6ae4SPeter Maydell         return -1;
97355df6fcfSPeter Maydell     }
97455df6fcfSPeter Maydell 
975383beda9SRichard Henderson     p = (void *)((uintptr_t)addr + entry->addend);
976f2553f04SKONRAD Frederic     return qemu_ram_addr_from_host_nofail(p);
977f2553f04SKONRAD Frederic }
978f2553f04SKONRAD Frederic 
979d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted.
980d9bb58e5SYang Zhong  * If it is not permitted then an exception will be taken in the same
981d9bb58e5SYang Zhong  * way as if this were a real write access (and we will not return).
982d9bb58e5SYang Zhong  * Otherwise the function will return, and there will be a valid
983d9bb58e5SYang Zhong  * entry in the TLB for this access.
984d9bb58e5SYang Zhong  */
98598670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
986d9bb58e5SYang Zhong                  uintptr_t retaddr)
987d9bb58e5SYang Zhong {
988383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
989383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
990d9bb58e5SYang Zhong 
991403f290cSEmilio G. Cota     if (!tlb_hit(tlb_addr_write(entry), addr)) {
992d9bb58e5SYang Zhong         /* TLB entry is for a different page */
993d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
99498670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
99598670d47SLaurent Vivier                      mmu_idx, retaddr);
996d9bb58e5SYang Zhong         }
997d9bb58e5SYang Zhong     }
998d9bb58e5SYang Zhong }
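
/*
 * Illustrative sketch, not part of the original file: a target helper
 * that must not fault partway through a multi-part store can probe the
 * whole range first.  helper_store_pair() and its arguments are
 * hypothetical names used only for this example.
 *
 *     void helper_store_pair(CPUArchState *env, target_ulong addr,
 *                            uint64_t lo, uint64_t hi)
 *     {
 *         probe_write(env, addr, 16, cpu_mmu_index(env, false), GETPC());
 *         // After the probe returns, stores that stay within the probed
 *         // page cannot raise a TLB fault.
 *     }
 */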
999d9bb58e5SYang Zhong 
1000d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1001d9bb58e5SYang Zhong  * or I/O operations to proceed.  Return the host address.  */
1002d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
100334d49937SPeter Maydell                                TCGMemOpIdx oi, uintptr_t retaddr,
100434d49937SPeter Maydell                                NotDirtyInfo *ndi)
1005d9bb58e5SYang Zhong {
1006d9bb58e5SYang Zhong     size_t mmu_idx = get_mmuidx(oi);
1007383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1008383beda9SRichard Henderson     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1009403f290cSEmilio G. Cota     target_ulong tlb_addr = tlb_addr_write(tlbe);
1010d9bb58e5SYang Zhong     TCGMemOp mop = get_memop(oi);
1011d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
1012d9bb58e5SYang Zhong     int s_bits = mop & MO_SIZE;
101334d49937SPeter Maydell     void *hostaddr;
1014d9bb58e5SYang Zhong 
1015d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1016d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1017d9bb58e5SYang Zhong 
1018d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1019d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1020d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1021d9bb58e5SYang Zhong         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1022d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1023d9bb58e5SYang Zhong     }
1024d9bb58e5SYang Zhong 
1025d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
1026d9bb58e5SYang Zhong     if (unlikely(addr & ((1 << s_bits) - 1))) {
1027d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1028d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1029d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1030d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1031d9bb58e5SYang Zhong         goto stop_the_world;
1032d9bb58e5SYang Zhong     }
1033d9bb58e5SYang Zhong 
1034d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
1035334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1036d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
103798670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
103898670d47SLaurent Vivier                      mmu_idx, retaddr);
1039d9bb58e5SYang Zhong         }
1040403f290cSEmilio G. Cota         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1041d9bb58e5SYang Zhong     }
1042d9bb58e5SYang Zhong 
104355df6fcfSPeter Maydell     /* Notice an I/O access or an access that needs a full MMU lookup */
104455df6fcfSPeter Maydell     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1045d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1046d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1047d9bb58e5SYang Zhong         goto stop_the_world;
1048d9bb58e5SYang Zhong     }
1049d9bb58e5SYang Zhong 
1050d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
105134d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
105298670d47SLaurent Vivier         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
105398670d47SLaurent Vivier                  mmu_idx, retaddr);
1054d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1055d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1056d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1057d9bb58e5SYang Zhong         goto stop_the_world;
1058d9bb58e5SYang Zhong     }
1059d9bb58e5SYang Zhong 
106034d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
106134d49937SPeter Maydell 
106234d49937SPeter Maydell     ndi->active = false;
106334d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
106434d49937SPeter Maydell         ndi->active = true;
106534d49937SPeter Maydell         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
106634d49937SPeter Maydell                                       qemu_ram_addr_from_host_nofail(hostaddr),
106734d49937SPeter Maydell                                       1 << s_bits);
106834d49937SPeter Maydell     }
106934d49937SPeter Maydell 
107034d49937SPeter Maydell     return hostaddr;
1071d9bb58e5SYang Zhong 
1072d9bb58e5SYang Zhong  stop_the_world:
1073d9bb58e5SYang Zhong     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1074d9bb58e5SYang Zhong }
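
/*
 * Worked example (illustrative only) for the alignment checks in
 * atomic_mmu_lookup() above: for a 4-byte atomic op, s_bits is 2.  If the
 * guest requested natural alignment, a_bits is also 2, so an access at,
 * say, addr == 0x1002 takes the cpu_unaligned_access() path.  If the
 * guest did not request alignment, a_bits is 0 and the same address
 * falls through to the s_bits check and exits via stop_the_world.
 */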
1075d9bb58e5SYang Zhong 
1076d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN
1077d9bb58e5SYang Zhong # define TGT_BE(X)  (X)
1078d9bb58e5SYang Zhong # define TGT_LE(X)  BSWAP(X)
1079d9bb58e5SYang Zhong #else
1080d9bb58e5SYang Zhong # define TGT_BE(X)  BSWAP(X)
1081d9bb58e5SYang Zhong # define TGT_LE(X)  (X)
1082d9bb58e5SYang Zhong #endif
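
/*
 * Illustrative note: TGT_LE() converts a value held in little-endian byte
 * order into the target's byte order (an identity on little-endian
 * targets, a BSWAP otherwise), and TGT_BE() does the same for a
 * big-endian value.  For example, with a big-endian target and a 32-bit
 * access, TGT_LE(0x11223344) evaluates to 0x44332211.
 */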
1083d9bb58e5SYang Zhong 
1084d9bb58e5SYang Zhong #define MMUSUFFIX _mmu
1085d9bb58e5SYang Zhong 
1086d9bb58e5SYang Zhong #define DATA_SIZE 1
1087d9bb58e5SYang Zhong #include "softmmu_template.h"
1088d9bb58e5SYang Zhong 
1089d9bb58e5SYang Zhong #define DATA_SIZE 2
1090d9bb58e5SYang Zhong #include "softmmu_template.h"
1091d9bb58e5SYang Zhong 
1092d9bb58e5SYang Zhong #define DATA_SIZE 4
1093d9bb58e5SYang Zhong #include "softmmu_template.h"
1094d9bb58e5SYang Zhong 
1095d9bb58e5SYang Zhong #define DATA_SIZE 8
1096d9bb58e5SYang Zhong #include "softmmu_template.h"
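
/*
 * A hedged sketch of what the four inclusions above generate: each one
 * instantiates the out-of-line load/store helpers for a single access
 * size, with MMUSUFFIX and DATA_SIZE controlling the generated names.
 * For instance, the DATA_SIZE 1 inclusion provides helpers along the
 * lines of
 *
 *     tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env,
 *                                          target_ulong addr,
 *                                          TCGMemOpIdx oi, uintptr_t ra);
 *     void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr,
 *                             uint8_t val, TCGMemOpIdx oi, uintptr_t ra);
 *
 * (the exact prototypes come from softmmu_template.h, not from here).
 */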
1097d9bb58e5SYang Zhong 
1098d9bb58e5SYang Zhong /* The first set of helpers allows OI and RETADDR to be passed in
1099d9bb58e5SYang Zhong    explicitly, which makes them callable from other helpers.  */
1100d9bb58e5SYang Zhong 
1101d9bb58e5SYang Zhong #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1102d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
1103d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
110434d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
110534d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
110634d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP                              \
110734d49937SPeter Maydell     do {                                                \
110834d49937SPeter Maydell         if (unlikely(ndi.active)) {                     \
110934d49937SPeter Maydell             memory_notdirty_write_complete(&ndi);       \
111034d49937SPeter Maydell         }                                               \
111134d49937SPeter Maydell     } while (0)
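
/*
 * Illustrative sketch, not the literal template expansion: with the
 * macros above, a helper generated by atomic_template.h looks roughly
 * like
 *
 *     ATOMIC_MMU_DECLS;                      // NotDirtyInfo ndi;
 *     DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;  // atomic_mmu_lookup(...)
 *     // ... perform the host atomic operation on *haddr ...
 *     ATOMIC_MMU_CLEANUP;                    // finish notdirty tracking
 *
 * so every generated helper shares the same lookup/cleanup bracketing.
 */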
1112d9bb58e5SYang Zhong 
1113d9bb58e5SYang Zhong #define DATA_SIZE 1
1114d9bb58e5SYang Zhong #include "atomic_template.h"
1115d9bb58e5SYang Zhong 
1116d9bb58e5SYang Zhong #define DATA_SIZE 2
1117d9bb58e5SYang Zhong #include "atomic_template.h"
1118d9bb58e5SYang Zhong 
1119d9bb58e5SYang Zhong #define DATA_SIZE 4
1120d9bb58e5SYang Zhong #include "atomic_template.h"
1121d9bb58e5SYang Zhong 
1122d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1123d9bb58e5SYang Zhong #define DATA_SIZE 8
1124d9bb58e5SYang Zhong #include "atomic_template.h"
1125d9bb58e5SYang Zhong #endif
1126d9bb58e5SYang Zhong 
1127e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1128d9bb58e5SYang Zhong #define DATA_SIZE 16
1129d9bb58e5SYang Zhong #include "atomic_template.h"
1130d9bb58e5SYang Zhong #endif
1131d9bb58e5SYang Zhong 
1132d9bb58e5SYang Zhong /* The second set of helpers is directly callable from TCG-generated code.  */
1133d9bb58e5SYang Zhong 
1134d9bb58e5SYang Zhong #undef EXTRA_ARGS
1135d9bb58e5SYang Zhong #undef ATOMIC_NAME
1136d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
1137d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
1138d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
113934d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
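
/*
 * Hedged example of the naming difference between the two sets: for the
 * same operation, the first set produces a name such as
 * helper_atomic_cmpxchgl_le_mmu(env, addr, old, new, oi, retaddr), while
 * this second set produces helper_atomic_cmpxchgl_le(env, addr, old, new,
 * oi) and recovers the return address itself via GETPC().  The exact
 * names and signatures are determined by atomic_template.h.
 */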
1140d9bb58e5SYang Zhong 
1141d9bb58e5SYang Zhong #define DATA_SIZE 1
1142d9bb58e5SYang Zhong #include "atomic_template.h"
1143d9bb58e5SYang Zhong 
1144d9bb58e5SYang Zhong #define DATA_SIZE 2
1145d9bb58e5SYang Zhong #include "atomic_template.h"
1146d9bb58e5SYang Zhong 
1147d9bb58e5SYang Zhong #define DATA_SIZE 4
1148d9bb58e5SYang Zhong #include "atomic_template.h"
1149d9bb58e5SYang Zhong 
1150d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1151d9bb58e5SYang Zhong #define DATA_SIZE 8
1152d9bb58e5SYang Zhong #include "atomic_template.h"
1153d9bb58e5SYang Zhong #endif
1154d9bb58e5SYang Zhong 
1155d9bb58e5SYang Zhong /* Code access functions.  */
1156d9bb58e5SYang Zhong 
1157d9bb58e5SYang Zhong #undef MMUSUFFIX
1158d9bb58e5SYang Zhong #define MMUSUFFIX _cmmu
1159d9bb58e5SYang Zhong #undef GETPC
1160d9bb58e5SYang Zhong #define GETPC() ((uintptr_t)0)
1161d9bb58e5SYang Zhong #define SOFTMMU_CODE_ACCESS
1162d9bb58e5SYang Zhong 
1163d9bb58e5SYang Zhong #define DATA_SIZE 1
1164d9bb58e5SYang Zhong #include "softmmu_template.h"
1165d9bb58e5SYang Zhong 
1166d9bb58e5SYang Zhong #define DATA_SIZE 2
1167d9bb58e5SYang Zhong #include "softmmu_template.h"
1168d9bb58e5SYang Zhong 
1169d9bb58e5SYang Zhong #define DATA_SIZE 4
1170d9bb58e5SYang Zhong #include "softmmu_template.h"
1171d9bb58e5SYang Zhong 
1172d9bb58e5SYang Zhong #define DATA_SIZE 8
1173d9bb58e5SYang Zhong #include "softmmu_template.h"
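
/*
 * Illustrative note: with SOFTMMU_CODE_ACCESS defined, the four
 * inclusions above roughly instantiate the instruction-fetch ("code")
 * variants of the load helpers, named with the _cmmu suffix and reading
 * through the addr_code TLB field rather than addr_read.  GETPC() is
 * redefined to 0 because these helpers are not called from within
 * translated code, so there is no return address to unwind.
 */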
1174