xref: /openbmc/qemu/accel/tcg/cputlb.c (revision e09de0a2)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
36 
37 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
38 /* #define DEBUG_TLB */
39 /* #define DEBUG_TLB_LOG */
40 
41 #ifdef DEBUG_TLB
42 # define DEBUG_TLB_GATE 1
43 # ifdef DEBUG_TLB_LOG
44 #  define DEBUG_TLB_LOG_GATE 1
45 # else
46 #  define DEBUG_TLB_LOG_GATE 0
47 # endif
48 #else
49 # define DEBUG_TLB_GATE 0
50 # define DEBUG_TLB_LOG_GATE 0
51 #endif
52 
53 #define tlb_debug(fmt, ...) do { \
54     if (DEBUG_TLB_LOG_GATE) { \
55         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
56                       ## __VA_ARGS__); \
57     } else if (DEBUG_TLB_GATE) { \
58         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
59     } \
60 } while (0)
61 
62 #define assert_cpu_is_self(cpu) do {                              \
63         if (DEBUG_TLB_GATE) {                                     \
64             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
65         }                                                         \
66     } while (0)
67 
68 /* run_on_cpu_data.target_ptr should always be big enough for a
69  * target_ulong even on 32 bit builds */
70 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
71 
72 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
73  */
74 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
75 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
76 
77 void tlb_init(CPUState *cpu)
78 {
79     CPUArchState *env = cpu->env_ptr;
80 
81     qemu_spin_init(&env->tlb_c.lock);
82 }
83 
/* flush_all_helper: run fn across all cpus
 *
 * Queues fn as asynchronous work on every vCPU except src.  The caller
 * decides how to handle src itself: either run fn directly, or queue it
 * with async_safe_run_on_cpu to create a synchronisation point where
 * all queued work finishes before execution starts again.
 */
91 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
92                              run_on_cpu_data d)
93 {
94     CPUState *cpu;
95 
96     CPU_FOREACH(cpu) {
97         if (cpu != src) {
98             async_run_on_cpu(cpu, fn, d);
99         }
100     }
101 }
102 
103 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
104 {
105     CPUState *cpu;
106     size_t full = 0, part = 0, elide = 0;
107 
108     CPU_FOREACH(cpu) {
109         CPUArchState *env = cpu->env_ptr;
110 
111         full += atomic_read(&env->tlb_c.full_flush_count);
112         part += atomic_read(&env->tlb_c.part_flush_count);
113         elide += atomic_read(&env->tlb_c.elide_flush_count);
114     }
115     *pfull = full;
116     *ppart = part;
117     *pelide = elide;
118 }
119 
/*
 * Invalidate all entries of one mmu index: fill the main and victim
 * tables with -1 (a comparator value no lookup can match), and reset
 * the per-mmuidx large-page tracking and victim-slot rotation index.
 * Called with tlb_c.lock held.
 */
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}
128 
/*
 * Flush, on the current vCPU, every mmu index whose bit is set in
 * data.host_int.  Runs either directly on the vCPU thread or as work
 * queued by tlb_flush_by_mmuidx.
 */
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    qemu_spin_lock(&env->tlb_c.lock);
    /* These indexes are no longer pending once we flush them below. */
    env->tlb_c.pending_flush &= ~mmu_idx_bitmask;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_flush_one_mmuidx_locked(env, mmu_idx);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    /* Cached TB jumps may target translations of the flushed mappings. */
    cpu_tb_jmp_cache_clear(cpu);

    /* Counters are written with atomic_set so tlb_flush_counts can read
       them from other threads. */
    if (mmu_idx_bitmask == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(mmu_idx_bitmask));
    }
}
159 
/*
 * Flush the TLBs of CPU for every mmu index set in IDXMAP.
 *
 * When called cross-thread, the work is queued on the target vCPU;
 * indexes already recorded in tlb_c.pending_flush have a flush queued
 * and are elided from the new request.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        CPUArchState *env = cpu->env_ptr;
        uint16_t pending, to_clean;

        /* Record the request and keep only the bits not already pending. */
        qemu_spin_lock(&env->tlb_c.lock);
        pending = env->tlb_c.pending_flush;
        to_clean = idxmap & ~pending;
        env->tlb_c.pending_flush = pending | idxmap;
        qemu_spin_unlock(&env->tlb_c.lock);

        if (to_clean) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", to_clean);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(to_clean));
        }
    } else {
        /* Own thread (or vCPU not yet started): flush synchronously. */
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}
183 
/* Flush the entire TLB of CPU: all mmu indexes. */
void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
188 
189 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
190 {
191     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
192 
193     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
194 
195     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
196     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
197 }
198 
/* Flush the entire TLB of every vCPU; the source flushes synchronously. */
void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}
203 
204 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
205 {
206     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
207 
208     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
209 
210     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
211     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
212 }
213 
/* Flush the entire TLB of every vCPU with a synchronisation point. */
void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
218 
219 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
220                                         target_ulong page)
221 {
222     return tlb_hit_page(tlb_entry->addr_read, page) ||
223            tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
224            tlb_hit_page(tlb_entry->addr_code, page);
225 }
226 
227 /* Called with tlb_c.lock held */
228 static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
229                                           target_ulong page)
230 {
231     if (tlb_hit_page_anyprot(tlb_entry, page)) {
232         memset(tlb_entry, -1, sizeof(*tlb_entry));
233     }
234 }
235 
236 /* Called with tlb_c.lock held */
237 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
238                                               target_ulong page)
239 {
240     int k;
241 
242     assert_cpu_is_self(ENV_GET_CPU(env));
243     for (k = 0; k < CPU_VTLB_SIZE; k++) {
244         tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
245     }
246 }
247 
/*
 * Flush one page from mmu index MIDX.  The TLB cannot represent large
 * pages individually, so if PAGE falls inside the recorded large-page
 * region we must flush the whole index instead of a single entry.
 * Called with tlb_c.lock held.
 */
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        /* Flush both the main-table entry and any victim-table copies. */
        tlb_flush_entry_locked(tlb_entry(env, midx, page), page);
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
265 
266 /* As we are going to hijack the bottom bits of the page address for a
267  * mmuidx bit mask we need to fail to build if we can't do that
268  */
269 QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
270 
/*
 * Flush one page on the current vCPU.  data.target_ptr packs the
 * page-aligned address with the mmuidx bitmap in the low bits (see
 * tlb_flush_page_by_mmuidx and the QEMU_BUILD_BUG_ON above).
 */
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    /* Drop jump-cache entries for TBs on the flushed page. */
    tb_flush_jmp_cache(cpu, addr);
}
295 
296 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
297 {
298     target_ulong addr_and_mmu_idx;
299 
300     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
301 
302     /* This should already be page aligned */
303     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
304     addr_and_mmu_idx |= idxmap;
305 
306     if (!qemu_cpu_is_self(cpu)) {
307         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
308                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
309     } else {
310         tlb_flush_page_by_mmuidx_async_work(
311             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
312     }
313 }
314 
/* Flush ADDR from every mmu index of CPU's TLB. */
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
319 
320 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
321                                        uint16_t idxmap)
322 {
323     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
324     target_ulong addr_and_mmu_idx;
325 
326     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
327 
328     /* This should already be page aligned */
329     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
330     addr_and_mmu_idx |= idxmap;
331 
332     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
333     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
334 }
335 
/* Flush ADDR from every mmu index on every vCPU. */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}
340 
341 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
342                                               target_ulong addr,
343                                               uint16_t idxmap)
344 {
345     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
346     target_ulong addr_and_mmu_idx;
347 
348     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
349 
350     /* This should already be page aligned */
351     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
352     addr_and_mmu_idx |= idxmap;
353 
354     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
355     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
356 }
357 
/* Flush ADDR from every mmu index on every vCPU, with synchronisation. */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
362 
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing the CODE dirty bit makes subsequent writes to this page
       take the notdirty slow path, catching self-modifying code. */
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}
370 
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
377 
378 
379 /*
380  * Dirty write flag handling
381  *
382  * When the TCG code writes to a location it looks up the address in
383  * the TLB and uses that data to compute the final address. If any of
384  * the lower bits of the address are set then the slow path is forced.
385  * There are a number of reasons to do this but for normal RAM the
386  * most usual is detecting writes to code regions which may invalidate
387  * generated code.
388  *
389  * Other vCPUs might be reading their TLBs during guest execution, so we update
390  * te->addr_write with atomic_set. We don't need to worry about this for
391  * oversized guests as MTTCG is disabled for them.
392  *
393  * Called with tlb_c.lock held.
394  */
/*
 * Set TLB_NOTDIRTY on TLB_ENTRY's write address if its host translation
 * falls within [START, START + LENGTH), forcing later writes through the
 * slow path where dirty tracking can observe them.
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    /* Only plain RAM entries qualify; invalid, MMIO and already-notdirty
       entries are left untouched. */
    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            /* MTTCG is disabled for oversized guests, so a plain
               read-modify-write is safe (see comment above). */
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            /* Other vCPUs may read addr_write concurrently during
               execution, hence atomic_set. */
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
413 
/*
 * Copy a whole TLB entry.
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 * The structure assignment itself is not atomic; the lock plus
 * owner-thread restriction provide the required exclusion.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}
422 
423 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
424  * the target vCPU).
425  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
426  * thing actually updated is the target TLB entry ->addr_write flags.
427  */
428 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
429 {
430     CPUArchState *env;
431 
432     int mmu_idx;
433 
434     env = cpu->env_ptr;
435     qemu_spin_lock(&env->tlb_c.lock);
436     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
437         unsigned int i;
438 
439         for (i = 0; i < CPU_TLB_SIZE; i++) {
440             tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
441                                          length);
442         }
443 
444         for (i = 0; i < CPU_VTLB_SIZE; i++) {
445             tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
446                                          length);
447         }
448     }
449     qemu_spin_unlock(&env->tlb_c.lock);
450 }
451 
/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    /* If this entry is the page in question and was marked not-dirty,
       drop TLB_NOTDIRTY so writes take the fast path again. */
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
460 
461 /* update the TLB corresponding to virtual page vaddr
462    so that it is no longer dirty */
463 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
464 {
465     CPUArchState *env = cpu->env_ptr;
466     int mmu_idx;
467 
468     assert_cpu_is_self(cpu);
469 
470     vaddr &= TARGET_PAGE_MASK;
471     qemu_spin_lock(&env->tlb_c.lock);
472     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
473         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
474     }
475 
476     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
477         int k;
478         for (k = 0; k < CPU_VTLB_SIZE; k++) {
479             tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
480         }
481     }
482     qemu_spin_unlock(&env->tlb_c.lock);
483 }
484 
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    /* NOTE(review): ~(size - 1) only forms a contiguous mask when size
       is a power of two — presumably guaranteed by callers; confirm. */
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        /* Widen the mask until one region covers both pages. */
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}
508 
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * @cpu: vCPU whose TLB is updated (must be the current thread's vCPU)
 * @vaddr: guest virtual address (any offset within the page)
 * @paddr: guest physical address the page maps to
 * @attrs: memory transaction attributes for accesses via this entry
 * @prot: PAGE_READ/WRITE/EXEC permission bits (may be reduced by the
 *        translation below)
 * @mmu_idx: mmu index to install the entry into
 * @size: mapping size; > TARGET_PAGE_SIZE engages large-page tracking,
 *        < TARGET_PAGE_SIZE forces TLB_RECHECK slow-path entries
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        /* Record the region so tlb_flush_page_locked can over-flush. */
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    /* Publish the completed entry in one locked copy.  */
    copy_tlb_helper_locked(te, &tn);
    qemu_spin_unlock(&env->tlb_c.lock);
}
651 
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
662 
663 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
664 {
665     ram_addr_t ram_addr;
666 
667     ram_addr = qemu_ram_addr_from_host(ptr);
668     if (ram_addr == RAM_ADDR_INVALID) {
669         error_report("Bad ram pointer %p", ptr);
670         abort();
671     }
672     return ram_addr;
673 }
674 
/*
 * Perform the slow-path portion of a load: either an I/O access via
 * IOTLBENTRY, or (when RECHECK) a sub-page mapping whose MMU check must
 * be repeated first.  Returns the value read (up to SIZE bytes).
 * May longjump out via tlb_fill or cpu_transaction_failed.
 */
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    /* iotlbentry->addr holds (iotlb - vaddr_page); adding the full vaddr
       back yields the MemoryRegion offset (see tlb_set_page_with_attrs). */
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        /* I/O mid-TB: retranslate so the access ends its TB. */
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    /* Devices that require it are accessed under the BQL. */
    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
742 
/*
 * Perform the slow-path portion of a store: either an I/O access via
 * IOTLBENTRY, or (when RECHECK) a sub-page mapping whose MMU check must
 * be repeated first.  Mirror of io_readx.
 * May longjump out via tlb_fill or cpu_transaction_failed.
 */
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = tlb_addr_write(entry);
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            stn_p((void *)haddr, size, val);
            return;
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    /* Recover the MemoryRegion offset; see io_readx for the arithmetic. */
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        /* I/O mid-TB: retranslate so the access ends its TB. */
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    /* Devices that require it are accessed under the BQL. */
    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
806 
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  ELT_OFS selects which comparator field
   (addr_read/addr_write/addr_code) is matched against PAGE. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            /* The three-way swap is done under the lock so cross-vCPU
               readers never see a torn entry. */
            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            /* iotlb entries are only touched by the owner thread, so
               they can be swapped outside the lock. */
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}
844 
/* Macro to call victim_tlb_hit using the local variables `env`,
   `mmu_idx` and `index` from the use context; TY names the CPUTLBEntry
   comparator field to match. */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
849 
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        /* Try the victim TLB, then fall back to a full MMU fill
           (retaddr 0: we are not called from the middle of a TB). */
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}
884 
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    /* 'index' is read implicitly by the VICTIM_TLB_HIT expansion below;
     * it is not otherwise dead.  */
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            /* tlb_fill either installs a valid write entry or raises the
             * guest fault and does not return.  */
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}
905 
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.
 *
 * On success, *ndi is initialised; if ndi->active is set on return, the
 * caller must invoke memory_notdirty_write_complete() after the store
 * (see ATOMIC_MMU_CLEANUP below).  On any failure path this function
 * does not return: it exits via cpu_unaligned_access, tlb_fill's guest
 * fault, or cpu_loop_exit_atomic (stop-the-world single-step fallback).
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    /* 'index' is read by the VICTIM_TLB_HIT expansion below.  */
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);   /* guest-requested alignment */
    int s_bits = mop & MO_SIZE;             /* log2 of the access size */
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        /* Re-read the (possibly refilled) entry; drop TLB_INVALID_MASK
         * since the fill above has just validated the page.  */
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    /* Writing to a clean (not-yet-dirty) RAM page: arrange for the
     * dirty bitmap update that the caller performs after the store.  */
    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}
981 
/* Convert a value between host byte order and guest big/little-endian:
 * whichever of the two matches the target is the identity, the other
 * byte-swaps.  Used by the softmmu templates included below.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
989 
/* Instantiate the out-of-line data load/store helpers (helper_ld*_mmu /
 * helper_st*_mmu) for each access size.  softmmu_template.h consumes and
 * #undefs DATA_SIZE on every inclusion, so it is redefined each time.  */
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
1003 
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
/* ATOMIC_MMU_DECLS/LOOKUP/CLEANUP are the hooks atomic_template.h expands
 * around each operation: declare the NotDirtyInfo, translate the guest
 * address (which may not return -- see atomic_mmu_lookup above), and
 * finish the dirty-bitmap update if the lookup armed one.  */
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* 16-byte helpers need either a native cmpxchg16b or 128-bit atomics.  */
#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
1037 
/* Second set of helpers are directly callable from TCG as helpers.  */

/* Same operations as above, but without the explicit retaddr argument:
 * the return address is taken from GETPC() at the call site instead, and
 * the helper names drop the _mmu suffix.  No 16-byte variants here.  */
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
1060 
/* Code access functions.  */

/* Instantiate the instruction-fetch variants (_cmmu suffix).  GETPC is
 * forced to 0 because these are not called from generated code, so there
 * is no translated-code return address to unwind from.  */
#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
1080