xref: /openbmc/qemu/accel/tcg/cputlb.c (revision f1be3696)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
36 
37 /* DEBUG defines; enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
38 /* #define DEBUG_TLB */
39 /* #define DEBUG_TLB_LOG */
40 
41 #ifdef DEBUG_TLB
42 # define DEBUG_TLB_GATE 1
43 # ifdef DEBUG_TLB_LOG
44 #  define DEBUG_TLB_LOG_GATE 1
45 # else
46 #  define DEBUG_TLB_LOG_GATE 0
47 # endif
48 #else
49 # define DEBUG_TLB_GATE 0
50 # define DEBUG_TLB_LOG_GATE 0
51 #endif
52 
53 #define tlb_debug(fmt, ...) do { \
54     if (DEBUG_TLB_LOG_GATE) { \
55         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
56                       ## __VA_ARGS__); \
57     } else if (DEBUG_TLB_GATE) { \
58         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
59     } \
60 } while (0)
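/*
 * Illustrative usage (annotation, not part of the original file): with
 * DEBUG_TLB defined the message below goes to stderr; with DEBUG_TLB_LOG
 * also defined it goes to the CPU_LOG_MMU log instead; without DEBUG_TLB
 * both gates are zero and the call is compiled away entirely.
 *
 *     tlb_debug("flushing mmu_idx %d\n", mmu_idx);
 */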
61 
62 #define assert_cpu_is_self(cpu) do {                              \
63         if (DEBUG_TLB_GATE) {                                     \
64             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
65         }                                                         \
66     } while (0)
67 
68 /* run_on_cpu_data.target_ptr should always be big enough for a
69  * target_ulong even on 32 bit builds */
70 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
71 
72 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
73  */
74 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
75 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
76 
77 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
78 {
79     return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
80 }
81 
82 static void tlb_window_reset(CPUTLBWindow *window, int64_t ns,
83                              size_t max_entries)
84 {
85     window->begin_ns = ns;
86     window->max_entries = max_entries;
87 }
88 
89 static void tlb_dyn_init(CPUArchState *env)
90 {
91     int i;
92 
93     for (i = 0; i < NB_MMU_MODES; i++) {
94         CPUTLBDesc *desc = &env->tlb_d[i];
95         size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
96 
97         tlb_window_reset(&desc->window, get_clock_realtime(), 0);
98         desc->n_used_entries = 0;
99         env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
100         env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
101         env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
102     }
103 }
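/*
 * Annotation: tlb_mask stores (n_entries - 1) pre-shifted by
 * CPU_TLB_ENTRY_BITS, so it encodes both the byte size of the table
 * (see sizeof_tlb()) and the entry index for an address.  Conceptually,
 * the tlb_index() helper used throughout this file reduces to roughly:
 *
 *     size_mask = env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS;
 *     index     = (addr >> TARGET_PAGE_BITS) & size_mask;
 */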
104 
105 /**
106  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
107  * @env: CPU that owns the TLB
108  * @mmu_idx: MMU index of the TLB
109  *
110  * Called with tlb_lock held.
111  *
112  * We have two main constraints when resizing a TLB: (1) we only resize it
113  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
114  * the array or unnecessarily flushing it), which means we do not control how
115  * frequently the resizing can occur; (2) we don't have access to the guest's
116  * future scheduling decisions, and therefore have to decide the magnitude of
117  * the resize based on past observations.
118  *
119  * In general, a memory-hungry process can benefit greatly from an appropriately
120  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
121  * we just have to make the TLB as large as possible; while an oversized TLB
122  * results in minimal TLB miss rates, it also takes longer to be flushed
123  * (flushes can be _very_ frequent), and the reduced locality can also hurt
124  * performance.
125  *
126  * To achieve near-optimal performance for all kinds of workloads, we:
127  *
128  * 1. Aggressively increase the size of the TLB when the use rate of the
129  * TLB being flushed is high, since it is likely that in the near future this
130  * memory-hungry process will execute again, and its memory hungriness will
131  * probably be similar.
132  *
133  * 2. Slowly reduce the size of the TLB as the use rate declines over a
134  * reasonably large time window. The rationale is that if in such a time window
135  * we have not observed a high TLB use rate, it is likely that we won't observe
136  * it in the near future. In that case, once a time window expires we downsize
137  * the TLB to match the maximum use rate observed in the window.
138  *
139  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
140  * since in that range performance is likely near-optimal. Recall that the TLB
141  * is direct mapped, so we want the use rate to be low (or at least not too
142  * high), since otherwise we are likely to have a significant number of
143  * conflict misses.
144  */
145 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
146 {
147     CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
148     size_t old_size = tlb_n_entries(env, mmu_idx);
149     size_t rate;
150     size_t new_size = old_size;
151     int64_t now = get_clock_realtime();
152     int64_t window_len_ms = 100;
153     int64_t window_len_ns = window_len_ms * 1000 * 1000;
154     bool window_expired = now > desc->window.begin_ns + window_len_ns;
155 
156     if (desc->n_used_entries > desc->window.max_entries) {
157         desc->window.max_entries = desc->n_used_entries;
158     }
159     rate = desc->window.max_entries * 100 / old_size;
160 
161     if (rate > 70) {
162         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
163     } else if (rate < 30 && window_expired) {
164         size_t ceil = pow2ceil(desc->window.max_entries);
165         size_t expected_rate = desc->window.max_entries * 100 / ceil;
166 
167         /*
168          * Avoid undersizing when the max number of entries seen is just below
169          * a pow2. For instance, if max_entries == 1025, the expected use rate
170          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
171          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
172          * later. Thus, make sure that the expected use rate remains below 70%.
173          * (and since we double the size, that means the lowest rate we'd
174          * expect to get is 35%, which is still in the 30-70% range where
175          * we consider that the size is appropriate.)
176          */
177         if (expected_rate > 70) {
178             ceil *= 2;
179         }
180         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
181     }
182 
183     if (new_size == old_size) {
184         if (window_expired) {
185             tlb_window_reset(&desc->window, now, desc->n_used_entries);
186         }
187         return;
188     }
189 
190     g_free(env->tlb_table[mmu_idx]);
191     g_free(env->iotlb[mmu_idx]);
192 
193     tlb_window_reset(&desc->window, now, 0);
194     /* desc->n_used_entries is cleared by the caller */
195     env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
196     env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
197     env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
198     /*
199      * If the allocations fail, try smaller sizes. We just freed some
200      * memory, so going back to half of new_size has a good chance of working.
201      * Increased memory pressure elsewhere in the system might cause the
202      * allocations to fail though, so we progressively reduce the allocation
203      * size, aborting if we cannot even allocate the smallest TLB we support.
204      */
205     while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
206         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
207             error_report("%s: %s", __func__, strerror(errno));
208             abort();
209         }
210         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
211         env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
212 
213         g_free(env->tlb_table[mmu_idx]);
214         g_free(env->iotlb[mmu_idx]);
215         env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
216         env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
217     }
218 }
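/*
 * Worked example of the heuristic above (illustrative numbers only):
 * with old_size == 1024 and window.max_entries == 800, rate == 78 > 70,
 * so the table doubles to 2048 entries on this flush (capped at
 * 1 << CPU_TLB_DYN_MAX_BITS).  Conversely, with old_size == 4096,
 * max_entries == 200 and an expired window, rate == 4 < 30,
 * pow2ceil(200) == 256 and expected_rate == 78 > 70, so ceil is doubled
 * and the table shrinks to 512 entries (assuming 512 is not below the
 * 1 << CPU_TLB_DYN_MIN_BITS minimum).
 */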
219 
220 static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
221 {
222     tlb_mmu_resize_locked(env, mmu_idx);
223     memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
224     env->tlb_d[mmu_idx].n_used_entries = 0;
225 }
226 
227 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
228 {
229     env->tlb_d[mmu_idx].n_used_entries++;
230 }
231 
232 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
233 {
234     env->tlb_d[mmu_idx].n_used_entries--;
235 }
236 
237 void tlb_init(CPUState *cpu)
238 {
239     CPUArchState *env = cpu->env_ptr;
240 
241     qemu_spin_init(&env->tlb_c.lock);
242 
243     /* Ensure that cpu_reset performs a full flush.  */
244     env->tlb_c.dirty = ALL_MMUIDX_BITS;
245 
246     tlb_dyn_init(env);
247 }
248 
249 /* flush_all_helper: run fn across all cpus
250  *
251  * Queues fn on every cpu other than src.  Callers that need a
252  * synchronisation point additionally queue the src cpu's own work as
253  * "safe" work (see the *_all_cpus_synced variants below), so that all
254  * queued work is finished before execution starts again.
255  */
256 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
257                              run_on_cpu_data d)
258 {
259     CPUState *cpu;
260 
261     CPU_FOREACH(cpu) {
262         if (cpu != src) {
263             async_run_on_cpu(cpu, fn, d);
264         }
265     }
266 }
267 
268 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
269 {
270     CPUState *cpu;
271     size_t full = 0, part = 0, elide = 0;
272 
273     CPU_FOREACH(cpu) {
274         CPUArchState *env = cpu->env_ptr;
275 
276         full += atomic_read(&env->tlb_c.full_flush_count);
277         part += atomic_read(&env->tlb_c.part_flush_count);
278         elide += atomic_read(&env->tlb_c.elide_flush_count);
279     }
280     *pfull = full;
281     *ppart = part;
282     *pelide = elide;
283 }
284 
285 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
286 {
287     tlb_table_flush_by_mmuidx(env, mmu_idx);
288     memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
289     env->tlb_d[mmu_idx].large_page_addr = -1;
290     env->tlb_d[mmu_idx].large_page_mask = -1;
291     env->tlb_d[mmu_idx].vindex = 0;
292 }
293 
294 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
295 {
296     CPUArchState *env = cpu->env_ptr;
297     uint16_t asked = data.host_int;
298     uint16_t all_dirty, work, to_clean;
299 
300     assert_cpu_is_self(cpu);
301 
302     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
303 
304     qemu_spin_lock(&env->tlb_c.lock);
305 
306     all_dirty = env->tlb_c.dirty;
307     to_clean = asked & all_dirty;
308     all_dirty &= ~to_clean;
309     env->tlb_c.dirty = all_dirty;
310 
311     for (work = to_clean; work != 0; work &= work - 1) {
312         int mmu_idx = ctz32(work);
313         tlb_flush_one_mmuidx_locked(env, mmu_idx);
314     }
315 
316     qemu_spin_unlock(&env->tlb_c.lock);
317 
318     cpu_tb_jmp_cache_clear(cpu);
319 
320     if (to_clean == ALL_MMUIDX_BITS) {
321         atomic_set(&env->tlb_c.full_flush_count,
322                    env->tlb_c.full_flush_count + 1);
323     } else {
324         atomic_set(&env->tlb_c.part_flush_count,
325                    env->tlb_c.part_flush_count + ctpop16(to_clean));
326         if (to_clean != asked) {
327             atomic_set(&env->tlb_c.elide_flush_count,
328                        env->tlb_c.elide_flush_count +
329                        ctpop16(asked & ~to_clean));
330         }
331     }
332 }
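/*
 * Illustrative example of the bookkeeping above: with asked == 0b0110
 * and tlb_c.dirty == 0b0101, to_clean == 0b0100, so only mmu_idx 2 is
 * flushed; part_flush_count grows by 1 and elide_flush_count grows by
 * ctpop16(0b0010) == 1 for the requested but already clean mmu_idx 1.
 * The "work &= work - 1" step clears the lowest set bit per iteration,
 * visiting exactly the indexes in to_clean.
 */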
333 
334 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
335 {
336     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
337 
338     if (cpu->created && !qemu_cpu_is_self(cpu)) {
339         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
340                          RUN_ON_CPU_HOST_INT(idxmap));
341     } else {
342         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
343     }
344 }
345 
346 void tlb_flush(CPUState *cpu)
347 {
348     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
349 }
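/*
 * Example call (hypothetical MMU index names, for illustration only):
 * a target that wants to flush just its kernel- and user-mode TLBs for
 * a given vCPU could do
 *
 *     tlb_flush_by_mmuidx(cpu, (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 *
 * tlb_flush() above is simply the flush-everything case of this.
 */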
350 
351 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
352 {
353     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
354 
355     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
356 
357     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
358     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
359 }
360 
361 void tlb_flush_all_cpus(CPUState *src_cpu)
362 {
363     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
364 }
365 
366 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
367 {
368     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
369 
370     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
371 
372     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
373     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
374 }
375 
376 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
377 {
378     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
379 }
380 
381 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
382                                         target_ulong page)
383 {
384     return tlb_hit_page(tlb_entry->addr_read, page) ||
385            tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
386            tlb_hit_page(tlb_entry->addr_code, page);
387 }
388 
389 /**
390  * tlb_entry_is_empty - return true if the entry is not in use
391  * @te: pointer to CPUTLBEntry
392  */
393 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
394 {
395     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
396 }
397 
398 /* Called with tlb_c.lock held */
399 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
400                                           target_ulong page)
401 {
402     if (tlb_hit_page_anyprot(tlb_entry, page)) {
403         memset(tlb_entry, -1, sizeof(*tlb_entry));
404         return true;
405     }
406     return false;
407 }
408 
409 /* Called with tlb_c.lock held */
410 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
411                                               target_ulong page)
412 {
413     int k;
414 
415     assert_cpu_is_self(ENV_GET_CPU(env));
416     for (k = 0; k < CPU_VTLB_SIZE; k++) {
417         if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
418             tlb_n_used_entries_dec(env, mmu_idx);
419         }
420     }
421 }
422 
423 static void tlb_flush_page_locked(CPUArchState *env, int midx,
424                                   target_ulong page)
425 {
426     target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
427     target_ulong lp_mask = env->tlb_d[midx].large_page_mask;
428 
429     /* Check if we need to flush due to large pages.  */
430     if ((page & lp_mask) == lp_addr) {
431         tlb_debug("forcing full flush midx %d ("
432                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
433                   midx, lp_addr, lp_mask);
434         tlb_flush_one_mmuidx_locked(env, midx);
435     } else {
436         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
437             tlb_n_used_entries_dec(env, midx);
438         }
439         tlb_flush_vtlb_page_locked(env, midx, page);
440     }
441 }
442 
443 /* As we are going to hijack the bottom bits of the page address for an
444  * mmuidx bit mask, we need to fail to build if we can't do that
445  */
446 QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
447 
448 static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
449                                                 run_on_cpu_data data)
450 {
451     CPUArchState *env = cpu->env_ptr;
452     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
453     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
454     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
455     int mmu_idx;
456 
457     assert_cpu_is_self(cpu);
458 
459     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
460               addr, mmu_idx_bitmap);
461 
462     qemu_spin_lock(&env->tlb_c.lock);
463     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
464         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
465             tlb_flush_page_locked(env, mmu_idx, addr);
466         }
467     }
468     qemu_spin_unlock(&env->tlb_c.lock);
469 
470     tb_flush_jmp_cache(cpu, addr);
471 }
472 
473 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
474 {
475     target_ulong addr_and_mmu_idx;
476 
477     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
478 
479     /* This should already be page aligned */
480     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
481     addr_and_mmu_idx |= idxmap;
482 
483     if (!qemu_cpu_is_self(cpu)) {
484         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
485                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
486     } else {
487         tlb_flush_page_by_mmuidx_async_work(
488             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
489     }
490 }
491 
492 void tlb_flush_page(CPUState *cpu, target_ulong addr)
493 {
494     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
495 }
496 
497 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
498                                        uint16_t idxmap)
499 {
500     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
501     target_ulong addr_and_mmu_idx;
502 
503     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
504 
505     /* This should already be page aligned */
506     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
507     addr_and_mmu_idx |= idxmap;
508 
509     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
510     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
511 }
512 
513 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
514 {
515     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
516 }
517 
518 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
519                                               target_ulong addr,
520                                               uint16_t idxmap)
521 {
522     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
523     target_ulong addr_and_mmu_idx;
524 
525     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
526 
527     /* This should already be page aligned */
528     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
529     addr_and_mmu_idx |= idxmap;
530 
531     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
532     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
533 }
534 
535 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
536 {
537     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
538 }
539 
540 /* update the TLBs so that writes to code in the physical page 'ram_addr'
541    can be detected */
542 void tlb_protect_code(ram_addr_t ram_addr)
543 {
544     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
545                                              DIRTY_MEMORY_CODE);
546 }
547 
548 /* update the TLB so that writes in the physical page 'ram_addr' are no
549    longer tested for self-modifying code */
550 void tlb_unprotect_code(ram_addr_t ram_addr)
551 {
552     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
553 }
554 
555 
556 /*
557  * Dirty write flag handling
558  *
559  * When the TCG code writes to a location it looks up the address in
560  * the TLB and uses that data to compute the final address. If any of
561  * the lower bits of the address are set then the slow path is forced.
562  * There are a number of reasons to do this but for normal RAM the
563  * most usual is detecting writes to code regions which may invalidate
564  * generated code.
565  *
566  * Other vCPUs might be reading their TLBs during guest execution, so we update
567  * te->addr_write with atomic_set. We don't need to worry about this for
568  * oversized guests as MTTCG is disabled for them.
569  *
570  * Called with tlb_c.lock held.
571  */
572 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
573                                          uintptr_t start, uintptr_t length)
574 {
575     uintptr_t addr = tlb_entry->addr_write;
576 
577     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
578         addr &= TARGET_PAGE_MASK;
579         addr += tlb_entry->addend;
580         if ((addr - start) < length) {
581 #if TCG_OVERSIZED_GUEST
582             tlb_entry->addr_write |= TLB_NOTDIRTY;
583 #else
584             atomic_set(&tlb_entry->addr_write,
585                        tlb_entry->addr_write | TLB_NOTDIRTY);
586 #endif
587         }
588     }
589 }
590 
591 /*
592  * Called with tlb_c.lock held.
593  * Called only from the vCPU context, i.e. the TLB's owner thread.
594  */
595 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
596 {
597     *d = *s;
598 }
599 
600 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
601  * the target vCPU).
602  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
603  * thing actually updated is the target TLB entry ->addr_write flags.
604  */
605 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
606 {
607     CPUArchState *env;
608 
609     int mmu_idx;
610 
611     env = cpu->env_ptr;
612     qemu_spin_lock(&env->tlb_c.lock);
613     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
614         unsigned int i;
615         unsigned int n = tlb_n_entries(env, mmu_idx);
616 
617         for (i = 0; i < n; i++) {
618             tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
619                                          length);
620         }
621 
622         for (i = 0; i < CPU_VTLB_SIZE; i++) {
623             tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
624                                          length);
625         }
626     }
627     qemu_spin_unlock(&env->tlb_c.lock);
628 }
629 
630 /* Called with tlb_c.lock held */
631 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
632                                          target_ulong vaddr)
633 {
634     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
635         tlb_entry->addr_write = vaddr;
636     }
637 }
638 
639 /* update the TLB corresponding to virtual page vaddr
640    so that it is no longer dirty */
641 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
642 {
643     CPUArchState *env = cpu->env_ptr;
644     int mmu_idx;
645 
646     assert_cpu_is_self(cpu);
647 
648     vaddr &= TARGET_PAGE_MASK;
649     qemu_spin_lock(&env->tlb_c.lock);
650     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
651         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
652     }
653 
654     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
655         int k;
656         for (k = 0; k < CPU_VTLB_SIZE; k++) {
657             tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
658         }
659     }
660     qemu_spin_unlock(&env->tlb_c.lock);
661 }
662 
663 /* Our TLB does not support large pages, so remember the area covered by
664    large pages and trigger a full TLB flush if these are invalidated.  */
665 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
666                                target_ulong vaddr, target_ulong size)
667 {
668     target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
669     target_ulong lp_mask = ~(size - 1);
670 
671     if (lp_addr == (target_ulong)-1) {
672         /* No previous large page.  */
673         lp_addr = vaddr;
674     } else {
675         /* Extend the existing region to include the new page.
676            This is a compromise between unnecessary flushes and
677            the cost of maintaining a full variable size TLB.  */
678         lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
679         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
680             lp_mask <<= 1;
681         }
682     }
683     env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
684     env->tlb_d[mmu_idx].large_page_mask = lp_mask;
685 }
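/*
 * Worked example (illustrative, 32-bit addresses): if a 2MB page at
 * vaddr 0x00200000 was recorded first (lp_addr == 0x00200000,
 * lp_mask == 0xffe00000) and a second 2MB page at 0x00600000 is added,
 * the loop above widens lp_mask to 0xff800000, so the remembered region
 * becomes [0x00000000, 0x00800000).  Any tlb_flush_page() inside that
 * region then falls back to a full flush of the mmu_idx.
 */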
686 
687 /* Add a new TLB entry. At most one entry for a given virtual address
688  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
689  * supplied size is only used by tlb_flush_page.
690  *
691  * Called from TCG-generated code, which is under an RCU read-side
692  * critical section.
693  */
694 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
695                              hwaddr paddr, MemTxAttrs attrs, int prot,
696                              int mmu_idx, target_ulong size)
697 {
698     CPUArchState *env = cpu->env_ptr;
699     MemoryRegionSection *section;
700     unsigned int index;
701     target_ulong address;
702     target_ulong code_address;
703     uintptr_t addend;
704     CPUTLBEntry *te, tn;
705     hwaddr iotlb, xlat, sz, paddr_page;
706     target_ulong vaddr_page;
707     int asidx = cpu_asidx_from_attrs(cpu, attrs);
708 
709     assert_cpu_is_self(cpu);
710 
711     if (size <= TARGET_PAGE_SIZE) {
712         sz = TARGET_PAGE_SIZE;
713     } else {
714         tlb_add_large_page(env, mmu_idx, vaddr, size);
715         sz = size;
716     }
717     vaddr_page = vaddr & TARGET_PAGE_MASK;
718     paddr_page = paddr & TARGET_PAGE_MASK;
719 
720     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
721                                                 &xlat, &sz, attrs, &prot);
722     assert(sz >= TARGET_PAGE_SIZE);
723 
724     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
725               " prot=%x idx=%d\n",
726               vaddr, paddr, prot, mmu_idx);
727 
728     address = vaddr_page;
729     if (size < TARGET_PAGE_SIZE) {
730         /*
731          * Slow-path the TLB entries; we will repeat the MMU check and TLB
732          * fill on every access.
733          */
734         address |= TLB_RECHECK;
735     }
736     if (!memory_region_is_ram(section->mr) &&
737         !memory_region_is_romd(section->mr)) {
738         /* IO memory case */
739         address |= TLB_MMIO;
740         addend = 0;
741     } else {
742         /* TLB_MMIO for rom/romd handled below */
743         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
744     }
745 
746     code_address = address;
747     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
748                                             paddr_page, xlat, prot, &address);
749 
750     index = tlb_index(env, mmu_idx, vaddr_page);
751     te = tlb_entry(env, mmu_idx, vaddr_page);
752 
753     /*
754      * Hold the TLB lock for the rest of the function. We could acquire/release
755      * the lock several times in the function, but it is faster to amortize the
756      * acquisition cost by acquiring it just once. Note that this leads to
757      * a longer critical section, but this is not a concern since the TLB lock
758      * is unlikely to be contended.
759      */
760     qemu_spin_lock(&env->tlb_c.lock);
761 
762     /* Note that the tlb is no longer clean.  */
763     env->tlb_c.dirty |= 1 << mmu_idx;
764 
765     /* Make sure there's no cached translation for the new page.  */
766     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
767 
768     /*
769      * Only evict the old entry to the victim tlb if it's for a
770      * different page; otherwise just overwrite the stale data.
771      */
772     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
773         unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
774         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
775 
776         /* Evict the old entry into the victim tlb.  */
777         copy_tlb_helper_locked(tv, te);
778         env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
779         tlb_n_used_entries_dec(env, mmu_idx);
780     }
781 
782     /* refill the tlb */
783     /*
784      * At this point iotlb contains a physical section number in the lower
785      * TARGET_PAGE_BITS, and either
786      *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
787      *  + the offset within section->mr of the page base (otherwise)
788      * We subtract the vaddr_page (which is page aligned and thus won't
789      * disturb the low bits) to give an offset which can be added to the
790      * (non-page-aligned) vaddr of the eventual memory access to get
791      * the MemoryRegion offset for the access. Note that the vaddr we
792      * subtract here is that of the page base, and not the same as the
793      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
794      */
795     env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
796     env->iotlb[mmu_idx][index].attrs = attrs;
797 
798     /* Now calculate the new entry */
799     tn.addend = addend - vaddr_page;
800     if (prot & PAGE_READ) {
801         tn.addr_read = address;
802     } else {
803         tn.addr_read = -1;
804     }
805 
806     if (prot & PAGE_EXEC) {
807         tn.addr_code = code_address;
808     } else {
809         tn.addr_code = -1;
810     }
811 
812     tn.addr_write = -1;
813     if (prot & PAGE_WRITE) {
814         if ((memory_region_is_ram(section->mr) && section->readonly)
815             || memory_region_is_romd(section->mr)) {
816             /* Write access calls the I/O callback.  */
817             tn.addr_write = address | TLB_MMIO;
818         } else if (memory_region_is_ram(section->mr)
819                    && cpu_physical_memory_is_clean(
820                        memory_region_get_ram_addr(section->mr) + xlat)) {
821             tn.addr_write = address | TLB_NOTDIRTY;
822         } else {
823             tn.addr_write = address;
824         }
825         if (prot & PAGE_WRITE_INV) {
826             tn.addr_write |= TLB_INVALID_MASK;
827         }
828     }
829 
830     copy_tlb_helper_locked(te, &tn);
831     tlb_n_used_entries_inc(env, mmu_idx);
832     qemu_spin_unlock(&env->tlb_c.lock);
833 }
834 
835 /* Add a new TLB entry, but without specifying the memory
836  * transaction attributes to be used.
837  */
838 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
839                   hwaddr paddr, int prot,
840                   int mmu_idx, target_ulong size)
841 {
842     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
843                             prot, mmu_idx, size);
844 }
845 
846 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
847 {
848     ram_addr_t ram_addr;
849 
850     ram_addr = qemu_ram_addr_from_host(ptr);
851     if (ram_addr == RAM_ADDR_INVALID) {
852         error_report("Bad ram pointer %p", ptr);
853         abort();
854     }
855     return ram_addr;
856 }
857 
858 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
859                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
860                          MMUAccessType access_type, int size)
861 {
862     CPUState *cpu = ENV_GET_CPU(env);
863     hwaddr mr_offset;
864     MemoryRegionSection *section;
865     MemoryRegion *mr;
866     uint64_t val;
867     bool locked = false;
868     MemTxResult r;
869 
870     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
871     mr = section->mr;
872     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
873     cpu->mem_io_pc = retaddr;
874     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
875         cpu_io_recompile(cpu, retaddr);
876     }
877 
878     cpu->mem_io_vaddr = addr;
879     cpu->mem_io_access_type = access_type;
880 
881     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
882         qemu_mutex_lock_iothread();
883         locked = true;
884     }
885     r = memory_region_dispatch_read(mr, mr_offset,
886                                     &val, size, iotlbentry->attrs);
887     if (r != MEMTX_OK) {
888         hwaddr physaddr = mr_offset +
889             section->offset_within_address_space -
890             section->offset_within_region;
891 
892         cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
893                                mmu_idx, iotlbentry->attrs, r, retaddr);
894     }
895     if (locked) {
896         qemu_mutex_unlock_iothread();
897     }
898 
899     return val;
900 }
901 
902 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
903                       int mmu_idx, uint64_t val, target_ulong addr,
904                       uintptr_t retaddr, int size)
905 {
906     CPUState *cpu = ENV_GET_CPU(env);
907     hwaddr mr_offset;
908     MemoryRegionSection *section;
909     MemoryRegion *mr;
910     bool locked = false;
911     MemTxResult r;
912 
913     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
914     mr = section->mr;
915     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
916     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
917         cpu_io_recompile(cpu, retaddr);
918     }
919     cpu->mem_io_vaddr = addr;
920     cpu->mem_io_pc = retaddr;
921 
922     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
923         qemu_mutex_lock_iothread();
924         locked = true;
925     }
926     r = memory_region_dispatch_write(mr, mr_offset,
927                                      val, size, iotlbentry->attrs);
928     if (r != MEMTX_OK) {
929         hwaddr physaddr = mr_offset +
930             section->offset_within_address_space -
931             section->offset_within_region;
932 
933         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
934                                mmu_idx, iotlbentry->attrs, r, retaddr);
935     }
936     if (locked) {
937         qemu_mutex_unlock_iothread();
938     }
939 }
940 
941 /* Return true if ADDR is present in the victim tlb, and has been copied
942    back to the main tlb.  */
943 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
944                            size_t elt_ofs, target_ulong page)
945 {
946     size_t vidx;
947 
948     assert_cpu_is_self(ENV_GET_CPU(env));
949     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
950         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
951         target_ulong cmp;
952 
953         /* elt_ofs might correspond to .addr_write, so use atomic_read */
954 #if TCG_OVERSIZED_GUEST
955         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
956 #else
957         cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
958 #endif
959 
960         if (cmp == page) {
961             /* Found entry in victim tlb, swap tlb and iotlb.  */
962             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
963 
964             qemu_spin_lock(&env->tlb_c.lock);
965             copy_tlb_helper_locked(&tmptlb, tlb);
966             copy_tlb_helper_locked(tlb, vtlb);
967             copy_tlb_helper_locked(vtlb, &tmptlb);
968             qemu_spin_unlock(&env->tlb_c.lock);
969 
970             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
971             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
972             tmpio = *io; *io = *vio; *vio = tmpio;
973             return true;
974         }
975     }
976     return false;
977 }
978 
979 /* Macro to call the above, with local variables from the use context.  */
980 #define VICTIM_TLB_HIT(TY, ADDR) \
981   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
982                  (ADDR) & TARGET_PAGE_MASK)
983 
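/*
 * Typical use, as in the fast paths below: on a main-TLB miss, try the
 * victim TLB before paying for a full tlb_fill(), e.g.
 *
 *     if (!tlb_hit(tlb_addr_write(entry), addr)) {
 *         if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
 *                      mmu_idx, retaddr);
 *         }
 *     }
 */
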
984 /* NOTE: this function can trigger an exception */
985 /* NOTE2: the returned address is not exactly the physical address: it
986  * is actually a ram_addr_t (in system mode; the user mode emulation
987  * version of this function returns a guest virtual address).
988  */
989 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
990 {
991     uintptr_t mmu_idx = cpu_mmu_index(env, true);
992     uintptr_t index = tlb_index(env, mmu_idx, addr);
993     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
994     void *p;
995 
996     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
997         if (!VICTIM_TLB_HIT(addr_code, addr)) {
998             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
999             index = tlb_index(env, mmu_idx, addr);
1000             entry = tlb_entry(env, mmu_idx, addr);
1001         }
1002         assert(tlb_hit(entry->addr_code, addr));
1003     }
1004 
1005     if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
1006         /*
1007          * Return -1 if we can't translate and execute from an entire
1008          * page of RAM here, which will cause us to execute by loading
1009          * and translating one insn at a time, without caching:
1010          *  - TLB_RECHECK: means the MMU protection covers a smaller range
1011          *    than a target page, so we must redo the MMU check every insn
1012          *  - TLB_MMIO: region is not backed by RAM
1013          */
1014         return -1;
1015     }
1016 
1017     p = (void *)((uintptr_t)addr + entry->addend);
1018     return qemu_ram_addr_from_host_nofail(p);
1019 }
1020 
1021 /* Probe for whether the specified guest write access is permitted.
1022  * If it is not permitted then an exception will be taken in the same
1023  * way as if this were a real write access (and we will not return).
1024  * Otherwise the function will return, and there will be a valid
1025  * entry in the TLB for this access.
1026  */
1027 void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1028                  uintptr_t retaddr)
1029 {
1030     uintptr_t index = tlb_index(env, mmu_idx, addr);
1031     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1032 
1033     if (!tlb_hit(tlb_addr_write(entry), addr)) {
1034         /* TLB entry is for a different page */
1035         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1036             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1037                      mmu_idx, retaddr);
1038         }
1039     }
1040 }
1041 
1042 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1043  * operations or I/O operations to proceed.  Return the host address.  */
1044 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1045                                TCGMemOpIdx oi, uintptr_t retaddr,
1046                                NotDirtyInfo *ndi)
1047 {
1048     size_t mmu_idx = get_mmuidx(oi);
1049     uintptr_t index = tlb_index(env, mmu_idx, addr);
1050     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1051     target_ulong tlb_addr = tlb_addr_write(tlbe);
1052     TCGMemOp mop = get_memop(oi);
1053     int a_bits = get_alignment_bits(mop);
1054     int s_bits = mop & MO_SIZE;
1055     void *hostaddr;
1056 
1057     /* Adjust the given return address.  */
1058     retaddr -= GETPC_ADJ;
1059 
1060     /* Enforce guest required alignment.  */
1061     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1062         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1063         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1064                              mmu_idx, retaddr);
1065     }
1066 
1067     /* Enforce qemu required alignment.  */
1068     if (unlikely(addr & ((1 << s_bits) - 1))) {
1069         /* We get here if guest alignment was not requested,
1070            or was not enforced by cpu_unaligned_access above.
1071            We might widen the access and emulate, but for now
1072            mark an exception and exit the cpu loop.  */
1073         goto stop_the_world;
1074     }
1075 
1076     /* Check TLB entry and enforce page permissions.  */
1077     if (!tlb_hit(tlb_addr, addr)) {
1078         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1079             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
1080                      mmu_idx, retaddr);
1081             index = tlb_index(env, mmu_idx, addr);
1082             tlbe = tlb_entry(env, mmu_idx, addr);
1083         }
1084         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1085     }
1086 
1087     /* Notice an IO access or a needs-MMU-lookup access */
1088     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1089         /* There's really nothing that can be done to
1090            support this apart from stop-the-world.  */
1091         goto stop_the_world;
1092     }
1093 
1094     /* Let the guest notice RMW on a write-only page.  */
1095     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1096         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
1097                  mmu_idx, retaddr);
1098         /* Since we don't support reads and writes to different addresses,
1099            and we do have the proper page loaded for write, this shouldn't
1100            ever return.  But just in case, handle via stop-the-world.  */
1101         goto stop_the_world;
1102     }
1103 
1104     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1105 
1106     ndi->active = false;
1107     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1108         ndi->active = true;
1109         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
1110                                       qemu_ram_addr_from_host_nofail(hostaddr),
1111                                       1 << s_bits);
1112     }
1113 
1114     return hostaddr;
1115 
1116  stop_the_world:
1117     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1118 }
1119 
1120 #ifdef TARGET_WORDS_BIGENDIAN
1121 #define NEED_BE_BSWAP 0
1122 #define NEED_LE_BSWAP 1
1123 #else
1124 #define NEED_BE_BSWAP 1
1125 #define NEED_LE_BSWAP 0
1126 #endif
1127 
1128 /*
1129  * Byte Swap Helper
1130  *
1131  * This should all be optimized away as dead code, depending on the
1132  * build configuration and access type.
1133  */
1134 
1135 static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
1136 {
1137     if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
1138         switch (size) {
1139         case 1: return val;
1140         case 2: return bswap16(val);
1141         case 4: return bswap32(val);
1142         case 8: return bswap64(val);
1143         default:
1144             g_assert_not_reached();
1145         }
1146     } else {
1147         return val;
1148     }
1149 }
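/*
 * Example (annotation): with TARGET_WORDS_BIGENDIAN undefined,
 * NEED_BE_BSWAP == 1 and NEED_LE_BSWAP == 0, so
 * handle_bswap(0x12345678, 4, true) returns 0x78563412 while
 * handle_bswap(0x12345678, 4, false) returns the value unchanged.
 * Both gates are compile-time constants, so the unused branch is
 * folded away.
 */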
1150 
1151 /*
1152  * Load Helpers
1153  *
1154  * We support two different access types. SOFTMMU_CODE_ACCESS is
1155  * specifically for reading instructions from system memory. It is
1156  * called by the translation loop and in some helpers where the code
1157  * is disassembled. It shouldn't be called directly by guest code.
1158  */
1159 
1160 static uint64_t load_helper(CPUArchState *env, target_ulong addr,
1161                             TCGMemOpIdx oi, uintptr_t retaddr,
1162                             size_t size, bool big_endian,
1163                             bool code_read)
1164 {
1165     uintptr_t mmu_idx = get_mmuidx(oi);
1166     uintptr_t index = tlb_index(env, mmu_idx, addr);
1167     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1168     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1169     const size_t tlb_off = code_read ?
1170         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1171     const MMUAccessType access_type =
1172         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1173     unsigned a_bits = get_alignment_bits(get_memop(oi));
1174     void *haddr;
1175     uint64_t res;
1176 
1177     /* Handle CPU specific unaligned behaviour */
1178     if (addr & ((1 << a_bits) - 1)) {
1179         cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
1180                              mmu_idx, retaddr);
1181     }
1182 
1183     /* If the TLB entry is for a different page, reload and try again.  */
1184     if (!tlb_hit(tlb_addr, addr)) {
1185         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1186                             addr & TARGET_PAGE_MASK)) {
1187             tlb_fill(ENV_GET_CPU(env), addr, size,
1188                      access_type, mmu_idx, retaddr);
1189             index = tlb_index(env, mmu_idx, addr);
1190             entry = tlb_entry(env, mmu_idx, addr);
1191         }
1192         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1193     }
1194 
1195     /* Handle an IO access.  */
1196     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1197         if ((addr & (size - 1)) != 0) {
1198             goto do_unaligned_access;
1199         }
1200 
1201         if (tlb_addr & TLB_RECHECK) {
1202             /*
1203              * This is a TLB_RECHECK access, where the MMU protection
1204              * covers a smaller range than a target page, and we must
1205              * repeat the MMU check here. This tlb_fill() call might
1206              * longjump out if this access should cause a guest exception.
1207              */
1208             tlb_fill(ENV_GET_CPU(env), addr, size,
1209                      access_type, mmu_idx, retaddr);
1210             index = tlb_index(env, mmu_idx, addr);
1211             entry = tlb_entry(env, mmu_idx, addr);
1212 
1213             tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1214             tlb_addr &= ~TLB_RECHECK;
1215             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1216                 /* RAM access */
1217                 goto do_aligned_access;
1218             }
1219         }
1220 
1221         res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
1222                        retaddr, access_type, size);
1223         return handle_bswap(res, size, big_endian);
1224     }
1225 
1226     /* Handle slow unaligned access (it spans two pages or IO).  */
1227     if (size > 1
1228         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1229                     >= TARGET_PAGE_SIZE)) {
1230         target_ulong addr1, addr2;
1231         tcg_target_ulong r1, r2;
1232         unsigned shift;
1233     do_unaligned_access:
1234         addr1 = addr & ~(size - 1);
1235         addr2 = addr1 + size;
1236         r1 = load_helper(env, addr1, oi, retaddr, size, big_endian, code_read);
1237         r2 = load_helper(env, addr2, oi, retaddr, size, big_endian, code_read);
1238         shift = (addr & (size - 1)) * 8;
1239 
1240         if (big_endian) {
1241             /* Big-endian combine.  */
1242             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1243         } else {
1244             /* Little-endian combine.  */
1245             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1246         }
1247         return res & MAKE_64BIT_MASK(0, size * 8);
1248     }
1249 
1250  do_aligned_access:
1251     haddr = (void *)((uintptr_t)addr + entry->addend);
1252     switch (size) {
1253     case 1:
1254         res = ldub_p(haddr);
1255         break;
1256     case 2:
1257         if (big_endian) {
1258             res = lduw_be_p(haddr);
1259         } else {
1260             res = lduw_le_p(haddr);
1261         }
1262         break;
1263     case 4:
1264         if (big_endian) {
1265             res = (uint32_t)ldl_be_p(haddr);
1266         } else {
1267             res = (uint32_t)ldl_le_p(haddr);
1268         }
1269         break;
1270     case 8:
1271         if (big_endian) {
1272             res = ldq_be_p(haddr);
1273         } else {
1274             res = ldq_le_p(haddr);
1275         }
1276         break;
1277     default:
1278         g_assert_not_reached();
1279     }
1280 
1281     return res;
1282 }
1283 
1284 /*
1285  * For the benefit of TCG generated code, we want to avoid the
1286  * complication of ABI-specific return type promotion and always
1287  * return a value extended to the register size of the host. This is
1288  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1289  * data, and for that we always have uint64_t.
1290  *
1291  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1292  */
1293 
1294 tcg_target_ulong __attribute__((flatten))
1295 helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1296                     uintptr_t retaddr)
1297 {
1298     return load_helper(env, addr, oi, retaddr, 1, false, false);
1299 }
1300 
1301 tcg_target_ulong __attribute__((flatten))
1302 helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1303                    uintptr_t retaddr)
1304 {
1305     return load_helper(env, addr, oi, retaddr, 2, false, false);
1306 }
1307 
1308 tcg_target_ulong __attribute__((flatten))
1309 helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1310                    uintptr_t retaddr)
1311 {
1312     return load_helper(env, addr, oi, retaddr, 2, true, false);
1313 }
1314 
1315 tcg_target_ulong __attribute__((flatten))
1316 helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1317                    uintptr_t retaddr)
1318 {
1319     return load_helper(env, addr, oi, retaddr, 4, false, false);
1320 }
1321 
1322 tcg_target_ulong __attribute__((flatten))
1323 helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1324                    uintptr_t retaddr)
1325 {
1326     return load_helper(env, addr, oi, retaddr, 4, true, false);
1327 }
1328 
1329 uint64_t __attribute__((flatten))
1330 helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1331                   uintptr_t retaddr)
1332 {
1333     return load_helper(env, addr, oi, retaddr, 8, false, false);
1334 }
1335 
1336 uint64_t __attribute__((flatten))
1337 helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1338                   uintptr_t retaddr)
1339 {
1340     return load_helper(env, addr, oi, retaddr, 8, true, false);
1341 }
1342 
1343 /*
1344  * Provide signed versions of the load routines as well.  We can of course
1345  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
1346  */
1347 
1348 
1349 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1350                                      TCGMemOpIdx oi, uintptr_t retaddr)
1351 {
1352     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1353 }
1354 
1355 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1356                                     TCGMemOpIdx oi, uintptr_t retaddr)
1357 {
1358     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1359 }
1360 
1361 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1362                                     TCGMemOpIdx oi, uintptr_t retaddr)
1363 {
1364     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1365 }
1366 
1367 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1368                                     TCGMemOpIdx oi, uintptr_t retaddr)
1369 {
1370     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1371 }
1372 
1373 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1374                                     TCGMemOpIdx oi, uintptr_t retaddr)
1375 {
1376     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1377 }
1378 
1379 /*
1380  * Store Helpers
1381  */
1382 
1383 static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1384                          TCGMemOpIdx oi, uintptr_t retaddr, size_t size,
1385                          bool big_endian)
1386 {
1387     uintptr_t mmu_idx = get_mmuidx(oi);
1388     uintptr_t index = tlb_index(env, mmu_idx, addr);
1389     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1390     target_ulong tlb_addr = tlb_addr_write(entry);
1391     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1392     unsigned a_bits = get_alignment_bits(get_memop(oi));
1393     void *haddr;
1394 
1395     /* Handle CPU specific unaligned behaviour */
1396     if (addr & ((1 << a_bits) - 1)) {
1397         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1398                              mmu_idx, retaddr);
1399     }
1400 
1401     /* If the TLB entry is for a different page, reload and try again.  */
1402     if (!tlb_hit(tlb_addr, addr)) {
1403         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1404             addr & TARGET_PAGE_MASK)) {
1405             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1406                      mmu_idx, retaddr);
1407             index = tlb_index(env, mmu_idx, addr);
1408             entry = tlb_entry(env, mmu_idx, addr);
1409         }
1410         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1411     }
1412 
1413     /* Handle an IO access.  */
1414     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1415         if ((addr & (size - 1)) != 0) {
1416             goto do_unaligned_access;
1417         }
1418 
1419         if (tlb_addr & TLB_RECHECK) {
1420             /*
1421              * This is a TLB_RECHECK access, where the MMU protection
1422              * covers a smaller range than a target page, and we must
1423              * repeat the MMU check here. This tlb_fill() call might
1424              * longjump out if this access should cause a guest exception.
1425              */
1426             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1427                      mmu_idx, retaddr);
1428             index = tlb_index(env, mmu_idx, addr);
1429             entry = tlb_entry(env, mmu_idx, addr);
1430 
1431             tlb_addr = tlb_addr_write(entry);
1432             tlb_addr &= ~TLB_RECHECK;
1433             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1434                 /* RAM access */
1435                 goto do_aligned_access;
1436             }
1437         }
1438 
1439         io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
1440                   handle_bswap(val, size, big_endian),
1441                   addr, retaddr, size);
1442         return;
1443     }
1444 
1445     /* Handle slow unaligned access (it spans two pages or IO).  */
1446     if (size > 1
1447         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1448                      >= TARGET_PAGE_SIZE)) {
1449         int i;
1450         uintptr_t index2;
1451         CPUTLBEntry *entry2;
1452         target_ulong page2, tlb_addr2;
1453     do_unaligned_access:
1454         /*
1455          * Ensure the second page is in the TLB.  Note that the first page
1456          * is already guaranteed to be filled, and that the second page
1457          * cannot evict the first.
1458          */
1459         page2 = (addr + size) & TARGET_PAGE_MASK;
1460         index2 = tlb_index(env, mmu_idx, page2);
1461         entry2 = tlb_entry(env, mmu_idx, page2);
1462         tlb_addr2 = tlb_addr_write(entry2);
1463         if (!tlb_hit_page(tlb_addr2, page2)
1464             && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
1465                                page2 & TARGET_PAGE_MASK)) {
1466             tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
1467                      mmu_idx, retaddr);
1468         }
1469 
1470         /*
1471          * XXX: not efficient, but simple.
1472          * This loop must go in the forward direction to avoid issues
1473          * with self-modifying code in Windows 64-bit.
1474          */
1475         for (i = 0; i < size; ++i) {
1476             uint8_t val8;
1477             if (big_endian) {
1478                 /* Big-endian extract.  */
1479                 val8 = val >> (((size - 1) * 8) - (i * 8));
1480             } else {
1481                 /* Little-endian extract.  */
1482                 val8 = val >> (i * 8);
1483             }
1484             store_helper(env, addr + i, val8, oi, retaddr, 1, big_endian);
1485         }
1486         return;
1487     }
1488 
1489  do_aligned_access:
1490     haddr = (void *)((uintptr_t)addr + entry->addend);
1491     switch (size) {
1492     case 1:
1493         stb_p(haddr, val);
1494         break;
1495     case 2:
1496         if (big_endian) {
1497             stw_be_p(haddr, val);
1498         } else {
1499             stw_le_p(haddr, val);
1500         }
1501         break;
1502     case 4:
1503         if (big_endian) {
1504             stl_be_p(haddr, val);
1505         } else {
1506             stl_le_p(haddr, val);
1507         }
1508         break;
1509     case 8:
1510         if (big_endian) {
1511             stq_be_p(haddr, val);
1512         } else {
1513             stq_le_p(haddr, val);
1514         }
1515         break;
1516     default:
1517         g_assert_not_reached();
1518         break;
1519     }
1520 }
1521 
1522 void __attribute__((flatten))
1523 helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1524                    TCGMemOpIdx oi, uintptr_t retaddr)
1525 {
1526     store_helper(env, addr, val, oi, retaddr, 1, false);
1527 }
1528 
1529 void __attribute__((flatten))
1530 helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1531                   TCGMemOpIdx oi, uintptr_t retaddr)
1532 {
1533     store_helper(env, addr, val, oi, retaddr, 2, false);
1534 }
1535 
1536 void __attribute__((flatten))
1537 helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1538                   TCGMemOpIdx oi, uintptr_t retaddr)
1539 {
1540     store_helper(env, addr, val, oi, retaddr, 2, true);
1541 }
1542 
1543 void __attribute__((flatten))
1544 helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1545                   TCGMemOpIdx oi, uintptr_t retaddr)
1546 {
1547     store_helper(env, addr, val, oi, retaddr, 4, false);
1548 }
1549 
1550 void __attribute__((flatten))
1551 helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1552                   TCGMemOpIdx oi, uintptr_t retaddr)
1553 {
1554     store_helper(env, addr, val, oi, retaddr, 4, true);
1555 }
1556 
1557 void __attribute__((flatten))
1558 helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1559                   TCGMemOpIdx oi, uintptr_t retaddr)
1560 {
1561     store_helper(env, addr, val, oi, retaddr, 8, false);
1562 }
1563 
1564 void __attribute__((flatten))
1565 helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1566                   TCGMemOpIdx oi, uintptr_t retaddr)
1567 {
1568     store_helper(env, addr, val, oi, retaddr, 8, true);
1569 }
1570 
1571 /* The first set of helpers allows OI and RETADDR to be passed in, which
1572    makes them callable from other helpers.  */
1573 
1574 #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1575 #define ATOMIC_NAME(X) \
1576     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1577 #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
1578 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
1579 #define ATOMIC_MMU_CLEANUP                              \
1580     do {                                                \
1581         if (unlikely(ndi.active)) {                     \
1582             memory_notdirty_write_complete(&ndi);       \
1583         }                                               \
1584     } while (0)
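/*
 * Rough sketch (abridged and simplified here for illustration; see
 * atomic_template.h for the real definitions) of what one generated
 * helper looks like once the macros above are expanded:
 *
 *     ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
 *                                   ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
 *     {
 *         ATOMIC_MMU_DECLS;
 *         DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
 *         DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *         ATOMIC_MMU_CLEANUP;
 *         return ret;
 *     }
 */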
1585 
1586 #define DATA_SIZE 1
1587 #include "atomic_template.h"
1588 
1589 #define DATA_SIZE 2
1590 #include "atomic_template.h"
1591 
1592 #define DATA_SIZE 4
1593 #include "atomic_template.h"
1594 
1595 #ifdef CONFIG_ATOMIC64
1596 #define DATA_SIZE 8
1597 #include "atomic_template.h"
1598 #endif
1599 
1600 #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1601 #define DATA_SIZE 16
1602 #include "atomic_template.h"
1603 #endif
1604 
1605 /* The second set of helpers is directly callable from TCG as helpers.  */
1606 
1607 #undef EXTRA_ARGS
1608 #undef ATOMIC_NAME
1609 #undef ATOMIC_MMU_LOOKUP
1610 #define EXTRA_ARGS         , TCGMemOpIdx oi
1611 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
1612 #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
1613 
1614 #define DATA_SIZE 1
1615 #include "atomic_template.h"
1616 
1617 #define DATA_SIZE 2
1618 #include "atomic_template.h"
1619 
1620 #define DATA_SIZE 4
1621 #include "atomic_template.h"
1622 
1623 #ifdef CONFIG_ATOMIC64
1624 #define DATA_SIZE 8
1625 #include "atomic_template.h"
1626 #endif
1627 
1628 /* Code access functions.  */
1629 
1630 uint8_t __attribute__((flatten))
1631 helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1632                     uintptr_t retaddr)
1633 {
1634     return load_helper(env, addr, oi, retaddr, 1, false, true);
1635 }
1636 
1637 uint16_t __attribute__((flatten))
1638 helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1639                    uintptr_t retaddr)
1640 {
1641     return load_helper(env, addr, oi, retaddr, 2, false, true);
1642 }
1643 
1644 uint16_t __attribute__((flatten))
1645 helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1646                    uintptr_t retaddr)
1647 {
1648     return load_helper(env, addr, oi, retaddr, 2, true, true);
1649 }
1650 
1651 uint32_t __attribute__((flatten))
1652 helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1653                    uintptr_t retaddr)
1654 {
1655     return load_helper(env, addr, oi, retaddr, 4, false, true);
1656 }
1657 
1658 uint32_t __attribute__((flatten))
1659 helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1660                    uintptr_t retaddr)
1661 {
1662     return load_helper(env, addr, oi, retaddr, 4, true, true);
1663 }
1664 
1665 uint64_t __attribute__((flatten))
1666 helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1667                    uintptr_t retaddr)
1668 {
1669     return load_helper(env, addr, oi, retaddr, 8, false, true);
1670 }
1671 
1672 uint64_t __attribute__((flatten))
1673 helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1674                    uintptr_t retaddr)
1675 {
1676     return load_helper(env, addr, oi, retaddr, 8, true, true);
1677 }
1678