xref: /openbmc/qemu/accel/tcg/cputlb.c (revision ae3c12a0)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
36 
37 /* DEBUG defines; enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
38 /* #define DEBUG_TLB */
39 /* #define DEBUG_TLB_LOG */
40 
41 #ifdef DEBUG_TLB
42 # define DEBUG_TLB_GATE 1
43 # ifdef DEBUG_TLB_LOG
44 #  define DEBUG_TLB_LOG_GATE 1
45 # else
46 #  define DEBUG_TLB_LOG_GATE 0
47 # endif
48 #else
49 # define DEBUG_TLB_GATE 0
50 # define DEBUG_TLB_LOG_GATE 0
51 #endif
52 
53 #define tlb_debug(fmt, ...) do { \
54     if (DEBUG_TLB_LOG_GATE) { \
55         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
56                       ## __VA_ARGS__); \
57     } else if (DEBUG_TLB_GATE) { \
58         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
59     } \
60 } while (0)
61 
62 #define assert_cpu_is_self(cpu) do {                              \
63         if (DEBUG_TLB_GATE) {                                     \
64             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
65         }                                                         \
66     } while (0)
67 
68 /* run_on_cpu_data.target_ptr should always be big enough for a
69  * target_ulong even on 32 bit builds */
70 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
71 
72 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
73  */
74 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
75 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
76 
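/*
 * env->tlb_mask[mmu_idx] stores (n_entries - 1) << CPU_TLB_ENTRY_BITS
 * (see tlb_dyn_init below), so adding back one entry's size yields the
 * size in bytes of that MMU index's TLB table.
 */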
77 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
78 {
79     return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
80 }
81 
82 static void tlb_window_reset(CPUTLBWindow *window, int64_t ns,
83                              size_t max_entries)
84 {
85     window->begin_ns = ns;
86     window->max_entries = max_entries;
87 }
88 
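/*
 * Allocate each MMU mode's TLB and IOTLB at the default dynamic size
 * (1 << CPU_TLB_DYN_DEFAULT_BITS entries) and start an empty resize
 * window.
 */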
89 static void tlb_dyn_init(CPUArchState *env)
90 {
91     int i;
92 
93     for (i = 0; i < NB_MMU_MODES; i++) {
94         CPUTLBDesc *desc = &env->tlb_d[i];
95         size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
96 
97         tlb_window_reset(&desc->window, get_clock_realtime(), 0);
98         desc->n_used_entries = 0;
99         env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
100         env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
101         env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
102     }
103 }
104 
105 /**
106  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
107  * @env: CPU that owns the TLB
108  * @mmu_idx: MMU index of the TLB
109  *
110  * Called with tlb_lock_held.
111  * Called with tlb_c.lock held.
112  * We have two main constraints when resizing a TLB: (1) we only resize it
113  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
114  * the array or unnecessarily flushing it), which means we do not control how
115  * frequently the resizing can occur; (2) we don't have access to the guest's
116  * future scheduling decisions, and therefore have to decide the magnitude of
117  * the resize based on past observations.
118  *
119  * In general, a memory-hungry process can benefit greatly from an appropriately
120  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
121  * we just have to make the TLB as large as possible; while an oversized TLB
122  * results in minimal TLB miss rates, it also takes longer to be flushed
123  * (flushes can be _very_ frequent), and the reduced locality can also hurt
124  * performance.
125  *
126  * To achieve near-optimal performance for all kinds of workloads, we:
127  *
128  * 1. Aggressively increase the size of the TLB when the use rate of the
129  * TLB being flushed is high, since it is likely that in the near future this
130  * memory-hungry process will execute again, and its memory hungriness will
131  * probably be similar.
132  *
133  * 2. Slowly reduce the size of the TLB as the use rate declines over a
134  * reasonably large time window. The rationale is that if in such a time window
135  * we have not observed a high TLB use rate, it is likely that we won't observe
136  * it in the near future. In that case, once a time window expires we downsize
137  * the TLB to match the maximum use rate observed in the window.
138  *
139  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
140  * since in that range performance is likely near-optimal. Recall that the TLB
141  * is direct mapped, so we want the use rate to be low (or at least not too
142  * high), since otherwise we are likely to have a significant amount of
143  * conflict misses.
144  */
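/*
 * For example, with old_size == 1024: seeing at most 820 used entries
 * gives an 80% use rate, so the table doubles to 2048 on this flush
 * (capped at 1 << CPU_TLB_DYN_MAX_BITS).  Seeing at most 150 used
 * entries gives 14%; once the 100ms window has expired the table
 * shrinks to pow2ceil(150) == 256, since 150/256 stays below the 70%
 * threshold and no extra doubling is needed.
 */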
145 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
146 {
147     CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
148     size_t old_size = tlb_n_entries(env, mmu_idx);
149     size_t rate;
150     size_t new_size = old_size;
151     int64_t now = get_clock_realtime();
152     int64_t window_len_ms = 100;
153     int64_t window_len_ns = window_len_ms * 1000 * 1000;
154     bool window_expired = now > desc->window.begin_ns + window_len_ns;
155 
156     if (desc->n_used_entries > desc->window.max_entries) {
157         desc->window.max_entries = desc->n_used_entries;
158     }
159     rate = desc->window.max_entries * 100 / old_size;
160 
161     if (rate > 70) {
162         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
163     } else if (rate < 30 && window_expired) {
164         size_t ceil = pow2ceil(desc->window.max_entries);
165         size_t expected_rate = desc->window.max_entries * 100 / ceil;
166 
167         /*
168          * Avoid undersizing when the max number of entries seen is just below
169          * a pow2. For instance, if max_entries == 1025, the expected use rate
170          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
171          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
172          * later. Thus, make sure that the expected use rate remains below 70%
173          * (and since we double the size, the lowest rate we'd then expect to
174          * get is 35%, which is still in the 30-70% range where we consider
175          * the size to be appropriate).
176          */
177         if (expected_rate > 70) {
178             ceil *= 2;
179         }
180         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
181     }
182 
183     if (new_size == old_size) {
184         if (window_expired) {
185             tlb_window_reset(&desc->window, now, desc->n_used_entries);
186         }
187         return;
188     }
189 
190     g_free(env->tlb_table[mmu_idx]);
191     g_free(env->iotlb[mmu_idx]);
192 
193     tlb_window_reset(&desc->window, now, 0);
194     /* desc->n_used_entries is cleared by the caller */
195     env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
196     env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
197     env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
198     /*
199      * If the allocations fail, try smaller sizes. We just freed some
200      * memory, so going back to half of new_size has a good chance of working.
201      * Increased memory pressure elsewhere in the system might cause the
202      * allocations to fail though, so we progressively reduce the allocation
203      * size, aborting if we cannot even allocate the smallest TLB we support.
204      */
205     while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
206         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
207             error_report("%s: %s", __func__, strerror(errno));
208             abort();
209         }
210         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
211         env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
212 
213         g_free(env->tlb_table[mmu_idx]);
214         g_free(env->iotlb[mmu_idx]);
215         env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
216         env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
217     }
218 }
219 
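/*
 * Flush one MMU index's table: apply any pending resize, then set every
 * entry to all-ones, which never compares equal to a page-aligned
 * address (see tlb_entry_is_empty below).
 */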
220 static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
221 {
222     tlb_mmu_resize_locked(env, mmu_idx);
223     memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
224     env->tlb_d[mmu_idx].n_used_entries = 0;
225 }
226 
227 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
228 {
229     env->tlb_d[mmu_idx].n_used_entries++;
230 }
231 
232 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
233 {
234     env->tlb_d[mmu_idx].n_used_entries--;
235 }
236 
237 void tlb_init(CPUState *cpu)
238 {
239     CPUArchState *env = cpu->env_ptr;
240 
241     qemu_spin_init(&env->tlb_c.lock);
242 
243     /* Ensure that cpu_reset performs a full flush.  */
244     env->tlb_c.dirty = ALL_MMUIDX_BITS;
245 
246     tlb_dyn_init(env);
247 }
248 
249 /* flush_all_helper: run fn asynchronously on every cpu other than src
250  *
251  * The caller is then expected either to run fn on the src cpu directly,
252  * or to queue it on src as "safe" work; the latter creates a
253  * synchronisation point where all queued work will be finished
254  * before execution starts again.
255  */
256 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
257                              run_on_cpu_data d)
258 {
259     CPUState *cpu;
260 
261     CPU_FOREACH(cpu) {
262         if (cpu != src) {
263             async_run_on_cpu(cpu, fn, d);
264         }
265     }
266 }
267 
268 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
269 {
270     CPUState *cpu;
271     size_t full = 0, part = 0, elide = 0;
272 
273     CPU_FOREACH(cpu) {
274         CPUArchState *env = cpu->env_ptr;
275 
276         full += atomic_read(&env->tlb_c.full_flush_count);
277         part += atomic_read(&env->tlb_c.part_flush_count);
278         elide += atomic_read(&env->tlb_c.elide_flush_count);
279     }
280     *pfull = full;
281     *ppart = part;
282     *pelide = elide;
283 }
284 
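/*
 * Reset one mmu_idx completely: flush the main table, invalidate the
 * whole victim TLB, and forget any recorded large-page range.
 */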
285 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
286 {
287     tlb_table_flush_by_mmuidx(env, mmu_idx);
288     memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
289     env->tlb_d[mmu_idx].large_page_addr = -1;
290     env->tlb_d[mmu_idx].large_page_mask = -1;
291     env->tlb_d[mmu_idx].vindex = 0;
292 }
293 
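/*
 * Flush the MMU indexes requested in data.host_int that are still
 * dirty; indexes that are requested but already clean only bump the
 * elided-flush counter.
 */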
294 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
295 {
296     CPUArchState *env = cpu->env_ptr;
297     uint16_t asked = data.host_int;
298     uint16_t all_dirty, work, to_clean;
299 
300     assert_cpu_is_self(cpu);
301 
302     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
303 
304     qemu_spin_lock(&env->tlb_c.lock);
305 
306     all_dirty = env->tlb_c.dirty;
307     to_clean = asked & all_dirty;
308     all_dirty &= ~to_clean;
309     env->tlb_c.dirty = all_dirty;
310 
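    /*
     * Iterate over the set bits of to_clean: ctz32() picks the lowest
     * set bit (the next dirty mmu_idx) and "work &= work - 1" clears it.
     */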
311     for (work = to_clean; work != 0; work &= work - 1) {
312         int mmu_idx = ctz32(work);
313         tlb_flush_one_mmuidx_locked(env, mmu_idx);
314     }
315 
316     qemu_spin_unlock(&env->tlb_c.lock);
317 
318     cpu_tb_jmp_cache_clear(cpu);
319 
320     if (to_clean == ALL_MMUIDX_BITS) {
321         atomic_set(&env->tlb_c.full_flush_count,
322                    env->tlb_c.full_flush_count + 1);
323     } else {
324         atomic_set(&env->tlb_c.part_flush_count,
325                    env->tlb_c.part_flush_count + ctpop16(to_clean));
326         if (to_clean != asked) {
327             atomic_set(&env->tlb_c.elide_flush_count,
328                        env->tlb_c.elide_flush_count +
329                        ctpop16(asked & ~to_clean));
330         }
331     }
332 }
333 
334 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
335 {
336     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
337 
338     if (cpu->created && !qemu_cpu_is_self(cpu)) {
339         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
340                          RUN_ON_CPU_HOST_INT(idxmap));
341     } else {
342         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
343     }
344 }
345 
346 void tlb_flush(CPUState *cpu)
347 {
348     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
349 }
350 
351 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
352 {
353     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
354 
355     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
356 
357     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
358     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
359 }
360 
361 void tlb_flush_all_cpus(CPUState *src_cpu)
362 {
363     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
364 }
365 
366 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
367 {
368     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
369 
370     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
371 
372     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
373     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
374 }
375 
376 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
377 {
378     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
379 }
380 
381 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
382                                         target_ulong page)
383 {
384     return tlb_hit_page(tlb_entry->addr_read, page) ||
385            tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
386            tlb_hit_page(tlb_entry->addr_code, page);
387 }
388 
389 /**
390  * tlb_entry_is_empty - return true if the entry is not in use
391  * @te: pointer to CPUTLBEntry
392  */
393 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
394 {
395     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
396 }
397 
398 /* Called with tlb_c.lock held */
399 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
400                                           target_ulong page)
401 {
402     if (tlb_hit_page_anyprot(tlb_entry, page)) {
403         memset(tlb_entry, -1, sizeof(*tlb_entry));
404         return true;
405     }
406     return false;
407 }
408 
409 /* Called with tlb_c.lock held */
410 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
411                                               target_ulong page)
412 {
413     int k;
414 
415     assert_cpu_is_self(ENV_GET_CPU(env));
416     for (k = 0; k < CPU_VTLB_SIZE; k++) {
417         if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
418             tlb_n_used_entries_dec(env, mmu_idx);
419         }
420     }
421 }
422 
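/*
 * Flush a single page from one MMU index.  If the page lies within the
 * recorded large-page range we cannot tell which entries map it, so we
 * flush the whole mmu_idx instead.
 */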
423 static void tlb_flush_page_locked(CPUArchState *env, int midx,
424                                   target_ulong page)
425 {
426     target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
427     target_ulong lp_mask = env->tlb_d[midx].large_page_mask;
428 
429     /* Check if we need to flush due to large pages.  */
430     if ((page & lp_mask) == lp_addr) {
431         tlb_debug("forcing full flush midx %d ("
432                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
433                   midx, lp_addr, lp_mask);
434         tlb_flush_one_mmuidx_locked(env, midx);
435     } else {
436         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
437             tlb_n_used_entries_dec(env, midx);
438         }
439         tlb_flush_vtlb_page_locked(env, midx, page);
440     }
441 }
442 
443 /* Since we hijack the bottom bits of the page address for an mmuidx
444  * bit mask, the build must fail if they do not fit there.
445  */
446 QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
447 
448 static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
449                                                 run_on_cpu_data data)
450 {
451     CPUArchState *env = cpu->env_ptr;
452     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
453     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
454     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
455     int mmu_idx;
456 
457     assert_cpu_is_self(cpu);
458 
459     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
460               addr, mmu_idx_bitmap);
461 
462     qemu_spin_lock(&env->tlb_c.lock);
463     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
464         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
465             tlb_flush_page_locked(env, mmu_idx, addr);
466         }
467     }
468     qemu_spin_unlock(&env->tlb_c.lock);
469 
470     tb_flush_jmp_cache(cpu, addr);
471 }
472 
473 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
474 {
475     target_ulong addr_and_mmu_idx;
476 
477     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
478 
479     /* This should already be page aligned */
480     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
481     addr_and_mmu_idx |= idxmap;
482 
483     if (!qemu_cpu_is_self(cpu)) {
484         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
485                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
486     } else {
487         tlb_flush_page_by_mmuidx_async_work(
488             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
489     }
490 }
491 
492 void tlb_flush_page(CPUState *cpu, target_ulong addr)
493 {
494     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
495 }
496 
497 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
498                                        uint16_t idxmap)
499 {
500     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
501     target_ulong addr_and_mmu_idx;
502 
503     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
504 
505     /* This should already be page aligned */
506     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
507     addr_and_mmu_idx |= idxmap;
508 
509     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
510     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
511 }
512 
513 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
514 {
515     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
516 }
517 
518 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
519                                               target_ulong addr,
520                                               uint16_t idxmap)
521 {
522     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
523     target_ulong addr_and_mmu_idx;
524 
525     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
526 
527     /* This should already be page aligned */
528     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
529     addr_and_mmu_idx |= idxmap;
530 
531     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
532     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
533 }
534 
535 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
536 {
537     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
538 }
539 
540 /* update the TLBs so that writes to code in the page at 'ram_addr'
541    can be detected */
542 void tlb_protect_code(ram_addr_t ram_addr)
543 {
544     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
545                                              DIRTY_MEMORY_CODE);
546 }
547 
548 /* update the TLB so that writes in the physical page at 'ram_addr' are no
549    longer tested for self-modifying code */
550 void tlb_unprotect_code(ram_addr_t ram_addr)
551 {
552     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
553 }
554 
555 
556 /*
557  * Dirty write flag handling
558  *
559  * When the TCG code writes to a location it looks up the address in
560  * the TLB and uses that data to compute the final address. If any of
561  * the lower bits of the address are set then the slow path is forced.
562  * There are a number of reasons to do this but for normal RAM the
563  * most usual is detecting writes to code regions which may invalidate
564  * generated code.
565  *
566  * Other vCPUs might be reading their TLBs during guest execution, so we update
567  * te->addr_write with atomic_set. We don't need to worry about this for
568  * oversized guests as MTTCG is disabled for them.
569  *
570  * Called with tlb_c.lock held.
571  */
572 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
573                                          uintptr_t start, uintptr_t length)
574 {
575     uintptr_t addr = tlb_entry->addr_write;
576 
577     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
578         addr &= TARGET_PAGE_MASK;
579         addr += tlb_entry->addend;
580         if ((addr - start) < length) {
581 #if TCG_OVERSIZED_GUEST
582             tlb_entry->addr_write |= TLB_NOTDIRTY;
583 #else
584             atomic_set(&tlb_entry->addr_write,
585                        tlb_entry->addr_write | TLB_NOTDIRTY);
586 #endif
587         }
588     }
589 }
590 
591 /*
592  * Called with tlb_c.lock held.
593  * Called only from the vCPU context, i.e. the TLB's owner thread.
594  */
595 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
596 {
597     *d = *s;
598 }
599 
600 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
601  * the target vCPU).
602  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
603  * thing actually updated is the target TLB entry ->addr_write flags.
604  */
605 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
606 {
607     CPUArchState *env;
608 
609     int mmu_idx;
610 
611     env = cpu->env_ptr;
612     qemu_spin_lock(&env->tlb_c.lock);
613     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
614         unsigned int i;
615         unsigned int n = tlb_n_entries(env, mmu_idx);
616 
617         for (i = 0; i < n; i++) {
618             tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
619                                          length);
620         }
621 
622         for (i = 0; i < CPU_VTLB_SIZE; i++) {
623             tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
624                                          length);
625         }
626     }
627     qemu_spin_unlock(&env->tlb_c.lock);
628 }
629 
630 /* Called with tlb_c.lock held */
631 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
632                                          target_ulong vaddr)
633 {
634     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
635         tlb_entry->addr_write = vaddr;
636     }
637 }
638 
639 /* update the TLB entries for virtual page vaddr so that writes to it
640    are no longer trapped as not-dirty (clear TLB_NOTDIRTY) */
641 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
642 {
643     CPUArchState *env = cpu->env_ptr;
644     int mmu_idx;
645 
646     assert_cpu_is_self(cpu);
647 
648     vaddr &= TARGET_PAGE_MASK;
649     qemu_spin_lock(&env->tlb_c.lock);
650     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
651         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
652     }
653 
654     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
655         int k;
656         for (k = 0; k < CPU_VTLB_SIZE; k++) {
657             tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
658         }
659     }
660     qemu_spin_unlock(&env->tlb_c.lock);
661 }
662 
663 /* Our TLB does not support large pages, so remember the area covered by
664    large pages and trigger a full TLB flush if these are invalidated.  */
665 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
666                                target_ulong vaddr, target_ulong size)
667 {
668     target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
669     target_ulong lp_mask = ~(size - 1);
670 
671     if (lp_addr == (target_ulong)-1) {
672         /* No previous large page.  */
673         lp_addr = vaddr;
674     } else {
675         /* Extend the existing region to include the new page.
676            This is a compromise between unnecessary flushes and
677            the cost of maintaining a full variable size TLB.  */
678         lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
679         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
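        /*
         * Widen the mask until it covers both the old range and the new
         * page; e.g. 2MB pages at 0x400000 and 0xc00000 end up sharing
         * a 16MB range based at 0.
         */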
680             lp_mask <<= 1;
681         }
682     }
683     env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
684     env->tlb_d[mmu_idx].large_page_mask = lp_mask;
685 }
686 
687 /* Add a new TLB entry. At most one entry for a given virtual address
688  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
689  * supplied size is only used by tlb_flush_page.
690  *
691  * Called from TCG-generated code, which is under an RCU read-side
692  * critical section.
693  */
694 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
695                              hwaddr paddr, MemTxAttrs attrs, int prot,
696                              int mmu_idx, target_ulong size)
697 {
698     CPUArchState *env = cpu->env_ptr;
699     MemoryRegionSection *section;
700     unsigned int index;
701     target_ulong address;
702     target_ulong code_address;
703     uintptr_t addend;
704     CPUTLBEntry *te, tn;
705     hwaddr iotlb, xlat, sz, paddr_page;
706     target_ulong vaddr_page;
707     int asidx = cpu_asidx_from_attrs(cpu, attrs);
708 
709     assert_cpu_is_self(cpu);
710 
711     if (size <= TARGET_PAGE_SIZE) {
712         sz = TARGET_PAGE_SIZE;
713     } else {
714         tlb_add_large_page(env, mmu_idx, vaddr, size);
715         sz = size;
716     }
717     vaddr_page = vaddr & TARGET_PAGE_MASK;
718     paddr_page = paddr & TARGET_PAGE_MASK;
719 
720     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
721                                                 &xlat, &sz, attrs, &prot);
722     assert(sz >= TARGET_PAGE_SIZE);
723 
724     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
725               " prot=%x idx=%d\n",
726               vaddr, paddr, prot, mmu_idx);
727 
728     address = vaddr_page;
729     if (size < TARGET_PAGE_SIZE) {
730         /*
731          * Slow-path the TLB entries; we will repeat the MMU check and TLB
732          * fill on every access.
733          */
734         address |= TLB_RECHECK;
735     }
736     if (!memory_region_is_ram(section->mr) &&
737         !memory_region_is_romd(section->mr)) {
738         /* IO memory case */
739         address |= TLB_MMIO;
740         addend = 0;
741     } else {
742         /* TLB_MMIO for rom/romd handled below */
743         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
744     }
745 
746     code_address = address;
747     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
748                                             paddr_page, xlat, prot, &address);
749 
750     index = tlb_index(env, mmu_idx, vaddr_page);
751     te = tlb_entry(env, mmu_idx, vaddr_page);
752 
753     /*
754      * Hold the TLB lock for the rest of the function. We could acquire/release
755      * the lock several times in the function, but it is faster to amortize the
756      * acquisition cost by acquiring it just once. Note that this leads to
757      * a longer critical section, but this is not a concern since the TLB lock
758      * is unlikely to be contended.
759      */
760     qemu_spin_lock(&env->tlb_c.lock);
761 
762     /* Note that the tlb is no longer clean.  */
763     env->tlb_c.dirty |= 1 << mmu_idx;
764 
765     /* Make sure there's no cached translation for the new page.  */
766     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
767 
768     /*
769      * Only evict the old entry to the victim tlb if it's for a
770      * different page; otherwise just overwrite the stale data.
771      */
772     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
773         unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
774         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
775 
776         /* Evict the old entry into the victim tlb.  */
777         copy_tlb_helper_locked(tv, te);
778         env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
779         tlb_n_used_entries_dec(env, mmu_idx);
780     }
781 
782     /* refill the tlb */
783     /*
784      * At this point iotlb contains a physical section number in the lower
785      * TARGET_PAGE_BITS, and either
786      *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
787      *  + the offset within section->mr of the page base (otherwise)
788      * We subtract the vaddr_page (which is page aligned and thus won't
789      * disturb the low bits) to give an offset which can be added to the
790      * (non-page-aligned) vaddr of the eventual memory access to get
791      * the MemoryRegion offset for the access. Note that the vaddr we
792      * subtract here is that of the page base, and not the same as the
793      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
794      */
795     env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
796     env->iotlb[mmu_idx][index].attrs = attrs;
797 
798     /* Now calculate the new entry */
799     tn.addend = addend - vaddr_page;
800     if (prot & PAGE_READ) {
801         tn.addr_read = address;
802     } else {
803         tn.addr_read = -1;
804     }
805 
806     if (prot & PAGE_EXEC) {
807         tn.addr_code = code_address;
808     } else {
809         tn.addr_code = -1;
810     }
811 
812     tn.addr_write = -1;
813     if (prot & PAGE_WRITE) {
814         if ((memory_region_is_ram(section->mr) && section->readonly)
815             || memory_region_is_romd(section->mr)) {
816             /* Write access calls the I/O callback.  */
817             tn.addr_write = address | TLB_MMIO;
818         } else if (memory_region_is_ram(section->mr)
819                    && cpu_physical_memory_is_clean(
820                        memory_region_get_ram_addr(section->mr) + xlat)) {
821             tn.addr_write = address | TLB_NOTDIRTY;
822         } else {
823             tn.addr_write = address;
824         }
825         if (prot & PAGE_WRITE_INV) {
826             tn.addr_write |= TLB_INVALID_MASK;
827         }
828     }
829 
830     copy_tlb_helper_locked(te, &tn);
831     tlb_n_used_entries_inc(env, mmu_idx);
832     qemu_spin_unlock(&env->tlb_c.lock);
833 }
834 
835 /* Add a new TLB entry, but without specifying the memory
836  * transaction attributes to be used.
837  */
838 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
839                   hwaddr paddr, int prot,
840                   int mmu_idx, target_ulong size)
841 {
842     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
843                             prot, mmu_idx, size);
844 }
845 
846 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
847 {
848     ram_addr_t ram_addr;
849 
850     ram_addr = qemu_ram_addr_from_host(ptr);
851     if (ram_addr == RAM_ADDR_INVALID) {
852         error_report("Bad ram pointer %p", ptr);
853         abort();
854     }
855     return ram_addr;
856 }
857 
858 /*
859  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
860  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
861  * be discarded and looked up again (e.g. via tlb_entry()).
862  */
863 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
864                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
865 {
866     CPUClass *cc = CPU_GET_CLASS(cpu);
867     bool ok;
868 
869     /*
870      * This is not a probe, so only valid return is success; failure
871      * should result in exception + longjmp to the cpu loop.
872      */
873     ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
874     assert(ok);
875 }
876 
877 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
878                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
879                          MMUAccessType access_type, int size)
880 {
881     CPUState *cpu = ENV_GET_CPU(env);
882     hwaddr mr_offset;
883     MemoryRegionSection *section;
884     MemoryRegion *mr;
885     uint64_t val;
886     bool locked = false;
887     MemTxResult r;
888 
889     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
890     mr = section->mr;
891     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
892     cpu->mem_io_pc = retaddr;
893     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
894         cpu_io_recompile(cpu, retaddr);
895     }
896 
897     cpu->mem_io_vaddr = addr;
898     cpu->mem_io_access_type = access_type;
899 
900     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
901         qemu_mutex_lock_iothread();
902         locked = true;
903     }
904     r = memory_region_dispatch_read(mr, mr_offset,
905                                     &val, size, iotlbentry->attrs);
906     if (r != MEMTX_OK) {
907         hwaddr physaddr = mr_offset +
908             section->offset_within_address_space -
909             section->offset_within_region;
910 
911         cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
912                                mmu_idx, iotlbentry->attrs, r, retaddr);
913     }
914     if (locked) {
915         qemu_mutex_unlock_iothread();
916     }
917 
918     return val;
919 }
920 
921 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
922                       int mmu_idx, uint64_t val, target_ulong addr,
923                       uintptr_t retaddr, int size)
924 {
925     CPUState *cpu = ENV_GET_CPU(env);
926     hwaddr mr_offset;
927     MemoryRegionSection *section;
928     MemoryRegion *mr;
929     bool locked = false;
930     MemTxResult r;
931 
932     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
933     mr = section->mr;
934     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
935     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
936         cpu_io_recompile(cpu, retaddr);
937     }
938     cpu->mem_io_vaddr = addr;
939     cpu->mem_io_pc = retaddr;
940 
941     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
942         qemu_mutex_lock_iothread();
943         locked = true;
944     }
945     r = memory_region_dispatch_write(mr, mr_offset,
946                                      val, size, iotlbentry->attrs);
947     if (r != MEMTX_OK) {
948         hwaddr physaddr = mr_offset +
949             section->offset_within_address_space -
950             section->offset_within_region;
951 
952         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
953                                mmu_idx, iotlbentry->attrs, r, retaddr);
954     }
955     if (locked) {
956         qemu_mutex_unlock_iothread();
957     }
958 }
959 
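/*
 * Read one of a TLB entry's address comparators (addr_read, addr_write
 * or addr_code), identified by its byte offset within CPUTLBEntry.
 */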
960 static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
961 {
962 #if TCG_OVERSIZED_GUEST
963     return *(target_ulong *)((uintptr_t)entry + ofs);
964 #else
965     /* ofs might correspond to .addr_write, so use atomic_read */
966     return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
967 #endif
968 }
969 
970 /* Return true if ADDR is present in the victim tlb, and has been copied
971    back to the main tlb.  */
972 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
973                            size_t elt_ofs, target_ulong page)
974 {
975     size_t vidx;
976 
977     assert_cpu_is_self(ENV_GET_CPU(env));
978     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
979         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
980         target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
981 
982         if (cmp == page) {
983             /* Found entry in victim tlb, swap tlb and iotlb.  */
984             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
985 
986             qemu_spin_lock(&env->tlb_c.lock);
987             copy_tlb_helper_locked(&tmptlb, tlb);
988             copy_tlb_helper_locked(tlb, vtlb);
989             copy_tlb_helper_locked(vtlb, &tmptlb);
990             qemu_spin_unlock(&env->tlb_c.lock);
991 
992             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
993             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
994             tmpio = *io; *io = *vio; *vio = tmpio;
995             return true;
996         }
997     }
998     return false;
999 }
1000 
1001 /* Macro to call the above, with local variables from the use context.  */
1002 #define VICTIM_TLB_HIT(TY, ADDR) \
1003   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1004                  (ADDR) & TARGET_PAGE_MASK)
1005 
1006 /* NOTE: this function can trigger an exception */
1007 /* NOTE2: the returned address is not exactly the physical address: it
1008  * is actually a ram_addr_t (in system mode; the user mode emulation
1009  * version of this function returns a guest virtual address).
1010  */
1011 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1012 {
1013     uintptr_t mmu_idx = cpu_mmu_index(env, true);
1014     uintptr_t index = tlb_index(env, mmu_idx, addr);
1015     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1016     void *p;
1017 
1018     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1019         if (!VICTIM_TLB_HIT(addr_code, addr)) {
1020             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1021             index = tlb_index(env, mmu_idx, addr);
1022             entry = tlb_entry(env, mmu_idx, addr);
1023         }
1024         assert(tlb_hit(entry->addr_code, addr));
1025     }
1026 
1027     if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
1028         /*
1029          * Return -1 if we can't translate and execute from an entire
1030          * page of RAM here, which will cause us to execute by loading
1031          * and translating one insn at a time, without caching:
1032          *  - TLB_RECHECK: means the MMU protection covers a smaller range
1033          *    than a target page, so we must redo the MMU check every insn
1034          *  - TLB_MMIO: region is not backed by RAM
1035          */
1036         return -1;
1037     }
1038 
1039     p = (void *)((uintptr_t)addr + entry->addend);
1040     return qemu_ram_addr_from_host_nofail(p);
1041 }
1042 
1043 /* Probe for whether the specified guest write access is permitted.
1044  * If it is not permitted then an exception will be taken in the same
1045  * way as if this were a real write access (and we will not return).
1046  * Otherwise the function will return, and there will be a valid
1047  * entry in the TLB for this access.
1048  */
1049 void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1050                  uintptr_t retaddr)
1051 {
1052     uintptr_t index = tlb_index(env, mmu_idx, addr);
1053     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1054 
1055     if (!tlb_hit(tlb_addr_write(entry), addr)) {
1056         /* TLB entry is for a different page */
1057         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1058             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1059                      mmu_idx, retaddr);
1060         }
1061     }
1062 }
1063 
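/*
 * Return the host address backing a guest virtual address, or NULL if
 * the access cannot be made directly (the non-faulting fill fails, or
 * the entry carries any slow-path flag such as TLB_MMIO).
 */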
1064 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1065                         MMUAccessType access_type, int mmu_idx)
1066 {
1067     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1068     uintptr_t tlb_addr, page;
1069     size_t elt_ofs;
1070 
1071     switch (access_type) {
1072     case MMU_DATA_LOAD:
1073         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1074         break;
1075     case MMU_DATA_STORE:
1076         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1077         break;
1078     case MMU_INST_FETCH:
1079         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1080         break;
1081     default:
1082         g_assert_not_reached();
1083     }
1084 
1085     page = addr & TARGET_PAGE_MASK;
1086     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1087 
1088     if (!tlb_hit_page(tlb_addr, page)) {
1089         uintptr_t index = tlb_index(env, mmu_idx, addr);
1090 
1091         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
1092             CPUState *cs = ENV_GET_CPU(env);
1093             CPUClass *cc = CPU_GET_CLASS(cs);
1094 
1095             if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
1096                 /* Non-faulting page table read failed.  */
1097                 return NULL;
1098             }
1099 
1100             /* TLB resize via tlb_fill may have moved the entry.  */
1101             entry = tlb_entry(env, mmu_idx, addr);
1102         }
1103         tlb_addr = tlb_read_ofs(entry, elt_ofs);
1104     }
1105 
1106     if (tlb_addr & ~TARGET_PAGE_MASK) {
1107         /* IO access */
1108         return NULL;
1109     }
1110 
1111     return (void *)((uintptr_t)addr + entry->addend);
1112 }
1113 
1114 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1115  * operations, or io operations to proceed.  Return the host address.  */
1116 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1117                                TCGMemOpIdx oi, uintptr_t retaddr,
1118                                NotDirtyInfo *ndi)
1119 {
1120     size_t mmu_idx = get_mmuidx(oi);
1121     uintptr_t index = tlb_index(env, mmu_idx, addr);
1122     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1123     target_ulong tlb_addr = tlb_addr_write(tlbe);
1124     TCGMemOp mop = get_memop(oi);
1125     int a_bits = get_alignment_bits(mop);
1126     int s_bits = mop & MO_SIZE;
1127     void *hostaddr;
1128 
1129     /* Adjust the given return address.  */
1130     retaddr -= GETPC_ADJ;
1131 
1132     /* Enforce guest required alignment.  */
1133     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1134         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1135         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1136                              mmu_idx, retaddr);
1137     }
1138 
1139     /* Enforce qemu required alignment.  */
1140     if (unlikely(addr & ((1 << s_bits) - 1))) {
1141         /* We get here if guest alignment was not requested,
1142            or was not enforced by cpu_unaligned_access above.
1143            We might widen the access and emulate, but for now
1144            mark an exception and exit the cpu loop.  */
1145         goto stop_the_world;
1146     }
1147 
1148     /* Check TLB entry and enforce page permissions.  */
1149     if (!tlb_hit(tlb_addr, addr)) {
1150         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1151             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
1152                      mmu_idx, retaddr);
1153             index = tlb_index(env, mmu_idx, addr);
1154             tlbe = tlb_entry(env, mmu_idx, addr);
1155         }
1156         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1157     }
1158 
1159     /* Notice an IO access or a needs-MMU-lookup access */
1160     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1161         /* There's really nothing that can be done to
1162            support this apart from stop-the-world.  */
1163         goto stop_the_world;
1164     }
1165 
1166     /* Let the guest notice RMW on a write-only page.  */
1167     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1168         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
1169                  mmu_idx, retaddr);
1170         /* Since we don't support reads and writes to different addresses,
1171            and we do have the proper page loaded for write, this shouldn't
1172            ever return.  But just in case, handle via stop-the-world.  */
1173         goto stop_the_world;
1174     }
1175 
1176     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1177 
1178     ndi->active = false;
1179     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1180         ndi->active = true;
1181         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
1182                                       qemu_ram_addr_from_host_nofail(hostaddr),
1183                                       1 << s_bits);
1184     }
1185 
1186     return hostaddr;
1187 
1188  stop_the_world:
1189     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1190 }
1191 
1192 #ifdef TARGET_WORDS_BIGENDIAN
1193 #define NEED_BE_BSWAP 0
1194 #define NEED_LE_BSWAP 1
1195 #else
1196 #define NEED_BE_BSWAP 1
1197 #define NEED_LE_BSWAP 0
1198 #endif
1199 
1200 /*
1201  * Byte Swap Helper
1202  *
1203  * All of this should compile away to dead code, depending on the
1204  * build-time target endianness and the access type.
1205  */
1206 
1207 static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
1208 {
1209     if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
1210         switch (size) {
1211         case 1: return val;
1212         case 2: return bswap16(val);
1213         case 4: return bswap32(val);
1214         case 8: return bswap64(val);
1215         default:
1216             g_assert_not_reached();
1217         }
1218     } else {
1219         return val;
1220     }
1221 }
1222 
1223 /*
1224  * Load Helpers
1225  *
1226  * We support two different access types. SOFTMMU_CODE_ACCESS is
1227  * specifically for reading instructions from system memory. It is
1228  * called by the translation loop and in some helpers where the code
1229  * is disassembled. It shouldn't be called directly by guest code.
1230  */
1231 
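/*
 * The full_load callback passed to load_helper lets an access that
 * spans two pages recurse through the correct size-specific entry
 * point (see the do_unaligned_access path below).
 */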
1232 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1233                                 TCGMemOpIdx oi, uintptr_t retaddr);
1234 
1235 static inline uint64_t __attribute__((always_inline))
1236 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1237             uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
1238             FullLoadHelper *full_load)
1239 {
1240     uintptr_t mmu_idx = get_mmuidx(oi);
1241     uintptr_t index = tlb_index(env, mmu_idx, addr);
1242     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1243     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1244     const size_t tlb_off = code_read ?
1245         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1246     const MMUAccessType access_type =
1247         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1248     unsigned a_bits = get_alignment_bits(get_memop(oi));
1249     void *haddr;
1250     uint64_t res;
1251 
1252     /* Handle CPU specific unaligned behaviour */
1253     if (addr & ((1 << a_bits) - 1)) {
1254         cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
1255                              mmu_idx, retaddr);
1256     }
1257 
1258     /* If the TLB entry is for a different page, reload and try again.  */
1259     if (!tlb_hit(tlb_addr, addr)) {
1260         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1261                             addr & TARGET_PAGE_MASK)) {
1262             tlb_fill(ENV_GET_CPU(env), addr, size,
1263                      access_type, mmu_idx, retaddr);
1264             index = tlb_index(env, mmu_idx, addr);
1265             entry = tlb_entry(env, mmu_idx, addr);
1266         }
1267         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1268     }
1269 
1270     /* Handle an IO access.  */
1271     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1272         if ((addr & (size - 1)) != 0) {
1273             goto do_unaligned_access;
1274         }
1275 
1276         if (tlb_addr & TLB_RECHECK) {
1277             /*
1278              * This is a TLB_RECHECK access, where the MMU protection
1279              * covers a smaller range than a target page, and we must
1280              * repeat the MMU check here. This tlb_fill() call might
1281              * longjump out if this access should cause a guest exception.
1282              */
1283             tlb_fill(ENV_GET_CPU(env), addr, size,
1284                      access_type, mmu_idx, retaddr);
1285             index = tlb_index(env, mmu_idx, addr);
1286             entry = tlb_entry(env, mmu_idx, addr);
1287 
1288             tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1289             tlb_addr &= ~TLB_RECHECK;
1290             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1291                 /* RAM access */
1292                 goto do_aligned_access;
1293             }
1294         }
1295 
1296         res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
1297                        retaddr, access_type, size);
1298         return handle_bswap(res, size, big_endian);
1299     }
1300 
1301     /* Handle slow unaligned access (it spans two pages or IO).  */
1302     if (size > 1
1303         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1304                     >= TARGET_PAGE_SIZE)) {
1305         target_ulong addr1, addr2;
1306         tcg_target_ulong r1, r2;
1307         unsigned shift;
1308     do_unaligned_access:
1309         addr1 = addr & ~(size - 1);
1310         addr2 = addr1 + size;
1311         r1 = full_load(env, addr1, oi, retaddr);
1312         r2 = full_load(env, addr2, oi, retaddr);
1313         shift = (addr & (size - 1)) * 8;
1314 
1315         if (big_endian) {
1316             /* Big-endian combine.  */
1317             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1318         } else {
1319             /* Little-endian combine.  */
1320             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1321         }
1322         return res & MAKE_64BIT_MASK(0, size * 8);
1323     }
1324 
1325  do_aligned_access:
1326     haddr = (void *)((uintptr_t)addr + entry->addend);
1327     switch (size) {
1328     case 1:
1329         res = ldub_p(haddr);
1330         break;
1331     case 2:
1332         if (big_endian) {
1333             res = lduw_be_p(haddr);
1334         } else {
1335             res = lduw_le_p(haddr);
1336         }
1337         break;
1338     case 4:
1339         if (big_endian) {
1340             res = (uint32_t)ldl_be_p(haddr);
1341         } else {
1342             res = (uint32_t)ldl_le_p(haddr);
1343         }
1344         break;
1345     case 8:
1346         if (big_endian) {
1347             res = ldq_be_p(haddr);
1348         } else {
1349             res = ldq_le_p(haddr);
1350         }
1351         break;
1352     default:
1353         g_assert_not_reached();
1354     }
1355 
1356     return res;
1357 }
1358 
1359 /*
1360  * For the benefit of TCG generated code, we want to avoid the
1361  * complication of ABI-specific return type promotion and always
1362  * return a value extended to the register size of the host. This is
1363  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1364  * data, and for that we always have uint64_t.
1365  *
1366  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1367  */
1368 
1369 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1370                               TCGMemOpIdx oi, uintptr_t retaddr)
1371 {
1372     return load_helper(env, addr, oi, retaddr, 1, false, false,
1373                        full_ldub_mmu);
1374 }
1375 
1376 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1377                                      TCGMemOpIdx oi, uintptr_t retaddr)
1378 {
1379     return full_ldub_mmu(env, addr, oi, retaddr);
1380 }
1381 
1382 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1383                                  TCGMemOpIdx oi, uintptr_t retaddr)
1384 {
1385     return load_helper(env, addr, oi, retaddr, 2, false, false,
1386                        full_le_lduw_mmu);
1387 }
1388 
1389 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1390                                     TCGMemOpIdx oi, uintptr_t retaddr)
1391 {
1392     return full_le_lduw_mmu(env, addr, oi, retaddr);
1393 }
1394 
1395 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1396                                  TCGMemOpIdx oi, uintptr_t retaddr)
1397 {
1398     return load_helper(env, addr, oi, retaddr, 2, true, false,
1399                        full_be_lduw_mmu);
1400 }
1401 
1402 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1403                                     TCGMemOpIdx oi, uintptr_t retaddr)
1404 {
1405     return full_be_lduw_mmu(env, addr, oi, retaddr);
1406 }
1407 
1408 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1409                                  TCGMemOpIdx oi, uintptr_t retaddr)
1410 {
1411     return load_helper(env, addr, oi, retaddr, 4, false, false,
1412                        full_le_ldul_mmu);
1413 }
1414 
1415 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1416                                     TCGMemOpIdx oi, uintptr_t retaddr)
1417 {
1418     return full_le_ldul_mmu(env, addr, oi, retaddr);
1419 }
1420 
1421 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1422                                  TCGMemOpIdx oi, uintptr_t retaddr)
1423 {
1424     return load_helper(env, addr, oi, retaddr, 4, true, false,
1425                        full_be_ldul_mmu);
1426 }
1427 
1428 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1429                                     TCGMemOpIdx oi, uintptr_t retaddr)
1430 {
1431     return full_be_ldul_mmu(env, addr, oi, retaddr);
1432 }
1433 
1434 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1435                            TCGMemOpIdx oi, uintptr_t retaddr)
1436 {
1437     return load_helper(env, addr, oi, retaddr, 8, false, false,
1438                        helper_le_ldq_mmu);
1439 }
1440 
1441 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1442                            TCGMemOpIdx oi, uintptr_t retaddr)
1443 {
1444     return load_helper(env, addr, oi, retaddr, 8, true, false,
1445                        helper_be_ldq_mmu);
1446 }
1447 
1448 /*
1449  * Provide signed versions of the load routines as well.  We can of course
1450  * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
1451  */
1452 
1453 
1454 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1455                                      TCGMemOpIdx oi, uintptr_t retaddr)
1456 {
1457     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1458 }
1459 
1460 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1461                                     TCGMemOpIdx oi, uintptr_t retaddr)
1462 {
1463     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1464 }
1465 
1466 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1467                                     TCGMemOpIdx oi, uintptr_t retaddr)
1468 {
1469     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1470 }
1471 
1472 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1473                                     TCGMemOpIdx oi, uintptr_t retaddr)
1474 {
1475     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1476 }
1477 
1478 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1479                                     TCGMemOpIdx oi, uintptr_t retaddr)
1480 {
1481     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1482 }
1483 
1484 /*
1485  * Store Helpers
1486  */
1487 
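/*
 * As with load_helper above: handle guest alignment faults, TLB refill,
 * I/O and TLB_RECHECK slow paths, and stores that span two pages.
 */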
1488 static inline void __attribute__((always_inline))
1489 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1490              TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
1491 {
1492     uintptr_t mmu_idx = get_mmuidx(oi);
1493     uintptr_t index = tlb_index(env, mmu_idx, addr);
1494     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1495     target_ulong tlb_addr = tlb_addr_write(entry);
1496     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1497     unsigned a_bits = get_alignment_bits(get_memop(oi));
1498     void *haddr;
1499 
1500     /* Handle CPU-specific unaligned behaviour.  */
1501     if (addr & ((1 << a_bits) - 1)) {
1502         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1503                              mmu_idx, retaddr);
1504     }
1505 
1506     /* If the TLB entry is for a different page, reload and try again.  */
1507     if (!tlb_hit(tlb_addr, addr)) {
1508         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1509             addr & TARGET_PAGE_MASK)) {
1510             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1511                      mmu_idx, retaddr);
1512             index = tlb_index(env, mmu_idx, addr);
1513             entry = tlb_entry(env, mmu_idx, addr);
1514         }
1515         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1516     }
1517 
1518     /* Handle an IO access.  */
1519     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1520         if ((addr & (size - 1)) != 0) {
1521             goto do_unaligned_access;
1522         }
1523 
1524         if (tlb_addr & TLB_RECHECK) {
1525             /*
1526              * This is a TLB_RECHECK access, where the MMU protection
1527              * covers a smaller range than a target page, and we must
1528              * repeat the MMU check here. This tlb_fill() call might
1529              * longjump out if this access should cause a guest exception.
1530              */
1531             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1532                      mmu_idx, retaddr);
1533             index = tlb_index(env, mmu_idx, addr);
1534             entry = tlb_entry(env, mmu_idx, addr);
1535 
1536             tlb_addr = tlb_addr_write(entry);
1537             tlb_addr &= ~TLB_RECHECK;
1538             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1539                 /* RAM access */
1540                 goto do_aligned_access;
1541             }
1542         }
1543 
1544         io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
1545                   handle_bswap(val, size, big_endian),
1546                   addr, retaddr, size);
1547         return;
1548     }
1549 
1550     /* Handle slow unaligned access (it spans two pages or IO).  */
1551     if (size > 1
1552         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1553                      >= TARGET_PAGE_SIZE)) {
1554         int i;
1555         uintptr_t index2;
1556         CPUTLBEntry *entry2;
1557         target_ulong page2, tlb_addr2;
1558     do_unaligned_access:
1559         /*
1560          * Ensure the second page is in the TLB.  Note that the first page
1561          * is already guaranteed to be filled, and that the second page
1562          * cannot evict the first.
1563          */
1564         page2 = (addr + size) & TARGET_PAGE_MASK;
1565         index2 = tlb_index(env, mmu_idx, page2);
1566         entry2 = tlb_entry(env, mmu_idx, page2);
1567         tlb_addr2 = tlb_addr_write(entry2);
1568         if (!tlb_hit_page(tlb_addr2, page2)
1569             && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
1570                                page2 & TARGET_PAGE_MASK)) {
1571             tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
1572                      mmu_idx, retaddr);
1573         }
1574 
1575         /*
1576          * XXX: not efficient, but simple.
1577          * This loop must go in the forward direction to avoid issues
1578          * with self-modifying code on 64-bit Windows.
1579          */
1580         for (i = 0; i < size; ++i) {
1581             uint8_t val8;
1582             if (big_endian) {
1583                 /* Big-endian extract.  */
1584                 val8 = val >> (((size - 1) * 8) - (i * 8));
1585             } else {
1586                 /* Little-endian extract.  */
1587                 val8 = val >> (i * 8);
1588             }
1589             helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
1590         }
1591         return;
1592     }
1593 
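    /* RAM fast path: store through the host address (addr + TLB addend).  */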
1594  do_aligned_access:
1595     haddr = (void *)((uintptr_t)addr + entry->addend);
1596     switch (size) {
1597     case 1:
1598         stb_p(haddr, val);
1599         break;
1600     case 2:
1601         if (big_endian) {
1602             stw_be_p(haddr, val);
1603         } else {
1604             stw_le_p(haddr, val);
1605         }
1606         break;
1607     case 4:
1608         if (big_endian) {
1609             stl_be_p(haddr, val);
1610         } else {
1611             stl_le_p(haddr, val);
1612         }
1613         break;
1614     case 8:
1615         if (big_endian) {
1616             stq_be_p(haddr, val);
1617         } else {
1618             stq_le_p(haddr, val);
1619         }
1620         break;
1621     default:
1622         g_assert_not_reached();
1623         break;
1624     }
1625 }
1626 
1627 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1628                         TCGMemOpIdx oi, uintptr_t retaddr)
1629 {
1630     store_helper(env, addr, val, oi, retaddr, 1, false);
1631 }
1632 
1633 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1634                        TCGMemOpIdx oi, uintptr_t retaddr)
1635 {
1636     store_helper(env, addr, val, oi, retaddr, 2, false);
1637 }
1638 
1639 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1640                        TCGMemOpIdx oi, uintptr_t retaddr)
1641 {
1642     store_helper(env, addr, val, oi, retaddr, 2, true);
1643 }
1644 
1645 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1646                        TCGMemOpIdx oi, uintptr_t retaddr)
1647 {
1648     store_helper(env, addr, val, oi, retaddr, 4, false);
1649 }
1650 
1651 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1652                        TCGMemOpIdx oi, uintptr_t retaddr)
1653 {
1654     store_helper(env, addr, val, oi, retaddr, 4, true);
1655 }
1656 
1657 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1658                        TCGMemOpIdx oi, uintptr_t retaddr)
1659 {
1660     store_helper(env, addr, val, oi, retaddr, 8, false);
1661 }
1662 
1663 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1664                        TCGMemOpIdx oi, uintptr_t retaddr)
1665 {
1666     store_helper(env, addr, val, oi, retaddr, 8, true);
1667 }
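/*
 * Illustrative sketch only: a target helper wanting to reuse these softmmu
 * store routines would build the TCGMemOpIdx itself and pass its own
 * return address, along the lines of
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *     helper_le_stl_mmu(env, addr, val, oi, GETPC());
 *
 * In normal operation these entry points are instead emitted by the TCG
 * backends as the slow path of the qemu_st* opcodes.
 */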
1668 
1669 /* The first set of helpers takes OI and RETADDR as explicit arguments.
1670    This makes them callable from other helpers.  */
1671 
1672 #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1673 #define ATOMIC_NAME(X) \
1674     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1675 #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
1676 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
1677 #define ATOMIC_MMU_CLEANUP                              \
1678     do {                                                \
1679         if (unlikely(ndi.active)) {                     \
1680             memory_notdirty_write_complete(&ndi);       \
1681         }                                               \
1682     } while (0)
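/*
 * atomic_template.h is included once per DATA_SIZE below; each inclusion
 * expands into the cmpxchg/xchg/fetch-and-op helpers of that width, using
 * the macros above to build the helper names, translate the guest address,
 * and complete any deferred notdirty write tracking.
 */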
1683 
1684 #define DATA_SIZE 1
1685 #include "atomic_template.h"
1686 
1687 #define DATA_SIZE 2
1688 #include "atomic_template.h"
1689 
1690 #define DATA_SIZE 4
1691 #include "atomic_template.h"
1692 
1693 #ifdef CONFIG_ATOMIC64
1694 #define DATA_SIZE 8
1695 #include "atomic_template.h"
1696 #endif
1697 
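/* 16-byte helpers are only generated when the host has 128-bit
 * compare-and-swap or atomic load/store support.
 */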
1698 #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1699 #define DATA_SIZE 16
1700 #include "atomic_template.h"
1701 #endif
1702 
1703 /* The second set of helpers is directly callable from TCG-generated code as helpers.  */
1704 
1705 #undef EXTRA_ARGS
1706 #undef ATOMIC_NAME
1707 #undef ATOMIC_MMU_LOOKUP
1708 #define EXTRA_ARGS         , TCGMemOpIdx oi
1709 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
1710 #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
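/*
 * With no explicit retaddr argument, GETPC() recovers the return address
 * into the TCG-generated code that called the helper, so a fault is still
 * attributed to the correct guest instruction.
 */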
1711 
1712 #define DATA_SIZE 1
1713 #include "atomic_template.h"
1714 
1715 #define DATA_SIZE 2
1716 #include "atomic_template.h"
1717 
1718 #define DATA_SIZE 4
1719 #include "atomic_template.h"
1720 
1721 #ifdef CONFIG_ATOMIC64
1722 #define DATA_SIZE 8
1723 #include "atomic_template.h"
1724 #endif
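/* There are no 16-byte helpers in this set; the 128-bit atomics are only
 * exposed through the _mmu entry points above.
 */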
1725 
1726 /* Code access functions.  */
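/*
 * The _cmmu variants differ from the data loads above only in passing true
 * for load_helper()'s code-read flag, so the lookup and refill are done as
 * an instruction fetch (MMU_INST_FETCH) rather than a data read.  They back
 * the cpu_ld*_code accessors used by the translators.
 */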
1727 
1728 static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
1729                                TCGMemOpIdx oi, uintptr_t retaddr)
1730 {
1731     return load_helper(env, addr, oi, retaddr, 1, false, true,
1732                        full_ldub_cmmu);
1733 }
1734 
1735 uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1736                             TCGMemOpIdx oi, uintptr_t retaddr)
1737 {
1738     return full_ldub_cmmu(env, addr, oi, retaddr);
1739 }
1740 
1741 static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
1742                                   TCGMemOpIdx oi, uintptr_t retaddr)
1743 {
1744     return load_helper(env, addr, oi, retaddr, 2, false, true,
1745                        full_le_lduw_cmmu);
1746 }
1747 
1748 uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1749                             TCGMemOpIdx oi, uintptr_t retaddr)
1750 {
1751     return full_le_lduw_cmmu(env, addr, oi, retaddr);
1752 }
1753 
1754 static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
1755                                   TCGMemOpIdx oi, uintptr_t retaddr)
1756 {
1757     return load_helper(env, addr, oi, retaddr, 2, true, true,
1758                        full_be_lduw_cmmu);
1759 }
1760 
1761 uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1762                             TCGMemOpIdx oi, uintptr_t retaddr)
1763 {
1764     return full_be_lduw_cmmu(env, addr, oi, retaddr);
1765 }
1766 
1767 static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
1768                                   TCGMemOpIdx oi, uintptr_t retaddr)
1769 {
1770     return load_helper(env, addr, oi, retaddr, 4, false, true,
1771                        full_le_ldul_cmmu);
1772 }
1773 
1774 uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1775                             TCGMemOpIdx oi, uintptr_t retaddr)
1776 {
1777     return full_le_ldul_cmmu(env, addr, oi, retaddr);
1778 }
1779 
1780 static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
1781                                   TCGMemOpIdx oi, uintptr_t retaddr)
1782 {
1783     return load_helper(env, addr, oi, retaddr, 4, true, true,
1784                        full_be_ldul_cmmu);
1785 }
1786 
1787 uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1788                             TCGMemOpIdx oi, uintptr_t retaddr)
1789 {
1790     return full_be_ldul_cmmu(env, addr, oi, retaddr);
1791 }
1792 
1793 uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1794                             TCGMemOpIdx oi, uintptr_t retaddr)
1795 {
1796     return load_helper(env, addr, oi, retaddr, 8, false, true,
1797                        helper_le_ldq_cmmu);
1798 }
1799 
1800 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1801                             TCGMemOpIdx oi, uintptr_t retaddr)
1802 {
1803     return load_helper(env, addr, oi, retaddr, 8, true, true,
1804                        helper_be_ldq_cmmu);
1805 }
1806