xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 9bf825bf)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
36 
37 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
38 /* #define DEBUG_TLB */
39 /* #define DEBUG_TLB_LOG */
40 
41 #ifdef DEBUG_TLB
42 # define DEBUG_TLB_GATE 1
43 # ifdef DEBUG_TLB_LOG
44 #  define DEBUG_TLB_LOG_GATE 1
45 # else
46 #  define DEBUG_TLB_LOG_GATE 0
47 # endif
48 #else
49 # define DEBUG_TLB_GATE 0
50 # define DEBUG_TLB_LOG_GATE 0
51 #endif
52 
53 #define tlb_debug(fmt, ...) do { \
54     if (DEBUG_TLB_LOG_GATE) { \
55         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
56                       ## __VA_ARGS__); \
57     } else if (DEBUG_TLB_GATE) { \
58         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
59     } \
60 } while (0)
61 
62 #define assert_cpu_is_self(cpu) do {                              \
63         if (DEBUG_TLB_GATE) {                                     \
64             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
65         }                                                         \
66     } while (0)
67 
68 /* run_on_cpu_data.target_ptr should always be big enough for a
69  * target_ulong even on 32-bit builds */
70 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
71 
72 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
73  */
74 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
75 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
76 
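/* Size in bytes of one mmu_idx's TLB table: the fast-path mask holds
 * (n_entries - 1) << CPU_TLB_ENTRY_BITS, so adding one entry's worth of
 * bytes recovers the full table size.
 */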
77 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
78 {
79     return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
80 }
81 
82 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
83                              size_t max_entries)
84 {
85     desc->window_begin_ns = ns;
86     desc->window_max_entries = max_entries;
87 }
88 
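/* Allocate every mmu_idx's TLB at the default dynamic size and start an
 * empty resize window.
 */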
89 static void tlb_dyn_init(CPUArchState *env)
90 {
91     int i;
92 
93     for (i = 0; i < NB_MMU_MODES; i++) {
94         CPUTLBDesc *desc = &env_tlb(env)->d[i];
95         size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
96 
97         tlb_window_reset(desc, get_clock_realtime(), 0);
98         desc->n_used_entries = 0;
99         env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
100         env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
101         env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
102     }
103 }
104 
105 /**
106  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
107  * @env: CPU that owns the TLB
108  * @mmu_idx: MMU index of the TLB
109  *
110  * Called with tlb_lock held.
111  *
112  * We have two main constraints when resizing a TLB: (1) we only resize it
113  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
114  * the array or unnecessarily flushing it), which means we do not control how
115  * frequently the resizing can occur; (2) we don't have access to the guest's
116  * future scheduling decisions, and therefore have to decide the magnitude of
117  * the resize based on past observations.
118  *
119  * In general, a memory-hungry process can benefit greatly from an appropriately
120  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
121  * we just have to make the TLB as large as possible; while an oversized TLB
122  * results in minimal TLB miss rates, it also takes longer to be flushed
123  * (flushes can be _very_ frequent), and the reduced locality can also hurt
124  * performance.
125  *
126  * To achieve near-optimal performance for all kinds of workloads, we:
127  *
128  * 1. Aggressively increase the size of the TLB when the use rate of the
129  * TLB being flushed is high, since it is likely that in the near future this
130  * memory-hungry process will execute again, and its memory hungriness will
131  * probably be similar.
132  *
133  * 2. Slowly reduce the size of the TLB as the use rate declines over a
134  * reasonably large time window. The rationale is that if in such a time window
135  * we have not observed a high TLB use rate, it is likely that we won't observe
136  * it in the near future. In that case, once a time window expires we downsize
137  * the TLB to match the maximum use rate observed in the window.
138  *
139  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
140  * since in that range performance is likely near-optimal. Recall that the TLB
141  * is direct-mapped, so we want the use rate to be low (or at least not too
142  * high), since otherwise we are likely to have a significant number of
143  * conflict misses.
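 *
 * Worked example (illustrative numbers, not from a trace): with old_size
 * == 1024 and window_max_entries == 800, rate == 78%, so the table grows
 * to 2048 entries.  With window_max_entries == 200 and an expired window,
 * rate == 19%; pow2ceil(200) == 256 would give an expected rate of
 * 78% > 70%, so ceil is doubled to 512 entries (expected rate ~39%).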
144  */
145 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
146 {
147     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
148     size_t old_size = tlb_n_entries(env, mmu_idx);
149     size_t rate;
150     size_t new_size = old_size;
151     int64_t now = get_clock_realtime();
152     int64_t window_len_ms = 100;
153     int64_t window_len_ns = window_len_ms * 1000 * 1000;
154     bool window_expired = now > desc->window_begin_ns + window_len_ns;
155 
156     if (desc->n_used_entries > desc->window_max_entries) {
157         desc->window_max_entries = desc->n_used_entries;
158     }
159     rate = desc->window_max_entries * 100 / old_size;
160 
161     if (rate > 70) {
162         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
163     } else if (rate < 30 && window_expired) {
164         size_t ceil = pow2ceil(desc->window_max_entries);
165         size_t expected_rate = desc->window_max_entries * 100 / ceil;
166 
167         /*
168          * Avoid undersizing when the max number of entries seen is just below
169          * a pow2. For instance, if max_entries == 1025, the expected use rate
170          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
171          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
172          * later. Thus, make sure that the expected use rate remains below 70%
173          * (and since we double the size, the lowest rate we'd then expect to
174          * get is 35%, which is still in the 30-70% range where we consider
175          * the size appropriate).
176          */
177         if (expected_rate > 70) {
178             ceil *= 2;
179         }
180         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
181     }
182 
183     if (new_size == old_size) {
184         if (window_expired) {
185             tlb_window_reset(desc, now, desc->n_used_entries);
186         }
187         return;
188     }
189 
190     g_free(env_tlb(env)->f[mmu_idx].table);
191     g_free(env_tlb(env)->d[mmu_idx].iotlb);
192 
193     tlb_window_reset(desc, now, 0);
194     /* desc->n_used_entries is cleared by the caller */
195     env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
196     env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
197     env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
198     /*
199      * If the allocations fail, try smaller sizes. We just freed some
200      * memory, so going back to half of new_size has a good chance of working.
201      * Increased memory pressure elsewhere in the system might cause the
202      * allocations to fail though, so we progressively reduce the allocation
203      * size, aborting if we cannot even allocate the smallest TLB we support.
204      */
205     while (env_tlb(env)->f[mmu_idx].table == NULL ||
206            env_tlb(env)->d[mmu_idx].iotlb == NULL) {
207         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
208             error_report("%s: %s", __func__, strerror(errno));
209             abort();
210         }
211         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
212         env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
213 
214         g_free(env_tlb(env)->f[mmu_idx].table);
215         g_free(env_tlb(env)->d[mmu_idx].iotlb);
216         env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
217         env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
218     }
219 }
220 
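/* Resize one mmu_idx's table if warranted, then wipe it: every field is set
 * to -1, the pattern treated as "empty" by tlb_entry_is_empty().
 */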
221 static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
222 {
223     tlb_mmu_resize_locked(env, mmu_idx);
224     memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
225     env_tlb(env)->d[mmu_idx].n_used_entries = 0;
226 }
227 
228 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
229 {
230     env_tlb(env)->d[mmu_idx].n_used_entries++;
231 }
232 
233 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
234 {
235     env_tlb(env)->d[mmu_idx].n_used_entries--;
236 }
237 
238 void tlb_init(CPUState *cpu)
239 {
240     CPUArchState *env = cpu->env_ptr;
241 
242     qemu_spin_init(&env_tlb(env)->c.lock);
243 
244     /* Ensure that cpu_reset performs a full flush.  */
245     env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
246 
247     tlb_dyn_init(env);
248 }
249 
250 /* flush_all_helper: run fn across all cpus
251  *
252  * The helper is queued asynchronously on every cpu other than src.
253  * Callers that need a synchronisation point additionally queue the src
254  * cpu's helper as "safe" work, so that all queued work is finished
255  * before execution starts again.
256  */
257 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
258                              run_on_cpu_data d)
259 {
260     CPUState *cpu;
261 
262     CPU_FOREACH(cpu) {
263         if (cpu != src) {
264             async_run_on_cpu(cpu, fn, d);
265         }
266     }
267 }
268 
269 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
270 {
271     CPUState *cpu;
272     size_t full = 0, part = 0, elide = 0;
273 
274     CPU_FOREACH(cpu) {
275         CPUArchState *env = cpu->env_ptr;
276 
277         full += atomic_read(&env_tlb(env)->c.full_flush_count);
278         part += atomic_read(&env_tlb(env)->c.part_flush_count);
279         elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
280     }
281     *pfull = full;
282     *ppart = part;
283     *pelide = elide;
284 }
285 
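/* Called with tlb_c.lock held: flush one mmu_idx's main table, victim table
 * and large-page tracking state.
 */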
286 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
287 {
288     tlb_table_flush_by_mmuidx(env, mmu_idx);
289     env_tlb(env)->d[mmu_idx].large_page_addr = -1;
290     env_tlb(env)->d[mmu_idx].large_page_mask = -1;
291     env_tlb(env)->d[mmu_idx].vindex = 0;
292     memset(env_tlb(env)->d[mmu_idx].vtable, -1,
293            sizeof(env_tlb(env)->d[0].vtable));
294 }
295 
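/* Flush the TLBs whose mmu_idx bits in data.host_int are still marked dirty,
 * and account the result as a full, partial or elided flush.
 */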
296 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
297 {
298     CPUArchState *env = cpu->env_ptr;
299     uint16_t asked = data.host_int;
300     uint16_t all_dirty, work, to_clean;
301 
302     assert_cpu_is_self(cpu);
303 
304     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
305 
306     qemu_spin_lock(&env_tlb(env)->c.lock);
307 
308     all_dirty = env_tlb(env)->c.dirty;
309     to_clean = asked & all_dirty;
310     all_dirty &= ~to_clean;
311     env_tlb(env)->c.dirty = all_dirty;
312 
313     for (work = to_clean; work != 0; work &= work - 1) {
314         int mmu_idx = ctz32(work);
315         tlb_flush_one_mmuidx_locked(env, mmu_idx);
316     }
317 
318     qemu_spin_unlock(&env_tlb(env)->c.lock);
319 
320     cpu_tb_jmp_cache_clear(cpu);
321 
322     if (to_clean == ALL_MMUIDX_BITS) {
323         atomic_set(&env_tlb(env)->c.full_flush_count,
324                    env_tlb(env)->c.full_flush_count + 1);
325     } else {
326         atomic_set(&env_tlb(env)->c.part_flush_count,
327                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
328         if (to_clean != asked) {
329             atomic_set(&env_tlb(env)->c.elide_flush_count,
330                        env_tlb(env)->c.elide_flush_count +
331                        ctpop16(asked & ~to_clean));
332         }
333     }
334 }
335 
336 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
337 {
338     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
339 
340     if (cpu->created && !qemu_cpu_is_self(cpu)) {
341         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
342                          RUN_ON_CPU_HOST_INT(idxmap));
343     } else {
344         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
345     }
346 }
347 
348 void tlb_flush(CPUState *cpu)
349 {
350     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
351 }
352 
353 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
354 {
355     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
356 
357     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
358 
359     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
360     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
361 }
362 
363 void tlb_flush_all_cpus(CPUState *src_cpu)
364 {
365     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
366 }
367 
368 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
369 {
370     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
371 
372     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
373 
374     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
375     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
376 }
377 
378 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
379 {
380     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
381 }
382 
383 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
384                                         target_ulong page)
385 {
386     return tlb_hit_page(tlb_entry->addr_read, page) ||
387            tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
388            tlb_hit_page(tlb_entry->addr_code, page);
389 }
390 
391 /**
392  * tlb_entry_is_empty - return true if the entry is not in use
393  * @te: pointer to CPUTLBEntry
394  */
395 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
396 {
397     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
398 }
399 
400 /* Called with tlb_c.lock held */
401 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
402                                           target_ulong page)
403 {
404     if (tlb_hit_page_anyprot(tlb_entry, page)) {
405         memset(tlb_entry, -1, sizeof(*tlb_entry));
406         return true;
407     }
408     return false;
409 }
410 
411 /* Called with tlb_c.lock held */
412 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
413                                               target_ulong page)
414 {
415     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
416     int k;
417 
418     assert_cpu_is_self(env_cpu(env));
419     for (k = 0; k < CPU_VTLB_SIZE; k++) {
420         if (tlb_flush_entry_locked(&d->vtable[k], page)) {
421             tlb_n_used_entries_dec(env, mmu_idx);
422         }
423     }
424 }
425 
426 static void tlb_flush_page_locked(CPUArchState *env, int midx,
427                                   target_ulong page)
428 {
429     target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
430     target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
431 
432     /* Check if we need to flush due to large pages.  */
433     if ((page & lp_mask) == lp_addr) {
434         tlb_debug("forcing full flush midx %d ("
435                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
436                   midx, lp_addr, lp_mask);
437         tlb_flush_one_mmuidx_locked(env, midx);
438     } else {
439         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
440             tlb_n_used_entries_dec(env, midx);
441         }
442         tlb_flush_vtlb_page_locked(env, midx, page);
443     }
444 }
445 
446 /* As we are going to hijack the bottom bits of the page address for an
447  * mmuidx bit mask, we need to fail the build if we can't do that
448  */
449 QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
450 
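/* Flush one page from every TLB selected by the mmuidx bitmap packed into
 * the low bits of data.target_ptr; the page address occupies the rest.
 */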
451 static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
452                                                 run_on_cpu_data data)
453 {
454     CPUArchState *env = cpu->env_ptr;
455     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
456     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
457     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
458     int mmu_idx;
459 
460     assert_cpu_is_self(cpu);
461 
462     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
463               addr, mmu_idx_bitmap);
464 
465     qemu_spin_lock(&env_tlb(env)->c.lock);
466     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
467         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
468             tlb_flush_page_locked(env, mmu_idx, addr);
469         }
470     }
471     qemu_spin_unlock(&env_tlb(env)->c.lock);
472 
473     tb_flush_jmp_cache(cpu, addr);
474 }
475 
476 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
477 {
478     target_ulong addr_and_mmu_idx;
479 
480     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
481 
482     /* This should already be page aligned */
483     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
484     addr_and_mmu_idx |= idxmap;
485 
486     if (!qemu_cpu_is_self(cpu)) {
487         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
488                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
489     } else {
490         tlb_flush_page_by_mmuidx_async_work(
491             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
492     }
493 }
494 
495 void tlb_flush_page(CPUState *cpu, target_ulong addr)
496 {
497     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
498 }
499 
500 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
501                                        uint16_t idxmap)
502 {
503     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
504     target_ulong addr_and_mmu_idx;
505 
506     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
507 
508     /* This should already be page aligned */
509     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
510     addr_and_mmu_idx |= idxmap;
511 
512     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
513     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
514 }
515 
516 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
517 {
518     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
519 }
520 
521 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
522                                               target_ulong addr,
523                                               uint16_t idxmap)
524 {
525     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
526     target_ulong addr_and_mmu_idx;
527 
528     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
529 
530     /* This should already be page aligned */
531     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
532     addr_and_mmu_idx |= idxmap;
533 
534     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
535     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
536 }
537 
538 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
539 {
540     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
541 }
542 
543 /* update the TLBs so that writes to code in the physical page 'ram_addr'
544    can be detected */
545 void tlb_protect_code(ram_addr_t ram_addr)
546 {
547     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
548                                              DIRTY_MEMORY_CODE);
549 }
550 
551 /* update the TLB so that writes in physical page 'ram_addr' are no longer
552    tested for self-modifying code */
553 void tlb_unprotect_code(ram_addr_t ram_addr)
554 {
555     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
556 }
557 
558 
559 /*
560  * Dirty write flag handling
561  *
562  * When the TCG code writes to a location it looks up the address in
563  * the TLB and uses that data to compute the final address. If any of
564  * the lower bits of the address are set then the slow path is forced.
565  * There are a number of reasons to do this but for normal RAM the
566  * most usual is detecting writes to code regions which may invalidate
567  * generated code.
568  *
569  * Other vCPUs might be reading their TLBs during guest execution, so we update
570  * te->addr_write with atomic_set. We don't need to worry about this for
571  * oversized guests as MTTCG is disabled for them.
572  *
573  * Called with tlb_c.lock held.
574  */
575 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
576                                          uintptr_t start, uintptr_t length)
577 {
578     uintptr_t addr = tlb_entry->addr_write;
579 
580     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
581         addr &= TARGET_PAGE_MASK;
582         addr += tlb_entry->addend;
583         if ((addr - start) < length) {
584 #if TCG_OVERSIZED_GUEST
585             tlb_entry->addr_write |= TLB_NOTDIRTY;
586 #else
587             atomic_set(&tlb_entry->addr_write,
588                        tlb_entry->addr_write | TLB_NOTDIRTY);
589 #endif
590         }
591     }
592 }
593 
594 /*
595  * Called with tlb_c.lock held.
596  * Called only from the vCPU context, i.e. the TLB's owner thread.
597  */
598 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
599 {
600     *d = *s;
601 }
602 
603 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
604  * the target vCPU).
605  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
606  * thing actually updated is the target TLB entry ->addr_write flags.
607  */
608 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
609 {
610     CPUArchState *env;
611 
612     int mmu_idx;
613 
614     env = cpu->env_ptr;
615     qemu_spin_lock(&env_tlb(env)->c.lock);
616     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
617         unsigned int i;
618         unsigned int n = tlb_n_entries(env, mmu_idx);
619 
620         for (i = 0; i < n; i++) {
621             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
622                                          start1, length);
623         }
624 
625         for (i = 0; i < CPU_VTLB_SIZE; i++) {
626             tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
627                                          start1, length);
628         }
629     }
630     qemu_spin_unlock(&env_tlb(env)->c.lock);
631 }
632 
633 /* Called with tlb_c.lock held */
634 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
635                                          target_ulong vaddr)
636 {
637     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
638         tlb_entry->addr_write = vaddr;
639     }
640 }
641 
642 /* update the TLB corresponding to virtual page vaddr
643    so that it is no longer dirty */
644 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
645 {
646     CPUArchState *env = cpu->env_ptr;
647     int mmu_idx;
648 
649     assert_cpu_is_self(cpu);
650 
651     vaddr &= TARGET_PAGE_MASK;
652     qemu_spin_lock(&env_tlb(env)->c.lock);
653     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
654         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
655     }
656 
657     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
658         int k;
659         for (k = 0; k < CPU_VTLB_SIZE; k++) {
660             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
661         }
662     }
663     qemu_spin_unlock(&env_tlb(env)->c.lock);
664 }
665 
666 /* Our TLB does not support large pages, so remember the area covered by
667    large pages and trigger a full TLB flush if these are invalidated.  */
668 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
669                                target_ulong vaddr, target_ulong size)
670 {
671     target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
672     target_ulong lp_mask = ~(size - 1);
673 
674     if (lp_addr == (target_ulong)-1) {
675         /* No previous large page.  */
676         lp_addr = vaddr;
677     } else {
678         /* Extend the existing region to include the new page.
679            This is a compromise between unnecessary flushes and
680            the cost of maintaining a full variable size TLB.  */
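        /* e.g. (illustrative) an existing 2MB region at 0x00200000 and a
           new 2MB page at 0x00600000 first differ in bit 22, so lp_mask is
           widened twice until both fall inside one 8MB-aligned region.  */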
681         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
682         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
683             lp_mask <<= 1;
684         }
685     }
686     env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
687     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
688 }
689 
690 /* Add a new TLB entry. At most one entry for a given virtual address
691  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
692  * supplied size is only used by tlb_flush_page.
693  *
694  * Called from TCG-generated code, which is under an RCU read-side
695  * critical section.
696  */
697 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
698                              hwaddr paddr, MemTxAttrs attrs, int prot,
699                              int mmu_idx, target_ulong size)
700 {
701     CPUArchState *env = cpu->env_ptr;
702     CPUTLB *tlb = env_tlb(env);
703     CPUTLBDesc *desc = &tlb->d[mmu_idx];
704     MemoryRegionSection *section;
705     unsigned int index;
706     target_ulong address;
707     target_ulong code_address;
708     uintptr_t addend;
709     CPUTLBEntry *te, tn;
710     hwaddr iotlb, xlat, sz, paddr_page;
711     target_ulong vaddr_page;
712     int asidx = cpu_asidx_from_attrs(cpu, attrs);
713 
714     assert_cpu_is_self(cpu);
715 
716     if (size <= TARGET_PAGE_SIZE) {
717         sz = TARGET_PAGE_SIZE;
718     } else {
719         tlb_add_large_page(env, mmu_idx, vaddr, size);
720         sz = size;
721     }
722     vaddr_page = vaddr & TARGET_PAGE_MASK;
723     paddr_page = paddr & TARGET_PAGE_MASK;
724 
725     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
726                                                 &xlat, &sz, attrs, &prot);
727     assert(sz >= TARGET_PAGE_SIZE);
728 
729     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
730               " prot=%x idx=%d\n",
731               vaddr, paddr, prot, mmu_idx);
732 
733     address = vaddr_page;
734     if (size < TARGET_PAGE_SIZE) {
735         /*
736          * Slow-path the TLB entries; we will repeat the MMU check and TLB
737          * fill on every access.
738          */
739         address |= TLB_RECHECK;
740     }
741     if (!memory_region_is_ram(section->mr) &&
742         !memory_region_is_romd(section->mr)) {
743         /* IO memory case */
744         address |= TLB_MMIO;
745         addend = 0;
746     } else {
747         /* TLB_MMIO for rom/romd handled below */
748         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
749     }
750 
751     code_address = address;
752     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
753                                             paddr_page, xlat, prot, &address);
754 
755     index = tlb_index(env, mmu_idx, vaddr_page);
756     te = tlb_entry(env, mmu_idx, vaddr_page);
757 
758     /*
759      * Hold the TLB lock for the rest of the function. We could acquire/release
760      * the lock several times in the function, but it is faster to amortize the
761      * acquisition cost by acquiring it just once. Note that this leads to
762      * a longer critical section, but this is not a concern since the TLB lock
763      * is unlikely to be contended.
764      */
765     qemu_spin_lock(&tlb->c.lock);
766 
767     /* Note that the tlb is no longer clean.  */
768     tlb->c.dirty |= 1 << mmu_idx;
769 
770     /* Make sure there's no cached translation for the new page.  */
771     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
772 
773     /*
774      * Only evict the old entry to the victim tlb if it's for a
775      * different page; otherwise just overwrite the stale data.
776      */
777     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
778         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
779         CPUTLBEntry *tv = &desc->vtable[vidx];
780 
781         /* Evict the old entry into the victim tlb.  */
782         copy_tlb_helper_locked(tv, te);
783         desc->viotlb[vidx] = desc->iotlb[index];
784         tlb_n_used_entries_dec(env, mmu_idx);
785     }
786 
787     /* refill the tlb */
788     /*
789      * At this point iotlb contains a physical section number in the lower
790      * TARGET_PAGE_BITS, and either
791      *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
792      *  + the offset within section->mr of the page base (otherwise)
793      * We subtract the vaddr_page (which is page aligned and thus won't
794      * disturb the low bits) to give an offset which can be added to the
795      * (non-page-aligned) vaddr of the eventual memory access to get
796      * the MemoryRegion offset for the access. Note that the vaddr we
797      * subtract here is that of the page base, and not the same as the
798      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
799      */
800     desc->iotlb[index].addr = iotlb - vaddr_page;
801     desc->iotlb[index].attrs = attrs;
802 
803     /* Now calculate the new entry */
804     tn.addend = addend - vaddr_page;
805     if (prot & PAGE_READ) {
806         tn.addr_read = address;
807     } else {
808         tn.addr_read = -1;
809     }
810 
811     if (prot & PAGE_EXEC) {
812         tn.addr_code = code_address;
813     } else {
814         tn.addr_code = -1;
815     }
816 
817     tn.addr_write = -1;
818     if (prot & PAGE_WRITE) {
819         if ((memory_region_is_ram(section->mr) && section->readonly)
820             || memory_region_is_romd(section->mr)) {
821             /* Write access calls the I/O callback.  */
822             tn.addr_write = address | TLB_MMIO;
823         } else if (memory_region_is_ram(section->mr)
824                    && cpu_physical_memory_is_clean(
825                        memory_region_get_ram_addr(section->mr) + xlat)) {
826             tn.addr_write = address | TLB_NOTDIRTY;
827         } else {
828             tn.addr_write = address;
829         }
830         if (prot & PAGE_WRITE_INV) {
831             tn.addr_write |= TLB_INVALID_MASK;
832         }
833     }
834 
835     copy_tlb_helper_locked(te, &tn);
836     tlb_n_used_entries_inc(env, mmu_idx);
837     qemu_spin_unlock(&tlb->c.lock);
838 }
839 
840 /* Add a new TLB entry, but without specifying the memory
841  * transaction attributes to be used.
842  */
843 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
844                   hwaddr paddr, int prot,
845                   int mmu_idx, target_ulong size)
846 {
847     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
848                             prot, mmu_idx, size);
849 }
850 
851 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
852 {
853     ram_addr_t ram_addr;
854 
855     ram_addr = qemu_ram_addr_from_host(ptr);
856     if (ram_addr == RAM_ADDR_INVALID) {
857         error_report("Bad ram pointer %p", ptr);
858         abort();
859     }
860     return ram_addr;
861 }
862 
863 /*
864  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
865  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
866  * be discarded and looked up again (e.g. via tlb_entry()).
867  */
868 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
869                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
870 {
871     CPUClass *cc = CPU_GET_CLASS(cpu);
872     bool ok;
873 
874     /*
875      * This is not a probe, so only valid return is success; failure
876      * should result in exception + longjmp to the cpu loop.
877      */
878     ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
879     assert(ok);
880 }
881 
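/* Perform an MMIO read through the memory-region API: recover the section
 * from the iotlb entry, take the iothread lock if the region requires it,
 * and report any transaction failure back to the CPU.
 */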
882 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
883                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
884                          MMUAccessType access_type, MemOp op)
885 {
886     CPUState *cpu = env_cpu(env);
887     hwaddr mr_offset;
888     MemoryRegionSection *section;
889     MemoryRegion *mr;
890     uint64_t val;
891     bool locked = false;
892     MemTxResult r;
893 
894     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
895     mr = section->mr;
896     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
897     cpu->mem_io_pc = retaddr;
898     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
899         cpu_io_recompile(cpu, retaddr);
900     }
901 
902     cpu->mem_io_vaddr = addr;
903     cpu->mem_io_access_type = access_type;
904 
905     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
906         qemu_mutex_lock_iothread();
907         locked = true;
908     }
909     r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
910     if (r != MEMTX_OK) {
911         hwaddr physaddr = mr_offset +
912             section->offset_within_address_space -
913             section->offset_within_region;
914 
915         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
916                                mmu_idx, iotlbentry->attrs, r, retaddr);
917     }
918     if (locked) {
919         qemu_mutex_unlock_iothread();
920     }
921 
922     return val;
923 }
924 
925 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
926                       int mmu_idx, uint64_t val, target_ulong addr,
927                       uintptr_t retaddr, MemOp op)
928 {
929     CPUState *cpu = env_cpu(env);
930     hwaddr mr_offset;
931     MemoryRegionSection *section;
932     MemoryRegion *mr;
933     bool locked = false;
934     MemTxResult r;
935 
936     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
937     mr = section->mr;
938     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
939     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
940         cpu_io_recompile(cpu, retaddr);
941     }
942     cpu->mem_io_vaddr = addr;
943     cpu->mem_io_pc = retaddr;
944 
945     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
946         qemu_mutex_lock_iothread();
947         locked = true;
948     }
949     r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
950     if (r != MEMTX_OK) {
951         hwaddr physaddr = mr_offset +
952             section->offset_within_address_space -
953             section->offset_within_region;
954 
955         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
956                                MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
957                                retaddr);
958     }
959     if (locked) {
960         qemu_mutex_unlock_iothread();
961     }
962 }
963 
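/* Load one comparator field of a TLB entry, given its byte offset.  */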
964 static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
965 {
966 #if TCG_OVERSIZED_GUEST
967     return *(target_ulong *)((uintptr_t)entry + ofs);
968 #else
969     /* ofs might correspond to .addr_write, so use atomic_read */
970     return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
971 #endif
972 }
973 
974 /* Return true if ADDR is present in the victim tlb, and has been copied
975    back to the main tlb.  */
976 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
977                            size_t elt_ofs, target_ulong page)
978 {
979     size_t vidx;
980 
981     assert_cpu_is_self(env_cpu(env));
982     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
983         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
984         target_ulong cmp;
985 
986         /* elt_ofs might correspond to .addr_write, so use atomic_read */
987 #if TCG_OVERSIZED_GUEST
988         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
989 #else
990         cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
991 #endif
992 
993         if (cmp == page) {
994             /* Found entry in victim tlb, swap tlb and iotlb.  */
995             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
996 
997             qemu_spin_lock(&env_tlb(env)->c.lock);
998             copy_tlb_helper_locked(&tmptlb, tlb);
999             copy_tlb_helper_locked(tlb, vtlb);
1000             copy_tlb_helper_locked(vtlb, &tmptlb);
1001             qemu_spin_unlock(&env_tlb(env)->c.lock);
1002 
1003             CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1004             CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1005             tmpio = *io; *io = *vio; *vio = tmpio;
1006             return true;
1007         }
1008     }
1009     return false;
1010 }
1011 
1012 /* Macro to call the above, with local variables from the use context.  */
1013 #define VICTIM_TLB_HIT(TY, ADDR) \
1014   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1015                  (ADDR) & TARGET_PAGE_MASK)
1016 
1017 /* NOTE: this function can trigger an exception */
1018 /* NOTE2: the returned address is not exactly the physical address: it
1019  * is actually a ram_addr_t (in system mode; the user mode emulation
1020  * version of this function returns a guest virtual address).
1021  */
1022 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1023 {
1024     uintptr_t mmu_idx = cpu_mmu_index(env, true);
1025     uintptr_t index = tlb_index(env, mmu_idx, addr);
1026     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1027     void *p;
1028 
1029     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1030         if (!VICTIM_TLB_HIT(addr_code, addr)) {
1031             tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1032             index = tlb_index(env, mmu_idx, addr);
1033             entry = tlb_entry(env, mmu_idx, addr);
1034         }
1035         assert(tlb_hit(entry->addr_code, addr));
1036     }
1037 
1038     if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
1039         /*
1040          * Return -1 if we can't translate and execute from an entire
1041          * page of RAM here, which will cause us to execute by loading
1042          * and translating one insn at a time, without caching:
1043          *  - TLB_RECHECK: means the MMU protection covers a smaller range
1044          *    than a target page, so we must redo the MMU check every insn
1045          *  - TLB_MMIO: region is not backed by RAM
1046          */
1047         return -1;
1048     }
1049 
1050     p = (void *)((uintptr_t)addr + entry->addend);
1051     return qemu_ram_addr_from_host_nofail(p);
1052 }
1053 
1054 /* Probe for whether the specified guest write access is permitted.
1055  * If it is not permitted then an exception will be taken in the same
1056  * way as if this were a real write access (and we will not return).
1057  * Otherwise the function will return, and there will be a valid
1058  * entry in the TLB for this access.
1059  */
1060 void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1061                  uintptr_t retaddr)
1062 {
1063     uintptr_t index = tlb_index(env, mmu_idx, addr);
1064     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1065 
1066     if (!tlb_hit(tlb_addr_write(entry), addr)) {
1067         /* TLB entry is for a different page */
1068         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1069             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1070                      mmu_idx, retaddr);
1071         }
1072     }
1073 }
1074 
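/* Return the host address backing guest address "addr" for the given access
 * type and mmu_idx, performing a non-faulting TLB fill if required; return
 * NULL if the page is unmapped for that access or is backed by I/O.
 */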
1075 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1076                         MMUAccessType access_type, int mmu_idx)
1077 {
1078     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1079     uintptr_t tlb_addr, page;
1080     size_t elt_ofs;
1081 
1082     switch (access_type) {
1083     case MMU_DATA_LOAD:
1084         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1085         break;
1086     case MMU_DATA_STORE:
1087         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1088         break;
1089     case MMU_INST_FETCH:
1090         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1091         break;
1092     default:
1093         g_assert_not_reached();
1094     }
1095 
1096     page = addr & TARGET_PAGE_MASK;
1097     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1098 
1099     if (!tlb_hit_page(tlb_addr, page)) {
1100         uintptr_t index = tlb_index(env, mmu_idx, addr);
1101 
1102         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
1103             CPUState *cs = env_cpu(env);
1104             CPUClass *cc = CPU_GET_CLASS(cs);
1105 
1106             if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
1107                 /* Non-faulting page table read failed.  */
1108                 return NULL;
1109             }
1110 
1111             /* TLB resize via tlb_fill may have moved the entry.  */
1112             entry = tlb_entry(env, mmu_idx, addr);
1113         }
1114         tlb_addr = tlb_read_ofs(entry, elt_ofs);
1115     }
1116 
1117     if (tlb_addr & ~TARGET_PAGE_MASK) {
1118         /* IO access */
1119         return NULL;
1120     }
1121 
1122     return (void *)((uintptr_t)addr + entry->addend);
1123 }
1124 
1125 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1126  * or I/O operations to proceed.  Return the host address.  */
1127 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1128                                TCGMemOpIdx oi, uintptr_t retaddr,
1129                                NotDirtyInfo *ndi)
1130 {
1131     size_t mmu_idx = get_mmuidx(oi);
1132     uintptr_t index = tlb_index(env, mmu_idx, addr);
1133     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1134     target_ulong tlb_addr = tlb_addr_write(tlbe);
1135     MemOp mop = get_memop(oi);
1136     int a_bits = get_alignment_bits(mop);
1137     int s_bits = mop & MO_SIZE;
1138     void *hostaddr;
1139 
1140     /* Adjust the given return address.  */
1141     retaddr -= GETPC_ADJ;
1142 
1143     /* Enforce guest required alignment.  */
1144     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1145         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1146         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1147                              mmu_idx, retaddr);
1148     }
1149 
1150     /* Enforce qemu required alignment.  */
1151     if (unlikely(addr & ((1 << s_bits) - 1))) {
1152         /* We get here if guest alignment was not requested,
1153            or was not enforced by cpu_unaligned_access above.
1154            We might widen the access and emulate, but for now
1155            mark an exception and exit the cpu loop.  */
1156         goto stop_the_world;
1157     }
1158 
1159     /* Check TLB entry and enforce page permissions.  */
1160     if (!tlb_hit(tlb_addr, addr)) {
1161         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1162             tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
1163                      mmu_idx, retaddr);
1164             index = tlb_index(env, mmu_idx, addr);
1165             tlbe = tlb_entry(env, mmu_idx, addr);
1166         }
1167         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1168     }
1169 
1170     /* Notice an IO access or a needs-MMU-lookup access */
1171     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1172         /* There's really nothing that can be done to
1173            support this apart from stop-the-world.  */
1174         goto stop_the_world;
1175     }
1176 
1177     /* Let the guest notice RMW on a write-only page.  */
1178     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1179         tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
1180                  mmu_idx, retaddr);
1181         /* Since we don't support reads and writes to different addresses,
1182            and we do have the proper page loaded for write, this shouldn't
1183            ever return.  But just in case, handle via stop-the-world.  */
1184         goto stop_the_world;
1185     }
1186 
1187     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1188 
1189     ndi->active = false;
1190     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1191         ndi->active = true;
1192         memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
1193                                       qemu_ram_addr_from_host_nofail(hostaddr),
1194                                       1 << s_bits);
1195     }
1196 
1197     return hostaddr;
1198 
1199  stop_the_world:
1200     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1201 }
1202 
1203 /*
1204  * Load Helpers
1205  *
1206  * We support two different access types. SOFTMMU_CODE_ACCESS is
1207  * specifically for reading instructions from system memory. It is
1208  * called by the translation loop and in some helpers where the code
1209  * is disassembled. It shouldn't be called directly by guest code.
1210  */
1211 
1212 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1213                                 TCGMemOpIdx oi, uintptr_t retaddr);
1214 
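/* Generic load path.  "full_load" is the caller's own full-load routine, so
 * that the slow unaligned path below can recurse with the same memop and
 * mmu_idx encoded in "oi".
 */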
1215 static inline uint64_t __attribute__((always_inline))
1216 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1217             uintptr_t retaddr, MemOp op, bool code_read,
1218             FullLoadHelper *full_load)
1219 {
1220     uintptr_t mmu_idx = get_mmuidx(oi);
1221     uintptr_t index = tlb_index(env, mmu_idx, addr);
1222     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1223     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1224     const size_t tlb_off = code_read ?
1225         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1226     const MMUAccessType access_type =
1227         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1228     unsigned a_bits = get_alignment_bits(get_memop(oi));
1229     void *haddr;
1230     uint64_t res;
1231     size_t size = memop_size(op);
1232 
1233     /* Handle CPU-specific unaligned behaviour */
1234     if (addr & ((1 << a_bits) - 1)) {
1235         cpu_unaligned_access(env_cpu(env), addr, access_type,
1236                              mmu_idx, retaddr);
1237     }
1238 
1239     /* If the TLB entry is for a different page, reload and try again.  */
1240     if (!tlb_hit(tlb_addr, addr)) {
1241         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1242                             addr & TARGET_PAGE_MASK)) {
1243             tlb_fill(env_cpu(env), addr, size,
1244                      access_type, mmu_idx, retaddr);
1245             index = tlb_index(env, mmu_idx, addr);
1246             entry = tlb_entry(env, mmu_idx, addr);
1247         }
1248         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1249     }
1250 
1251     /* Handle an IO access.  */
1252     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1253         if ((addr & (size - 1)) != 0) {
1254             goto do_unaligned_access;
1255         }
1256 
1257         if (tlb_addr & TLB_RECHECK) {
1258             /*
1259              * This is a TLB_RECHECK access, where the MMU protection
1260              * covers a smaller range than a target page, and we must
1261              * repeat the MMU check here. This tlb_fill() call might
1262              * longjump out if this access should cause a guest exception.
1263              */
1264             tlb_fill(env_cpu(env), addr, size,
1265                      access_type, mmu_idx, retaddr);
1266             index = tlb_index(env, mmu_idx, addr);
1267             entry = tlb_entry(env, mmu_idx, addr);
1268 
1269             tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1270             tlb_addr &= ~TLB_RECHECK;
1271             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1272                 /* RAM access */
1273                 goto do_aligned_access;
1274             }
1275         }
1276 
1277         return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
1278                         mmu_idx, addr, retaddr, access_type, op);
1279     }
1280 
1281     /* Handle slow unaligned access (it spans two pages or IO).  */
1282     if (size > 1
1283         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1284                     >= TARGET_PAGE_SIZE)) {
1285         target_ulong addr1, addr2;
1286         uint64_t r1, r2;
1287         unsigned shift;
1288     do_unaligned_access:
1289         addr1 = addr & ~((target_ulong)size - 1);
1290         addr2 = addr1 + size;
1291         r1 = full_load(env, addr1, oi, retaddr);
1292         r2 = full_load(env, addr2, oi, retaddr);
1293         shift = (addr & (size - 1)) * 8;
1294 
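        /* e.g. a 4-byte little-endian load with (addr & 3) == 2 gives
           shift == 16: the high half of r1 supplies the low half of the
           result and the low half of r2 the high half.  */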
1295         if (memop_big_endian(op)) {
1296             /* Big-endian combine.  */
1297             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1298         } else {
1299             /* Little-endian combine.  */
1300             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1301         }
1302         return res & MAKE_64BIT_MASK(0, size * 8);
1303     }
1304 
1305  do_aligned_access:
1306     haddr = (void *)((uintptr_t)addr + entry->addend);
1307     switch (op) {
1308     case MO_UB:
1309         res = ldub_p(haddr);
1310         break;
1311     case MO_BEUW:
1312         res = lduw_be_p(haddr);
1313         break;
1314     case MO_LEUW:
1315         res = lduw_le_p(haddr);
1316         break;
1317     case MO_BEUL:
1318         res = (uint32_t)ldl_be_p(haddr);
1319         break;
1320     case MO_LEUL:
1321         res = (uint32_t)ldl_le_p(haddr);
1322         break;
1323     case MO_BEQ:
1324         res = ldq_be_p(haddr);
1325         break;
1326     case MO_LEQ:
1327         res = ldq_le_p(haddr);
1328         break;
1329     default:
1330         g_assert_not_reached();
1331     }
1332 
1333     return res;
1334 }
1335 
1336 /*
1337  * For the benefit of TCG-generated code, we want to avoid the
1338  * complication of ABI-specific return type promotion and always
1339  * return a value extended to the register size of the host. This is
1340  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1341  * data, and for that we always have uint64_t.
1342  *
1343  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1344  */
1345 
1346 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1347                               TCGMemOpIdx oi, uintptr_t retaddr)
1348 {
1349     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
1350 }
1351 
1352 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1353                                      TCGMemOpIdx oi, uintptr_t retaddr)
1354 {
1355     return full_ldub_mmu(env, addr, oi, retaddr);
1356 }
1357 
1358 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1359                                  TCGMemOpIdx oi, uintptr_t retaddr)
1360 {
1361     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
1362                        full_le_lduw_mmu);
1363 }
1364 
1365 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1366                                     TCGMemOpIdx oi, uintptr_t retaddr)
1367 {
1368     return full_le_lduw_mmu(env, addr, oi, retaddr);
1369 }
1370 
1371 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1372                                  TCGMemOpIdx oi, uintptr_t retaddr)
1373 {
1374     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
1375                        full_be_lduw_mmu);
1376 }
1377 
1378 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1379                                     TCGMemOpIdx oi, uintptr_t retaddr)
1380 {
1381     return full_be_lduw_mmu(env, addr, oi, retaddr);
1382 }
1383 
1384 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1385                                  TCGMemOpIdx oi, uintptr_t retaddr)
1386 {
1387     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
1388                        full_le_ldul_mmu);
1389 }
1390 
1391 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1392                                     TCGMemOpIdx oi, uintptr_t retaddr)
1393 {
1394     return full_le_ldul_mmu(env, addr, oi, retaddr);
1395 }
1396 
1397 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1398                                  TCGMemOpIdx oi, uintptr_t retaddr)
1399 {
1400     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
1401                        full_be_ldul_mmu);
1402 }
1403 
1404 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1405                                     TCGMemOpIdx oi, uintptr_t retaddr)
1406 {
1407     return full_be_ldul_mmu(env, addr, oi, retaddr);
1408 }
1409 
1410 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1411                            TCGMemOpIdx oi, uintptr_t retaddr)
1412 {
1413     return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
1414                        helper_le_ldq_mmu);
1415 }
1416 
1417 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1418                            TCGMemOpIdx oi, uintptr_t retaddr)
1419 {
1420     return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
1421                        helper_be_ldq_mmu);
1422 }
1423 
1424 /*
1425  * Provide signed versions of the load routines as well.  We can of course
1426  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
1427  */
1428 
1429 
1430 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1431                                      TCGMemOpIdx oi, uintptr_t retaddr)
1432 {
1433     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1434 }
1435 
1436 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1437                                     TCGMemOpIdx oi, uintptr_t retaddr)
1438 {
1439     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1440 }
1441 
1442 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1443                                     TCGMemOpIdx oi, uintptr_t retaddr)
1444 {
1445     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1446 }
1447 
1448 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1449                                     TCGMemOpIdx oi, uintptr_t retaddr)
1450 {
1451     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1452 }
1453 
1454 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1455                                     TCGMemOpIdx oi, uintptr_t retaddr)
1456 {
1457     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1458 }
1459 
1460 /*
1461  * Store Helpers
1462  */
1463 
1464 static inline void __attribute__((always_inline))
1465 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1466              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
1467 {
1468     uintptr_t mmu_idx = get_mmuidx(oi);
1469     uintptr_t index = tlb_index(env, mmu_idx, addr);
1470     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1471     target_ulong tlb_addr = tlb_addr_write(entry);
1472     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1473     unsigned a_bits = get_alignment_bits(get_memop(oi));
1474     void *haddr;
1475     size_t size = memop_size(op);
1476 
1477     /* Handle CPU-specific unaligned behaviour */
1478     if (addr & ((1 << a_bits) - 1)) {
1479         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1480                              mmu_idx, retaddr);
1481     }
1482 
1483     /* If the TLB entry is for a different page, reload and try again.  */
1484     if (!tlb_hit(tlb_addr, addr)) {
1485         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1486             addr & TARGET_PAGE_MASK)) {
1487             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1488                      mmu_idx, retaddr);
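            /*
             * tlb_fill() may resize (and hence reallocate) the dynamic
             * TLB, so recompute the index and entry pointer before use.
             */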
1489             index = tlb_index(env, mmu_idx, addr);
1490             entry = tlb_entry(env, mmu_idx, addr);
1491         }
1492         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1493     }
1494 
1495     /* Handle an IO access.  */
1496     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1497         if ((addr & (size - 1)) != 0) {
1498             goto do_unaligned_access;
1499         }
1500 
1501         if (tlb_addr & TLB_RECHECK) {
1502             /*
1503              * This is a TLB_RECHECK access, where the MMU protection
1504              * covers a smaller range than a target page, and we must
1505              * repeat the MMU check here. This tlb_fill() call might
1506              * longjmp out if this access should cause a guest exception.
1507              */
1508             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1509                      mmu_idx, retaddr);
1510             index = tlb_index(env, mmu_idx, addr);
1511             entry = tlb_entry(env, mmu_idx, addr);
1512 
1513             tlb_addr = tlb_addr_write(entry);
1514             tlb_addr &= ~TLB_RECHECK;
1515             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1516                 /* RAM access */
1517                 goto do_aligned_access;
1518             }
1519         }
1520 
1521         io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
1522                   val, addr, retaddr, op);
1523         return;
1524     }
1525 
1526     /* Handle slow unaligned access (it spans two pages or IO).  */
1527     if (size > 1
1528         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1529                      >= TARGET_PAGE_SIZE)) {
1530         int i;
1531         uintptr_t index2;
1532         CPUTLBEntry *entry2;
1533         target_ulong page2, tlb_addr2;
1534     do_unaligned_access:
1535         /*
1536          * Ensure the second page is in the TLB.  Note that the first page
1537          * is already guaranteed to be filled, and that the second page
1538          * cannot evict the first.
1539          */
1540         page2 = (addr + size) & TARGET_PAGE_MASK;
1541         index2 = tlb_index(env, mmu_idx, page2);
1542         entry2 = tlb_entry(env, mmu_idx, page2);
1543         tlb_addr2 = tlb_addr_write(entry2);
1544         if (!tlb_hit_page(tlb_addr2, page2)
1545             && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
1546                                page2 & TARGET_PAGE_MASK)) {
1547             tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
1548                      mmu_idx, retaddr);
1549         }
1550 
1551         /*
1552          * XXX: not efficient, but simple.
1553          * This loop must go in the forward direction to avoid issues
1554          * with self-modifying code on 64-bit Windows.
1555          */
1556         for (i = 0; i < size; ++i) {
1557             uint8_t val8;
1558             if (memop_big_endian(op)) {
1559                 /* Big-endian extract.  */
1560                 val8 = val >> (((size - 1) * 8) - (i * 8));
1561             } else {
1562                 /* Little-endian extract.  */
1563                 val8 = val >> (i * 8);
1564             }
1565             helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
1566         }
1567         return;
1568     }
1569 
1570  do_aligned_access:
1571     haddr = (void *)((uintptr_t)addr + entry->addend);
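    /* Direct store to host RAM, dispatching on access size and endianness.  */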
1572     switch (op) {
1573     case MO_UB:
1574         stb_p(haddr, val);
1575         break;
1576     case MO_BEUW:
1577         stw_be_p(haddr, val);
1578         break;
1579     case MO_LEUW:
1580         stw_le_p(haddr, val);
1581         break;
1582     case MO_BEUL:
1583         stl_be_p(haddr, val);
1584         break;
1585     case MO_LEUL:
1586         stl_le_p(haddr, val);
1587         break;
1588     case MO_BEQ:
1589         stq_be_p(haddr, val);
1590         break;
1591     case MO_LEQ:
1592         stq_le_p(haddr, val);
1593         break;
1594     default:
1595         g_assert_not_reached();
1596         break;
1597     }
1598 }
1599 
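/*
 * The externally visible store helpers below are thin wrappers that fix
 * the MemOp for one size/endianness combination and defer to
 * store_helper(); because store_helper() is marked always_inline, each
 * wrapper becomes a specialised store path.
 */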
1600 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1601                         TCGMemOpIdx oi, uintptr_t retaddr)
1602 {
1603     store_helper(env, addr, val, oi, retaddr, MO_UB);
1604 }
1605 
1606 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1607                        TCGMemOpIdx oi, uintptr_t retaddr)
1608 {
1609     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
1610 }
1611 
1612 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1613                        TCGMemOpIdx oi, uintptr_t retaddr)
1614 {
1615     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
1616 }
1617 
1618 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1619                        TCGMemOpIdx oi, uintptr_t retaddr)
1620 {
1621     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
1622 }
1623 
1624 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1625                        TCGMemOpIdx oi, uintptr_t retaddr)
1626 {
1627     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
1628 }
1629 
1630 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1631                        TCGMemOpIdx oi, uintptr_t retaddr)
1632 {
1633     store_helper(env, addr, val, oi, retaddr, MO_LEQ);
1634 }
1635 
1636 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1637                        TCGMemOpIdx oi, uintptr_t retaddr)
1638 {
1639     store_helper(env, addr, val, oi, retaddr, MO_BEQ);
1640 }
1641 
1642 /* The first set of helpers allows OI and RETADDR to be passed in explicitly,
1643    which makes them callable from other helpers.  */
1644 
1645 #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1646 #define ATOMIC_NAME(X) \
1647     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1648 #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
1649 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
1650 #define ATOMIC_MMU_CLEANUP                              \
1651     do {                                                \
1652         if (unlikely(ndi.active)) {                     \
1653             memory_notdirty_write_complete(&ndi);       \
1654         }                                               \
1655     } while (0)
1656 
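/*
 * Each inclusion of "atomic_template.h" below instantiates the atomic
 * helpers for one access size.  With the macros above, the generated
 * entry points take explicit oi/retaddr arguments and carry an "_mmu"
 * suffix -- e.g. for DATA_SIZE 4, little endian, something like
 * helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, retaddr).
 * The exact set of operations is defined by atomic_template.h itself.
 */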
1657 #define DATA_SIZE 1
1658 #include "atomic_template.h"
1659 
1660 #define DATA_SIZE 2
1661 #include "atomic_template.h"
1662 
1663 #define DATA_SIZE 4
1664 #include "atomic_template.h"
1665 
1666 #ifdef CONFIG_ATOMIC64
1667 #define DATA_SIZE 8
1668 #include "atomic_template.h"
1669 #endif
1670 
1671 #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1672 #define DATA_SIZE 16
1673 #include "atomic_template.h"
1674 #endif
1675 
1676 /* The second set of helpers is directly callable from TCG as helpers.  */
1677 
1678 #undef EXTRA_ARGS
1679 #undef ATOMIC_NAME
1680 #undef ATOMIC_MMU_LOOKUP
1681 #define EXTRA_ARGS         , TCGMemOpIdx oi
1682 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
1683 #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
1684 
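/*
 * This second round of instantiations produces the helpers that TCG
 * calls directly: the generated names lack the "_mmu" suffix, only the
 * TCGMemOpIdx is passed in from generated code, and the return address
 * is recovered implicitly via GETPC().
 */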
1685 #define DATA_SIZE 1
1686 #include "atomic_template.h"
1687 
1688 #define DATA_SIZE 2
1689 #include "atomic_template.h"
1690 
1691 #define DATA_SIZE 4
1692 #include "atomic_template.h"
1693 
1694 #ifdef CONFIG_ATOMIC64
1695 #define DATA_SIZE 8
1696 #include "atomic_template.h"
1697 #endif
1698 
1699 /* Code access functions.  */
1700 
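/*
 * These mirror the data-load helpers above but pass code_read = true to
 * load_helper(), so the lookup uses the addr_code comparator of the TLB
 * entry and faults are reported as instruction fetches.  They are used
 * for code accesses, e.g. by the cpu_ld*_code accessors during
 * translation.
 */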
1701 static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
1702                                TCGMemOpIdx oi, uintptr_t retaddr)
1703 {
1704     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
1705 }
1706 
1707 uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1708                             TCGMemOpIdx oi, uintptr_t retaddr)
1709 {
1710     return full_ldub_cmmu(env, addr, oi, retaddr);
1711 }
1712 
1713 static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
1714                                   TCGMemOpIdx oi, uintptr_t retaddr)
1715 {
1716     return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
1717                        full_le_lduw_cmmu);
1718 }
1719 
1720 uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1721                             TCGMemOpIdx oi, uintptr_t retaddr)
1722 {
1723     return full_le_lduw_cmmu(env, addr, oi, retaddr);
1724 }
1725 
1726 static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
1727                                   TCGMemOpIdx oi, uintptr_t retaddr)
1728 {
1729     return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
1730                        full_be_lduw_cmmu);
1731 }
1732 
1733 uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1734                             TCGMemOpIdx oi, uintptr_t retaddr)
1735 {
1736     return full_be_lduw_cmmu(env, addr, oi, retaddr);
1737 }
1738 
1739 static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
1740                                   TCGMemOpIdx oi, uintptr_t retaddr)
1741 {
1742     return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
1743                        full_le_ldul_cmmu);
1744 }
1745 
1746 uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1747                             TCGMemOpIdx oi, uintptr_t retaddr)
1748 {
1749     return full_le_ldul_cmmu(env, addr, oi, retaddr);
1750 }
1751 
1752 static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
1753                                   TCGMemOpIdx oi, uintptr_t retaddr)
1754 {
1755     return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
1756                        full_be_ldul_cmmu);
1757 }
1758 
1759 uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1760                             TCGMemOpIdx oi, uintptr_t retaddr)
1761 {
1762     return full_be_ldul_cmmu(env, addr, oi, retaddr);
1763 }
1764 
1765 uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1766                             TCGMemOpIdx oi, uintptr_t retaddr)
1767 {
1768     return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
1769                        helper_le_ldq_cmmu);
1770 }
1771 
1772 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1773                             TCGMemOpIdx oi, uintptr_t retaddr)
1774 {
1775     return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
1776                        helper_be_ldq_cmmu);
1777 }
1778