xref: /openbmc/qemu/accel/tcg/cputlb.c (revision d5d680ca)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
36 
37 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
38 /* #define DEBUG_TLB */
39 /* #define DEBUG_TLB_LOG */
40 
41 #ifdef DEBUG_TLB
42 # define DEBUG_TLB_GATE 1
43 # ifdef DEBUG_TLB_LOG
44 #  define DEBUG_TLB_LOG_GATE 1
45 # else
46 #  define DEBUG_TLB_LOG_GATE 0
47 # endif
48 #else
49 # define DEBUG_TLB_GATE 0
50 # define DEBUG_TLB_LOG_GATE 0
51 #endif
52 
53 #define tlb_debug(fmt, ...) do { \
54     if (DEBUG_TLB_LOG_GATE) { \
55         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
56                       ## __VA_ARGS__); \
57     } else if (DEBUG_TLB_GATE) { \
58         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
59     } \
60 } while (0)
61 
62 #define assert_cpu_is_self(cpu) do {                              \
63         if (DEBUG_TLB_GATE) {                                     \
64             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
65         }                                                         \
66     } while (0)
67 
68 /* run_on_cpu_data.target_ptr should always be big enough for a
69  * target_ulong even on 32 bit builds */
70 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
71 
72 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
73  */
74 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
75 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
76 
77 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
78 {
79     return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
80 }
81 
82 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
83                              size_t max_entries)
84 {
85     desc->window_begin_ns = ns;
86     desc->window_max_entries = max_entries;
87 }
88 
89 static void tlb_dyn_init(CPUArchState *env)
90 {
91     int i;
92 
93     for (i = 0; i < NB_MMU_MODES; i++) {
94         CPUTLBDesc *desc = &env_tlb(env)->d[i];
95         size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
96 
97         tlb_window_reset(desc, get_clock_realtime(), 0);
98         desc->n_used_entries = 0;
99         env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
100         env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
101         env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
102     }
103 }
104 
105 /**
106  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
107  * @env: CPU that owns the TLB
108  * @mmu_idx: MMU index of the TLB
109  *
110  * Called with tlb_lock held.
111  *
112  * We have two main constraints when resizing a TLB: (1) we only resize it
113  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
114  * the array or unnecessarily flushing it), which means we do not control how
115  * frequently the resizing can occur; (2) we don't have access to the guest's
116  * future scheduling decisions, and therefore have to decide the magnitude of
117  * the resize based on past observations.
118  *
119  * In general, a memory-hungry process can benefit greatly from an appropriately
120  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
121  * we just have to make the TLB as large as possible; while an oversized TLB
122  * results in minimal TLB miss rates, it also takes longer to be flushed
123  * (flushes can be _very_ frequent), and the reduced locality can also hurt
124  * performance.
125  *
126  * To achieve near-optimal performance for all kinds of workloads, we:
127  *
128  * 1. Aggressively increase the size of the TLB when the use rate of the
129  * TLB being flushed is high, since it is likely that in the near future this
130  * memory-hungry process will execute again, and its memory hungriness will
131  * probably be similar.
132  *
133  * 2. Slowly reduce the size of the TLB as the use rate declines over a
134  * reasonably large time window. The rationale is that if in such a time window
135  * we have not observed a high TLB use rate, it is likely that we won't observe
136  * it in the near future. In that case, once a time window expires we downsize
137  * the TLB to match the maximum use rate observed in the window.
138  *
139  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
140  * since in that range performance is likely near-optimal. Recall that the TLB
141  * is direct mapped, so we want the use rate to be low (or at least not too
142  * high), since otherwise we are likely to have a significant amount of
143  * conflict misses.
144  */
145 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
146 {
147     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
148     size_t old_size = tlb_n_entries(env, mmu_idx);
149     size_t rate;
150     size_t new_size = old_size;
151     int64_t now = get_clock_realtime();
152     int64_t window_len_ms = 100;
153     int64_t window_len_ns = window_len_ms * 1000 * 1000;
154     bool window_expired = now > desc->window_begin_ns + window_len_ns;
155 
156     if (desc->n_used_entries > desc->window_max_entries) {
157         desc->window_max_entries = desc->n_used_entries;
158     }
159     rate = desc->window_max_entries * 100 / old_size;
160 
161     if (rate > 70) {
162         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
163     } else if (rate < 30 && window_expired) {
164         size_t ceil = pow2ceil(desc->window_max_entries);
165         size_t expected_rate = desc->window_max_entries * 100 / ceil;
166 
167         /*
168          * Avoid undersizing when the max number of entries seen is just below
169          * a pow2. For instance, if max_entries == 1025, the expected use rate
170          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
171          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
172          * later. Thus, make sure that the expected use rate remains below 70%.
173          * (and since we double the size, that means the lowest rate we'd
174          * expect to get is 35%, which is still in the 30-70% range where
175          * we consider that the size is appropriate.)
176          */
177         if (expected_rate > 70) {
178             ceil *= 2;
179         }
180         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
181     }
182 
183     if (new_size == old_size) {
184         if (window_expired) {
185             tlb_window_reset(desc, now, desc->n_used_entries);
186         }
187         return;
188     }
189 
190     g_free(env_tlb(env)->f[mmu_idx].table);
191     g_free(env_tlb(env)->d[mmu_idx].iotlb);
192 
193     tlb_window_reset(desc, now, 0);
194     /* desc->n_used_entries is cleared by the caller */
195     env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
196     env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
197     env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
198     /*
199      * If the allocations fail, try smaller sizes. We just freed some
200      * memory, so going back to half of new_size has a good chance of working.
201      * Increased memory pressure elsewhere in the system might cause the
202      * allocations to fail though, so we progressively reduce the allocation
203      * size, aborting if we cannot even allocate the smallest TLB we support.
204      */
205     while (env_tlb(env)->f[mmu_idx].table == NULL ||
206            env_tlb(env)->d[mmu_idx].iotlb == NULL) {
207         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
208             error_report("%s: %s", __func__, strerror(errno));
209             abort();
210         }
211         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
212         env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
213 
214         g_free(env_tlb(env)->f[mmu_idx].table);
215         g_free(env_tlb(env)->d[mmu_idx].iotlb);
216         env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
217         env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
218     }
219 }
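
/*
 * Illustrative example of the heuristic above (made-up numbers, not taken
 * from the source): with old_size == 1024, a window_max_entries of 768
 * gives a 75% use rate, so on the next flush the TLB is doubled to 2048
 * entries (assuming CPU_TLB_DYN_MAX_BITS allows it).  Conversely, if a
 * whole 100 ms window passes with at most 200 entries used (rate < 30%),
 * ceil = pow2ceil(200) = 256; since 200 * 100 / 256 == 78 > 70, ceil is
 * doubled to 512 and the TLB is downsized to 512 entries (~39% expected
 * use), clamped to at least 1 << CPU_TLB_DYN_MIN_BITS.
 */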
220 
221 static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
222 {
223     tlb_mmu_resize_locked(env, mmu_idx);
224     memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
225     env_tlb(env)->d[mmu_idx].n_used_entries = 0;
226 }
227 
228 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
229 {
230     env_tlb(env)->d[mmu_idx].n_used_entries++;
231 }
232 
233 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
234 {
235     env_tlb(env)->d[mmu_idx].n_used_entries--;
236 }
237 
238 void tlb_init(CPUState *cpu)
239 {
240     CPUArchState *env = cpu->env_ptr;
241 
242     qemu_spin_init(&env_tlb(env)->c.lock);
243 
244     /* Ensure that cpu_reset performs a full flush.  */
245     env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
246 
247     tlb_dyn_init(env);
248 }
249 
250 /* flush_all_helper: run fn across all cpus other than the source cpu.
251  *
252  * The source cpu is handled by the caller: the *_synced variants queue
253  * its helper as "safe" work via async_safe_run_on_cpu, creating a
254  * synchronisation point where all queued work is finished before
255  * execution starts again.
256  */
257 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
258                              run_on_cpu_data d)
259 {
260     CPUState *cpu;
261 
262     CPU_FOREACH(cpu) {
263         if (cpu != src) {
264             async_run_on_cpu(cpu, fn, d);
265         }
266     }
267 }
268 
269 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
270 {
271     CPUState *cpu;
272     size_t full = 0, part = 0, elide = 0;
273 
274     CPU_FOREACH(cpu) {
275         CPUArchState *env = cpu->env_ptr;
276 
277         full += atomic_read(&env_tlb(env)->c.full_flush_count);
278         part += atomic_read(&env_tlb(env)->c.part_flush_count);
279         elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
280     }
281     *pfull = full;
282     *ppart = part;
283     *pelide = elide;
284 }
285 
286 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
287 {
288     tlb_table_flush_by_mmuidx(env, mmu_idx);
289     env_tlb(env)->d[mmu_idx].large_page_addr = -1;
290     env_tlb(env)->d[mmu_idx].large_page_mask = -1;
291     env_tlb(env)->d[mmu_idx].vindex = 0;
292     memset(env_tlb(env)->d[mmu_idx].vtable, -1,
293            sizeof(env_tlb(env)->d[0].vtable));
294 }
295 
296 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
297 {
298     CPUArchState *env = cpu->env_ptr;
299     uint16_t asked = data.host_int;
300     uint16_t all_dirty, work, to_clean;
301 
302     assert_cpu_is_self(cpu);
303 
304     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
305 
306     qemu_spin_lock(&env_tlb(env)->c.lock);
307 
308     all_dirty = env_tlb(env)->c.dirty;
309     to_clean = asked & all_dirty;
310     all_dirty &= ~to_clean;
311     env_tlb(env)->c.dirty = all_dirty;
312 
313     for (work = to_clean; work != 0; work &= work - 1) {
314         int mmu_idx = ctz32(work);
315         tlb_flush_one_mmuidx_locked(env, mmu_idx);
316     }
317 
318     qemu_spin_unlock(&env_tlb(env)->c.lock);
319 
320     cpu_tb_jmp_cache_clear(cpu);
321 
322     if (to_clean == ALL_MMUIDX_BITS) {
323         atomic_set(&env_tlb(env)->c.full_flush_count,
324                    env_tlb(env)->c.full_flush_count + 1);
325     } else {
326         atomic_set(&env_tlb(env)->c.part_flush_count,
327                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
328         if (to_clean != asked) {
329             atomic_set(&env_tlb(env)->c.elide_flush_count,
330                        env_tlb(env)->c.elide_flush_count +
331                        ctpop16(asked & ~to_clean));
332         }
333     }
334 }
335 
336 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
337 {
338     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
339 
340     if (cpu->created && !qemu_cpu_is_self(cpu)) {
341         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
342                          RUN_ON_CPU_HOST_INT(idxmap));
343     } else {
344         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
345     }
346 }
347 
348 void tlb_flush(CPUState *cpu)
349 {
350     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
351 }
352 
353 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
354 {
355     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
356 
357     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
358 
359     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
360     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
361 }
362 
363 void tlb_flush_all_cpus(CPUState *src_cpu)
364 {
365     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
366 }
367 
368 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
369 {
370     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
371 
372     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
373 
374     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
375     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
376 }
377 
378 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
379 {
380     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
381 }
382 
383 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
384                                         target_ulong page)
385 {
386     return tlb_hit_page(tlb_entry->addr_read, page) ||
387            tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
388            tlb_hit_page(tlb_entry->addr_code, page);
389 }
390 
391 /**
392  * tlb_entry_is_empty - return true if the entry is not in use
393  * @te: pointer to CPUTLBEntry
394  */
395 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
396 {
397     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
398 }
399 
400 /* Called with tlb_c.lock held */
401 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
402                                           target_ulong page)
403 {
404     if (tlb_hit_page_anyprot(tlb_entry, page)) {
405         memset(tlb_entry, -1, sizeof(*tlb_entry));
406         return true;
407     }
408     return false;
409 }
410 
411 /* Called with tlb_c.lock held */
412 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
413                                               target_ulong page)
414 {
415     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
416     int k;
417 
418     assert_cpu_is_self(env_cpu(env));
419     for (k = 0; k < CPU_VTLB_SIZE; k++) {
420         if (tlb_flush_entry_locked(&d->vtable[k], page)) {
421             tlb_n_used_entries_dec(env, mmu_idx);
422         }
423     }
424 }
425 
426 static void tlb_flush_page_locked(CPUArchState *env, int midx,
427                                   target_ulong page)
428 {
429     target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
430     target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
431 
432     /* Check if we need to flush due to large pages.  */
433     if ((page & lp_mask) == lp_addr) {
434         tlb_debug("forcing full flush midx %d ("
435                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
436                   midx, lp_addr, lp_mask);
437         tlb_flush_one_mmuidx_locked(env, midx);
438     } else {
439         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
440             tlb_n_used_entries_dec(env, midx);
441         }
442         tlb_flush_vtlb_page_locked(env, midx, page);
443     }
444 }
445 
446 /* As we are going to hijack the bottom bits of the page address for an
447  * mmuidx bit mask, we need to fail the build if we can't do that.
448  */
449 QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
450 
451 static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
452                                                 run_on_cpu_data data)
453 {
454     CPUArchState *env = cpu->env_ptr;
455     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
456     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
457     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
458     int mmu_idx;
459 
460     assert_cpu_is_self(cpu);
461 
462     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
463               addr, mmu_idx_bitmap);
464 
465     qemu_spin_lock(&env_tlb(env)->c.lock);
466     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
467         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
468             tlb_flush_page_locked(env, mmu_idx, addr);
469         }
470     }
471     qemu_spin_unlock(&env_tlb(env)->c.lock);
472 
473     tb_flush_jmp_cache(cpu, addr);
474 }
475 
476 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
477 {
478     target_ulong addr_and_mmu_idx;
479 
480     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
481 
482     /* This should already be page aligned */
483     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
484     addr_and_mmu_idx |= idxmap;
485 
486     if (!qemu_cpu_is_self(cpu)) {
487         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
488                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
489     } else {
490         tlb_flush_page_by_mmuidx_async_work(
491             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
492     }
493 }
494 
495 void tlb_flush_page(CPUState *cpu, target_ulong addr)
496 {
497     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
498 }
499 
500 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
501                                        uint16_t idxmap)
502 {
503     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
504     target_ulong addr_and_mmu_idx;
505 
506     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
507 
508     /* This should already be page aligned */
509     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
510     addr_and_mmu_idx |= idxmap;
511 
512     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
513     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
514 }
515 
516 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
517 {
518     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
519 }
520 
521 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
522                                               target_ulong addr,
523                                               uint16_t idxmap)
524 {
525     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
526     target_ulong addr_and_mmu_idx;
527 
528     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
529 
530     /* This should already be page aligned */
531     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
532     addr_and_mmu_idx |= idxmap;
533 
534     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
535     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
536 }
537 
538 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
539 {
540     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
541 }
542 
543 /* update the TLBs so that writes to code in the virtual page 'addr'
544    can be detected */
545 void tlb_protect_code(ram_addr_t ram_addr)
546 {
547     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
548                                              DIRTY_MEMORY_CODE);
549 }
550 
551 /* update the TLB so that writes in physical page 'ram_addr' are no longer
552    tested for self-modifying code */
553 void tlb_unprotect_code(ram_addr_t ram_addr)
554 {
555     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
556 }
557 
558 
559 /*
560  * Dirty write flag handling
561  *
562  * When the TCG code writes to a location it looks up the address in
563  * the TLB and uses that data to compute the final address. If any of
564  * the lower bits of the address are set then the slow path is forced.
565  * There are a number of reasons to do this but for normal RAM the
566  * most usual is detecting writes to code regions which may invalidate
567  * generated code.
568  *
569  * Other vCPUs might be reading their TLBs during guest execution, so we update
570  * te->addr_write with atomic_set. We don't need to worry about this for
571  * oversized guests as MTTCG is disabled for them.
572  *
573  * Called with tlb_c.lock held.
574  */
575 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
576                                          uintptr_t start, uintptr_t length)
577 {
578     uintptr_t addr = tlb_entry->addr_write;
579 
580     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
581         addr &= TARGET_PAGE_MASK;
582         addr += tlb_entry->addend;
583         if ((addr - start) < length) {
584 #if TCG_OVERSIZED_GUEST
585             tlb_entry->addr_write |= TLB_NOTDIRTY;
586 #else
587             atomic_set(&tlb_entry->addr_write,
588                        tlb_entry->addr_write | TLB_NOTDIRTY);
589 #endif
590         }
591     }
592 }
593 
594 /*
595  * Called with tlb_c.lock held.
596  * Called only from the vCPU context, i.e. the TLB's owner thread.
597  */
598 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
599 {
600     *d = *s;
601 }
602 
603 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
604  * the target vCPU).
605  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
606  * thing actually updated is the target TLB entry ->addr_write flags.
607  */
608 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
609 {
610     CPUArchState *env;
611 
612     int mmu_idx;
613 
614     env = cpu->env_ptr;
615     qemu_spin_lock(&env_tlb(env)->c.lock);
616     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
617         unsigned int i;
618         unsigned int n = tlb_n_entries(env, mmu_idx);
619 
620         for (i = 0; i < n; i++) {
621             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
622                                          start1, length);
623         }
624 
625         for (i = 0; i < CPU_VTLB_SIZE; i++) {
626             tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
627                                          start1, length);
628         }
629     }
630     qemu_spin_unlock(&env_tlb(env)->c.lock);
631 }
632 
633 /* Called with tlb_c.lock held */
634 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
635                                          target_ulong vaddr)
636 {
637     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
638         tlb_entry->addr_write = vaddr;
639     }
640 }
641 
642 /* update the TLB corresponding to virtual page vaddr
643    so that it is no longer dirty */
644 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
645 {
646     CPUArchState *env = cpu->env_ptr;
647     int mmu_idx;
648 
649     assert_cpu_is_self(cpu);
650 
651     vaddr &= TARGET_PAGE_MASK;
652     qemu_spin_lock(&env_tlb(env)->c.lock);
653     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
654         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
655     }
656 
657     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
658         int k;
659         for (k = 0; k < CPU_VTLB_SIZE; k++) {
660             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
661         }
662     }
663     qemu_spin_unlock(&env_tlb(env)->c.lock);
664 }
665 
666 /* Our TLB does not support large pages, so remember the area covered by
667    large pages and trigger a full TLB flush if these are invalidated.  */
668 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
669                                target_ulong vaddr, target_ulong size)
670 {
671     target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
672     target_ulong lp_mask = ~(size - 1);
673 
674     if (lp_addr == (target_ulong)-1) {
675         /* No previous large page.  */
676         lp_addr = vaddr;
677     } else {
678         /* Extend the existing region to include the new page.
679            This is a compromise between unnecessary flushes and
680            the cost of maintaining a full variable size TLB.  */
681         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
682         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
683             lp_mask <<= 1;
684         }
685     }
686     env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
687     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
688 }
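
/*
 * Illustrative example (made-up addresses): after a 2 MB page is entered
 * at vaddr 0x40000000, the tracked region is [0x40000000, 0x40200000)
 * (large_page_mask == ~(2 MB - 1)).  If a second 2 MB page is then
 * entered at 0x40600000, the widening loop above shifts lp_mask until
 * both pages share one region, giving the 8 MB range
 * [0x40000000, 0x40800000).  A later tlb_flush_page() anywhere in that
 * range forces a full flush of this mmu_idx.
 */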
689 
690 /* Add a new TLB entry. At most one entry for a given virtual address
691  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
692  * supplied size is only used by tlb_flush_page.
693  *
694  * Called from TCG-generated code, which is under an RCU read-side
695  * critical section.
696  */
697 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
698                              hwaddr paddr, MemTxAttrs attrs, int prot,
699                              int mmu_idx, target_ulong size)
700 {
701     CPUArchState *env = cpu->env_ptr;
702     CPUTLB *tlb = env_tlb(env);
703     CPUTLBDesc *desc = &tlb->d[mmu_idx];
704     MemoryRegionSection *section;
705     unsigned int index;
706     target_ulong address;
707     target_ulong code_address;
708     uintptr_t addend;
709     CPUTLBEntry *te, tn;
710     hwaddr iotlb, xlat, sz, paddr_page;
711     target_ulong vaddr_page;
712     int asidx = cpu_asidx_from_attrs(cpu, attrs);
713 
714     assert_cpu_is_self(cpu);
715 
716     if (size <= TARGET_PAGE_SIZE) {
717         sz = TARGET_PAGE_SIZE;
718     } else {
719         tlb_add_large_page(env, mmu_idx, vaddr, size);
720         sz = size;
721     }
722     vaddr_page = vaddr & TARGET_PAGE_MASK;
723     paddr_page = paddr & TARGET_PAGE_MASK;
724 
725     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
726                                                 &xlat, &sz, attrs, &prot);
727     assert(sz >= TARGET_PAGE_SIZE);
728 
729     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
730               " prot=%x idx=%d\n",
731               vaddr, paddr, prot, mmu_idx);
732 
733     address = vaddr_page;
734     if (size < TARGET_PAGE_SIZE) {
735         /*
736          * Slow-path the TLB entries; we will repeat the MMU check and TLB
737          * fill on every access.
738          */
739         address |= TLB_RECHECK;
740     }
741     if (!memory_region_is_ram(section->mr) &&
742         !memory_region_is_romd(section->mr)) {
743         /* IO memory case */
744         address |= TLB_MMIO;
745         addend = 0;
746     } else {
747         /* TLB_MMIO for rom/romd handled below */
748         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
749     }
750 
751     code_address = address;
752     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
753                                             paddr_page, xlat, prot, &address);
754 
755     index = tlb_index(env, mmu_idx, vaddr_page);
756     te = tlb_entry(env, mmu_idx, vaddr_page);
757 
758     /*
759      * Hold the TLB lock for the rest of the function. We could acquire/release
760      * the lock several times in the function, but it is faster to amortize the
761      * acquisition cost by acquiring it just once. Note that this leads to
762      * a longer critical section, but this is not a concern since the TLB lock
763      * is unlikely to be contended.
764      */
765     qemu_spin_lock(&tlb->c.lock);
766 
767     /* Note that the tlb is no longer clean.  */
768     tlb->c.dirty |= 1 << mmu_idx;
769 
770     /* Make sure there's no cached translation for the new page.  */
771     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
772 
773     /*
774      * Only evict the old entry to the victim tlb if it's for a
775      * different page; otherwise just overwrite the stale data.
776      */
777     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
778         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
779         CPUTLBEntry *tv = &desc->vtable[vidx];
780 
781         /* Evict the old entry into the victim tlb.  */
782         copy_tlb_helper_locked(tv, te);
783         desc->viotlb[vidx] = desc->iotlb[index];
784         tlb_n_used_entries_dec(env, mmu_idx);
785     }
786 
787     /* refill the tlb */
788     /*
789      * At this point iotlb contains a physical section number in the lower
790      * TARGET_PAGE_BITS, and either
791      *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
792      *  + the offset within section->mr of the page base (otherwise)
793      * We subtract the vaddr_page (which is page aligned and thus won't
794      * disturb the low bits) to give an offset which can be added to the
795      * (non-page-aligned) vaddr of the eventual memory access to get
796      * the MemoryRegion offset for the access. Note that the vaddr we
797      * subtract here is that of the page base, and not the same as the
798      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
799      */
800     desc->iotlb[index].addr = iotlb - vaddr_page;
801     desc->iotlb[index].attrs = attrs;
802 
803     /* Now calculate the new entry */
804     tn.addend = addend - vaddr_page;
805     if (prot & PAGE_READ) {
806         tn.addr_read = address;
807     } else {
808         tn.addr_read = -1;
809     }
810 
811     if (prot & PAGE_EXEC) {
812         tn.addr_code = code_address;
813     } else {
814         tn.addr_code = -1;
815     }
816 
817     tn.addr_write = -1;
818     if (prot & PAGE_WRITE) {
819         if ((memory_region_is_ram(section->mr) && section->readonly)
820             || memory_region_is_romd(section->mr)) {
821             /* Write access calls the I/O callback.  */
822             tn.addr_write = address | TLB_MMIO;
823         } else if (memory_region_is_ram(section->mr)
824                    && cpu_physical_memory_is_clean(
825                        memory_region_get_ram_addr(section->mr) + xlat)) {
826             tn.addr_write = address | TLB_NOTDIRTY;
827         } else {
828             tn.addr_write = address;
829         }
830         if (prot & PAGE_WRITE_INV) {
831             tn.addr_write |= TLB_INVALID_MASK;
832         }
833     }
834 
835     copy_tlb_helper_locked(te, &tn);
836     tlb_n_used_entries_inc(env, mmu_idx);
837     qemu_spin_unlock(&tlb->c.lock);
838 }
839 
840 /* Add a new TLB entry, but without specifying the memory
841  * transaction attributes to be used.
842  */
843 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
844                   hwaddr paddr, int prot,
845                   int mmu_idx, target_ulong size)
846 {
847     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
848                             prot, mmu_idx, size);
849 }
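
/*
 * Sketch of how a target's CPUClass::tlb_fill hook typically drives the
 * functions above.  This is a hypothetical example: foo_cpu_tlb_fill,
 * foo_mmu_translate and foo_raise_mmu_fault are illustrative names, not
 * code from this file.
 *
 *   static bool foo_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                                MMUAccessType access_type, int mmu_idx,
 *                                bool probe, uintptr_t retaddr)
 *   {
 *       hwaddr paddr;
 *       target_ulong page_size;
 *       int prot;
 *
 *       if (foo_mmu_translate(cs, addr, access_type, mmu_idx,
 *                             &paddr, &prot, &page_size) == 0) {
 *           tlb_set_page(cs, addr & TARGET_PAGE_MASK,
 *                        paddr & TARGET_PAGE_MASK, prot,
 *                        mmu_idx, page_size);
 *           return true;
 *       }
 *       if (probe) {
 *           return false;
 *       }
 *       foo_raise_mmu_fault(cs, addr, access_type);
 *       cpu_loop_exit_restore(cs, retaddr);
 *   }
 */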
850 
851 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
852 {
853     ram_addr_t ram_addr;
854 
855     ram_addr = qemu_ram_addr_from_host(ptr);
856     if (ram_addr == RAM_ADDR_INVALID) {
857         error_report("Bad ram pointer %p", ptr);
858         abort();
859     }
860     return ram_addr;
861 }
862 
863 /*
864  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
865  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
866  * be discarded and looked up again (e.g. via tlb_entry()).
867  */
868 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
869                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
870 {
871     CPUClass *cc = CPU_GET_CLASS(cpu);
872     bool ok;
873 
874     /*
875      * This is not a probe, so the only valid return is success; failure
876      * should result in an exception + longjmp to the cpu loop.
877      */
878     ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
879     assert(ok);
880 }
881 
882 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
883                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
884                          MMUAccessType access_type, int size)
885 {
886     CPUState *cpu = env_cpu(env);
887     hwaddr mr_offset;
888     MemoryRegionSection *section;
889     MemoryRegion *mr;
890     uint64_t val;
891     bool locked = false;
892     MemTxResult r;
893 
894     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
895     mr = section->mr;
896     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
897     cpu->mem_io_pc = retaddr;
898     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
899         cpu_io_recompile(cpu, retaddr);
900     }
901 
902     cpu->mem_io_vaddr = addr;
903     cpu->mem_io_access_type = access_type;
904 
905     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
906         qemu_mutex_lock_iothread();
907         locked = true;
908     }
909     r = memory_region_dispatch_read(mr, mr_offset, &val,
910                                     size_memop(size) | MO_TE,
911                                     iotlbentry->attrs);
912     if (r != MEMTX_OK) {
913         hwaddr physaddr = mr_offset +
914             section->offset_within_address_space -
915             section->offset_within_region;
916 
917         cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
918                                mmu_idx, iotlbentry->attrs, r, retaddr);
919     }
920     if (locked) {
921         qemu_mutex_unlock_iothread();
922     }
923 
924     return val;
925 }
926 
927 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
928                       int mmu_idx, uint64_t val, target_ulong addr,
929                       uintptr_t retaddr, int size)
930 {
931     CPUState *cpu = env_cpu(env);
932     hwaddr mr_offset;
933     MemoryRegionSection *section;
934     MemoryRegion *mr;
935     bool locked = false;
936     MemTxResult r;
937 
938     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
939     mr = section->mr;
940     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
941     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
942         cpu_io_recompile(cpu, retaddr);
943     }
944     cpu->mem_io_vaddr = addr;
945     cpu->mem_io_pc = retaddr;
946 
947     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
948         qemu_mutex_lock_iothread();
949         locked = true;
950     }
951     r = memory_region_dispatch_write(mr, mr_offset, val,
952                                      size_memop(size) | MO_TE,
953                                      iotlbentry->attrs);
954     if (r != MEMTX_OK) {
955         hwaddr physaddr = mr_offset +
956             section->offset_within_address_space -
957             section->offset_within_region;
958 
959         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
960                                mmu_idx, iotlbentry->attrs, r, retaddr);
961     }
962     if (locked) {
963         qemu_mutex_unlock_iothread();
964     }
965 }
966 
967 static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
968 {
969 #if TCG_OVERSIZED_GUEST
970     return *(target_ulong *)((uintptr_t)entry + ofs);
971 #else
972     /* ofs might correspond to .addr_write, so use atomic_read */
973     return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
974 #endif
975 }
976 
977 /* Return true if ADDR is present in the victim tlb, and has been copied
978    back to the main tlb.  */
979 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
980                            size_t elt_ofs, target_ulong page)
981 {
982     size_t vidx;
983 
984     assert_cpu_is_self(env_cpu(env));
985     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
986         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
987         target_ulong cmp;
988 
989         /* elt_ofs might correspond to .addr_write, so use atomic_read */
990 #if TCG_OVERSIZED_GUEST
991         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
992 #else
993         cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
994 #endif
995 
996         if (cmp == page) {
997             /* Found entry in victim tlb, swap tlb and iotlb.  */
998             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
999 
1000             qemu_spin_lock(&env_tlb(env)->c.lock);
1001             copy_tlb_helper_locked(&tmptlb, tlb);
1002             copy_tlb_helper_locked(tlb, vtlb);
1003             copy_tlb_helper_locked(vtlb, &tmptlb);
1004             qemu_spin_unlock(&env_tlb(env)->c.lock);
1005 
1006             CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1007             CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1008             tmpio = *io; *io = *vio; *vio = tmpio;
1009             return true;
1010         }
1011     }
1012     return false;
1013 }
1014 
1015 /* Macro to call the above, with local variables from the use context.  */
1016 #define VICTIM_TLB_HIT(TY, ADDR) \
1017   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1018                  (ADDR) & TARGET_PAGE_MASK)
1019 
1020 /* NOTE: this function can trigger an exception */
1021 /* NOTE2: the returned address is not exactly the physical address: it
1022  * is actually a ram_addr_t (in system mode; the user mode emulation
1023  * version of this function returns a guest virtual address).
1024  */
1025 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1026 {
1027     uintptr_t mmu_idx = cpu_mmu_index(env, true);
1028     uintptr_t index = tlb_index(env, mmu_idx, addr);
1029     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1030     void *p;
1031 
1032     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1033         if (!VICTIM_TLB_HIT(addr_code, addr)) {
1034             tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1035             index = tlb_index(env, mmu_idx, addr);
1036             entry = tlb_entry(env, mmu_idx, addr);
1037         }
1038         assert(tlb_hit(entry->addr_code, addr));
1039     }
1040 
1041     if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
1042         /*
1043          * Return -1 if we can't translate and execute from an entire
1044          * page of RAM here, which will cause us to execute by loading
1045          * and translating one insn at a time, without caching:
1046          *  - TLB_RECHECK: means the MMU protection covers a smaller range
1047          *    than a target page, so we must redo the MMU check every insn
1048          *  - TLB_MMIO: region is not backed by RAM
1049          */
1050         return -1;
1051     }
1052 
1053     p = (void *)((uintptr_t)addr + entry->addend);
1054     return qemu_ram_addr_from_host_nofail(p);
1055 }
1056 
1057 /* Probe for whether the specified guest write access is permitted.
1058  * If it is not permitted then an exception will be taken in the same
1059  * way as if this were a real write access (and we will not return).
1060  * Otherwise the function will return, and there will be a valid
1061  * entry in the TLB for this access.
1062  */
1063 void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1064                  uintptr_t retaddr)
1065 {
1066     uintptr_t index = tlb_index(env, mmu_idx, addr);
1067     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1068 
1069     if (!tlb_hit(tlb_addr_write(entry), addr)) {
1070         /* TLB entry is for a different page */
1071         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1072             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1073                      mmu_idx, retaddr);
1074         }
1075     }
1076 }
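
/*
 * Example use (hypothetical target helper, not from this file): probing
 * up front means that a subsequent direct store cannot fault halfway
 * through an emulated instruction:
 *
 *   probe_write(env, addr, 8, cpu_mmu_index(env, false), GETPC());
 *   ... the 8-byte store to addr is now known to be permitted ...
 */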
1077 
1078 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1079                         MMUAccessType access_type, int mmu_idx)
1080 {
1081     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1082     uintptr_t tlb_addr, page;
1083     size_t elt_ofs;
1084 
1085     switch (access_type) {
1086     case MMU_DATA_LOAD:
1087         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1088         break;
1089     case MMU_DATA_STORE:
1090         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1091         break;
1092     case MMU_INST_FETCH:
1093         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1094         break;
1095     default:
1096         g_assert_not_reached();
1097     }
1098 
1099     page = addr & TARGET_PAGE_MASK;
1100     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1101 
1102     if (!tlb_hit_page(tlb_addr, page)) {
1103         uintptr_t index = tlb_index(env, mmu_idx, addr);
1104 
1105         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
1106             CPUState *cs = env_cpu(env);
1107             CPUClass *cc = CPU_GET_CLASS(cs);
1108 
1109             if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
1110                 /* Non-faulting page table read failed.  */
1111                 return NULL;
1112             }
1113 
1114             /* TLB resize via tlb_fill may have moved the entry.  */
1115             entry = tlb_entry(env, mmu_idx, addr);
1116         }
1117         tlb_addr = tlb_read_ofs(entry, elt_ofs);
1118     }
1119 
1120     if (tlb_addr & ~TARGET_PAGE_MASK) {
1121         /* IO access */
1122         return NULL;
1123     }
1124 
1125     return (void *)((uintptr_t)addr + entry->addend);
1126 }
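
/*
 * Example caller (hypothetical, not from this file): tlb_vaddr_to_host()
 * lets a helper take a fast path when the page is directly addressable
 * and fall back to the normal store helpers otherwise; NULL means "use
 * the slow path", not an error.  Note the returned pointer is only valid
 * within the containing target page.
 *
 *   void *p = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
 *   if (p) {
 *       stb_p(p, val);                        // direct host access
 *   } else {
 *       cpu_stb_data_ra(env, addr, val, ra);  // full softmmu store path
 *   }
 */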
1127 
1128 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1129  * operations or IO operations to proceed.  Return the host address.  */
1130 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1131                                TCGMemOpIdx oi, uintptr_t retaddr,
1132                                NotDirtyInfo *ndi)
1133 {
1134     size_t mmu_idx = get_mmuidx(oi);
1135     uintptr_t index = tlb_index(env, mmu_idx, addr);
1136     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1137     target_ulong tlb_addr = tlb_addr_write(tlbe);
1138     MemOp mop = get_memop(oi);
1139     int a_bits = get_alignment_bits(mop);
1140     int s_bits = mop & MO_SIZE;
1141     void *hostaddr;
1142 
1143     /* Adjust the given return address.  */
1144     retaddr -= GETPC_ADJ;
1145 
1146     /* Enforce guest required alignment.  */
1147     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1148         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1149         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1150                              mmu_idx, retaddr);
1151     }
1152 
1153     /* Enforce qemu required alignment.  */
1154     if (unlikely(addr & ((1 << s_bits) - 1))) {
1155         /* We get here if guest alignment was not requested,
1156            or was not enforced by cpu_unaligned_access above.
1157            We might widen the access and emulate, but for now
1158            mark an exception and exit the cpu loop.  */
1159         goto stop_the_world;
1160     }
1161 
1162     /* Check TLB entry and enforce page permissions.  */
1163     if (!tlb_hit(tlb_addr, addr)) {
1164         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1165             tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
1166                      mmu_idx, retaddr);
1167             index = tlb_index(env, mmu_idx, addr);
1168             tlbe = tlb_entry(env, mmu_idx, addr);
1169         }
1170         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1171     }
1172 
1173     /* Notice an IO access or a needs-MMU-lookup access */
1174     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1175         /* There's really nothing that can be done to
1176            support this apart from stop-the-world.  */
1177         goto stop_the_world;
1178     }
1179 
1180     /* Let the guest notice RMW on a write-only page.  */
1181     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1182         tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
1183                  mmu_idx, retaddr);
1184         /* Since we don't support reads and writes to different addresses,
1185            and we do have the proper page loaded for write, this shouldn't
1186            ever return.  But just in case, handle via stop-the-world.  */
1187         goto stop_the_world;
1188     }
1189 
1190     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1191 
1192     ndi->active = false;
1193     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1194         ndi->active = true;
1195         memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
1196                                       qemu_ram_addr_from_host_nofail(hostaddr),
1197                                       1 << s_bits);
1198     }
1199 
1200     return hostaddr;
1201 
1202  stop_the_world:
1203     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1204 }
1205 
1206 #ifdef TARGET_WORDS_BIGENDIAN
1207 #define NEED_BE_BSWAP 0
1208 #define NEED_LE_BSWAP 1
1209 #else
1210 #define NEED_BE_BSWAP 1
1211 #define NEED_LE_BSWAP 0
1212 #endif
1213 
1214 /*
1215  * Byte Swap Helper
1216  *
1217  * This should all compile away to dead code depending on the build
1218  * configuration and access type.
1219  */
1220 
1221 static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
1222 {
1223     if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
1224         switch (size) {
1225         case 1: return val;
1226         case 2: return bswap16(val);
1227         case 4: return bswap32(val);
1228         case 8: return bswap64(val);
1229         default:
1230             g_assert_not_reached();
1231         }
1232     } else {
1233         return val;
1234     }
1235 }
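
/*
 * Illustrative example: on a little-endian target build (NEED_BE_BSWAP
 * == 1), a big-endian access is swapped, e.g. handle_bswap(0x11223344, 4,
 * true) == 0x44332211, while a little-endian access is returned
 * unchanged.  When the access endianness matches the target's, the
 * condition is compile-time false and the helper folds away entirely.
 */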
1236 
1237 /*
1238  * Load Helpers
1239  *
1240  * We support two different access types. SOFTMMU_CODE_ACCESS is
1241  * specifically for reading instructions from system memory. It is
1242  * called by the translation loop and in some helpers where the code
1243  * is disassembled. It shouldn't be called directly by guest code.
1244  */
1245 
1246 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1247                                 TCGMemOpIdx oi, uintptr_t retaddr);
1248 
1249 static inline uint64_t __attribute__((always_inline))
1250 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1251             uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
1252             FullLoadHelper *full_load)
1253 {
1254     uintptr_t mmu_idx = get_mmuidx(oi);
1255     uintptr_t index = tlb_index(env, mmu_idx, addr);
1256     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1257     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1258     const size_t tlb_off = code_read ?
1259         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1260     const MMUAccessType access_type =
1261         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1262     unsigned a_bits = get_alignment_bits(get_memop(oi));
1263     void *haddr;
1264     uint64_t res;
1265 
1266     /* Handle CPU specific unaligned behaviour */
1267     if (addr & ((1 << a_bits) - 1)) {
1268         cpu_unaligned_access(env_cpu(env), addr, access_type,
1269                              mmu_idx, retaddr);
1270     }
1271 
1272     /* If the TLB entry is for a different page, reload and try again.  */
1273     if (!tlb_hit(tlb_addr, addr)) {
1274         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1275                             addr & TARGET_PAGE_MASK)) {
1276             tlb_fill(env_cpu(env), addr, size,
1277                      access_type, mmu_idx, retaddr);
1278             index = tlb_index(env, mmu_idx, addr);
1279             entry = tlb_entry(env, mmu_idx, addr);
1280         }
1281         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1282     }
1283 
1284     /* Handle an IO access.  */
1285     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1286         if ((addr & (size - 1)) != 0) {
1287             goto do_unaligned_access;
1288         }
1289 
1290         if (tlb_addr & TLB_RECHECK) {
1291             /*
1292              * This is a TLB_RECHECK access, where the MMU protection
1293              * covers a smaller range than a target page, and we must
1294              * repeat the MMU check here. This tlb_fill() call might
1295              * longjump out if this access should cause a guest exception.
1296              */
1297             tlb_fill(env_cpu(env), addr, size,
1298                      access_type, mmu_idx, retaddr);
1299             index = tlb_index(env, mmu_idx, addr);
1300             entry = tlb_entry(env, mmu_idx, addr);
1301 
1302             tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1303             tlb_addr &= ~TLB_RECHECK;
1304             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1305                 /* RAM access */
1306                 goto do_aligned_access;
1307             }
1308         }
1309 
1310         /* TODO: Merge bswap into io_readx -> memory_region_dispatch_read.  */
1311         res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
1312                        mmu_idx, addr, retaddr, access_type, size);
1313         return handle_bswap(res, size, big_endian);
1314     }
1315 
1316     /* Handle slow unaligned access (it spans two pages or IO).  */
1317     if (size > 1
1318         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1319                     >= TARGET_PAGE_SIZE)) {
1320         target_ulong addr1, addr2;
1321         uint64_t r1, r2;
1322         unsigned shift;
1323     do_unaligned_access:
1324         addr1 = addr & ~((target_ulong)size - 1);
1325         addr2 = addr1 + size;
1326         r1 = full_load(env, addr1, oi, retaddr);
1327         r2 = full_load(env, addr2, oi, retaddr);
1328         shift = (addr & (size - 1)) * 8;
1329 
1330         if (big_endian) {
1331             /* Big-endian combine.  */
1332             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1333         } else {
1334             /* Little-endian combine.  */
1335             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1336         }
1337         return res & MAKE_64BIT_MASK(0, size * 8);
1338     }
1339 
1340  do_aligned_access:
1341     haddr = (void *)((uintptr_t)addr + entry->addend);
1342     switch (size) {
1343     case 1:
1344         res = ldub_p(haddr);
1345         break;
1346     case 2:
1347         if (big_endian) {
1348             res = lduw_be_p(haddr);
1349         } else {
1350             res = lduw_le_p(haddr);
1351         }
1352         break;
1353     case 4:
1354         if (big_endian) {
1355             res = (uint32_t)ldl_be_p(haddr);
1356         } else {
1357             res = (uint32_t)ldl_le_p(haddr);
1358         }
1359         break;
1360     case 8:
1361         if (big_endian) {
1362             res = ldq_be_p(haddr);
1363         } else {
1364             res = ldq_le_p(haddr);
1365         }
1366         break;
1367     default:
1368         g_assert_not_reached();
1369     }
1370 
1371     return res;
1372 }
1373 
1374 /*
1375  * For the benefit of TCG generated code, we want to avoid the
1376  * complication of ABI-specific return type promotion and always
1377  * return a value extended to the register size of the host. This is
1378  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1379  * data, and for that we always have uint64_t.
1380  *
1381  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1382  */
1383 
1384 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1385                               TCGMemOpIdx oi, uintptr_t retaddr)
1386 {
1387     return load_helper(env, addr, oi, retaddr, 1, false, false,
1388                        full_ldub_mmu);
1389 }
1390 
1391 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1392                                      TCGMemOpIdx oi, uintptr_t retaddr)
1393 {
1394     return full_ldub_mmu(env, addr, oi, retaddr);
1395 }
1396 
1397 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1398                                  TCGMemOpIdx oi, uintptr_t retaddr)
1399 {
1400     return load_helper(env, addr, oi, retaddr, 2, false, false,
1401                        full_le_lduw_mmu);
1402 }
1403 
1404 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1405                                     TCGMemOpIdx oi, uintptr_t retaddr)
1406 {
1407     return full_le_lduw_mmu(env, addr, oi, retaddr);
1408 }
1409 
1410 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1411                                  TCGMemOpIdx oi, uintptr_t retaddr)
1412 {
1413     return load_helper(env, addr, oi, retaddr, 2, true, false,
1414                        full_be_lduw_mmu);
1415 }
1416 
1417 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1418                                     TCGMemOpIdx oi, uintptr_t retaddr)
1419 {
1420     return full_be_lduw_mmu(env, addr, oi, retaddr);
1421 }
1422 
1423 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1424                                  TCGMemOpIdx oi, uintptr_t retaddr)
1425 {
1426     return load_helper(env, addr, oi, retaddr, 4, false, false,
1427                        full_le_ldul_mmu);
1428 }
1429 
1430 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1431                                     TCGMemOpIdx oi, uintptr_t retaddr)
1432 {
1433     return full_le_ldul_mmu(env, addr, oi, retaddr);
1434 }
1435 
1436 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1437                                  TCGMemOpIdx oi, uintptr_t retaddr)
1438 {
1439     return load_helper(env, addr, oi, retaddr, 4, true, false,
1440                        full_be_ldul_mmu);
1441 }
1442 
1443 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1444                                     TCGMemOpIdx oi, uintptr_t retaddr)
1445 {
1446     return full_be_ldul_mmu(env, addr, oi, retaddr);
1447 }
1448 
1449 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1450                            TCGMemOpIdx oi, uintptr_t retaddr)
1451 {
1452     return load_helper(env, addr, oi, retaddr, 8, false, false,
1453                        helper_le_ldq_mmu);
1454 }
1455 
1456 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1457                            TCGMemOpIdx oi, uintptr_t retaddr)
1458 {
1459     return load_helper(env, addr, oi, retaddr, 8, true, false,
1460                        helper_be_ldq_mmu);
1461 }
1462 
1463 /*
1464  * Provide signed versions of the load routines as well.  We can of course
1465  * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
1466  */
1467 
1468 
1469 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1470                                      TCGMemOpIdx oi, uintptr_t retaddr)
1471 {
1472     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1473 }
1474 
1475 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1476                                     TCGMemOpIdx oi, uintptr_t retaddr)
1477 {
1478     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1479 }
1480 
1481 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1482                                     TCGMemOpIdx oi, uintptr_t retaddr)
1483 {
1484     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1485 }
1486 
1487 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1488                                     TCGMemOpIdx oi, uintptr_t retaddr)
1489 {
1490     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1491 }
1492 
1493 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1494                                     TCGMemOpIdx oi, uintptr_t retaddr)
1495 {
1496     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1497 }
1498 
1499 /*
1500  * Store Helpers
1501  */
1502 
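/*
 * Store SIZE bytes of VAL to guest virtual address ADDR.  OI packs the
 * mmu index and the MemOp; RETADDR is the return address into the
 * generated code, used to restore guest state if tlb_fill() faults.
 * The common case writes straight to host memory through the TLB
 * entry's addend; the remaining paths handle unaligned accesses, TLB
 * refills, MMIO, TLB_RECHECK (sub-page) mappings, and stores that
 * span a page boundary.
 */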
1503 static inline void __attribute__((always_inline))
1504 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1505              TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
1506 {
1507     uintptr_t mmu_idx = get_mmuidx(oi);
1508     uintptr_t index = tlb_index(env, mmu_idx, addr);
1509     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1510     target_ulong tlb_addr = tlb_addr_write(entry);
1511     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1512     unsigned a_bits = get_alignment_bits(get_memop(oi));
1513     void *haddr;
1514 
1515     /* Handle CPU specific unaligned behaviour */
1516     if (addr & ((1 << a_bits) - 1)) {
1517         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1518                              mmu_idx, retaddr);
1519     }
1520 
1521     /* If the TLB entry is for a different page, reload and try again.  */
1522     if (!tlb_hit(tlb_addr, addr)) {
1523         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1524             addr & TARGET_PAGE_MASK)) {
1525             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1526                      mmu_idx, retaddr);
1527             index = tlb_index(env, mmu_idx, addr);
1528             entry = tlb_entry(env, mmu_idx, addr);
1529         }
1530         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1531     }
1532 
1533     /* Handle an IO access.  */
1534     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1535         if ((addr & (size - 1)) != 0) {
1536             goto do_unaligned_access;
1537         }
1538 
1539         if (tlb_addr & TLB_RECHECK) {
1540             /*
1541              * This is a TLB_RECHECK access, where the MMU protection
1542              * covers a smaller range than a target page, and we must
1543              * repeat the MMU check here. This tlb_fill() call might
1544              * longjump out if this access should cause a guest exception.
1545              */
1546             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1547                      mmu_idx, retaddr);
1548             index = tlb_index(env, mmu_idx, addr);
1549             entry = tlb_entry(env, mmu_idx, addr);
1550 
1551             tlb_addr = tlb_addr_write(entry);
1552             tlb_addr &= ~TLB_RECHECK;
1553             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1554                 /* RAM access */
1555                 goto do_aligned_access;
1556             }
1557         }
1558 
1559         /* TODO: Merge bswap into io_writex -> memory_region_dispatch_write.  */
1560         io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
1561                   handle_bswap(val, size, big_endian),
1562                   addr, retaddr, size);
1563         return;
1564     }
1565 
1566     /* Handle a slow unaligned access: it spans two pages or is I/O.  */
1567     if (size > 1
1568         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1569                      >= TARGET_PAGE_SIZE)) {
1570         int i;
1571         uintptr_t index2;
1572         CPUTLBEntry *entry2;
1573         target_ulong page2, tlb_addr2;
1574     do_unaligned_access:
1575         /*
1576          * Ensure the second page is in the TLB.  Note that the first page
1577          * is already guaranteed to be filled, and that the second page
1578          * cannot evict the first.
1579          */
1580         page2 = (addr + size) & TARGET_PAGE_MASK;
1581         index2 = tlb_index(env, mmu_idx, page2);
1582         entry2 = tlb_entry(env, mmu_idx, page2);
1583         tlb_addr2 = tlb_addr_write(entry2);
1584         if (!tlb_hit_page(tlb_addr2, page2)
1585             && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
1586                                page2 & TARGET_PAGE_MASK)) {
1587             tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
1588                      mmu_idx, retaddr);
1589         }
1590 
1591         /*
1592          * XXX: not efficient, but simple.
1593          * This loop must go in the forward direction to avoid issues
1594          * with self-modifying code in 64-bit Windows guests.
1595          */
1596         for (i = 0; i < size; ++i) {
1597             uint8_t val8;
1598             if (big_endian) {
1599                 /* Big-endian extract.  */
1600                 val8 = val >> (((size - 1) * 8) - (i * 8));
1601             } else {
1602                 /* Little-endian extract.  */
1603                 val8 = val >> (i * 8);
1604             }
1605             helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
1606         }
1607         return;
1608     }
1609 
1610  do_aligned_access:
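    /*
     * RAM store: the page is present and directly addressable, so write
     * through the host address derived from the TLB entry's addend.
     */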
1611     haddr = (void *)((uintptr_t)addr + entry->addend);
1612     switch (size) {
1613     case 1:
1614         stb_p(haddr, val);
1615         break;
1616     case 2:
1617         if (big_endian) {
1618             stw_be_p(haddr, val);
1619         } else {
1620             stw_le_p(haddr, val);
1621         }
1622         break;
1623     case 4:
1624         if (big_endian) {
1625             stl_be_p(haddr, val);
1626         } else {
1627             stl_le_p(haddr, val);
1628         }
1629         break;
1630     case 8:
1631         if (big_endian) {
1632             stq_be_p(haddr, val);
1633         } else {
1634             stq_le_p(haddr, val);
1635         }
1636         break;
1637     default:
1638         g_assert_not_reached();
1639         break;
1640     }
1641 }
1642 
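/*
 * The wrappers below instantiate store_helper() once per access size
 * and endianness.  Because store_helper() is always_inline and SIZE is
 * a compile-time constant at each call site, each wrapper reduces to a
 * specialised store path.
 */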
1643 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1644                         TCGMemOpIdx oi, uintptr_t retaddr)
1645 {
1646     store_helper(env, addr, val, oi, retaddr, 1, false);
1647 }
1648 
1649 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1650                        TCGMemOpIdx oi, uintptr_t retaddr)
1651 {
1652     store_helper(env, addr, val, oi, retaddr, 2, false);
1653 }
1654 
1655 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1656                        TCGMemOpIdx oi, uintptr_t retaddr)
1657 {
1658     store_helper(env, addr, val, oi, retaddr, 2, true);
1659 }
1660 
1661 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1662                        TCGMemOpIdx oi, uintptr_t retaddr)
1663 {
1664     store_helper(env, addr, val, oi, retaddr, 4, false);
1665 }
1666 
1667 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1668                        TCGMemOpIdx oi, uintptr_t retaddr)
1669 {
1670     store_helper(env, addr, val, oi, retaddr, 4, true);
1671 }
1672 
1673 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1674                        TCGMemOpIdx oi, uintptr_t retaddr)
1675 {
1676     store_helper(env, addr, val, oi, retaddr, 8, false);
1677 }
1678 
1679 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1680                        TCGMemOpIdx oi, uintptr_t retaddr)
1681 {
1682     store_helper(env, addr, val, oi, retaddr, 8, true);
1683 }
1684 
1685 /* The first set of helpers takes OI and RETADDR as explicit arguments,
1686    which makes them callable from other helpers.  */
1687 
1688 #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1689 #define ATOMIC_NAME(X) \
1690     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1691 #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
1692 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
1693 #define ATOMIC_MMU_CLEANUP                              \
1694     do {                                                \
1695         if (unlikely(ndi.active)) {                     \
1696             memory_notdirty_write_complete(&ndi);       \
1697         }                                               \
1698     } while (0)
1699 
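/*
 * Each inclusion of "atomic_template.h" with DATA_SIZE defined emits
 * the atomic helpers (compare-and-swap, exchange and fetch-op variants)
 * for that operand width.  ATOMIC_MMU_LOOKUP resolves the guest address
 * to a host pointer, filling NDI when the target page needs dirty
 * tracking, and ATOMIC_MMU_CLEANUP completes that bookkeeping.
 */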
1700 #define DATA_SIZE 1
1701 #include "atomic_template.h"
1702 
1703 #define DATA_SIZE 2
1704 #include "atomic_template.h"
1705 
1706 #define DATA_SIZE 4
1707 #include "atomic_template.h"
1708 
1709 #ifdef CONFIG_ATOMIC64
1710 #define DATA_SIZE 8
1711 #include "atomic_template.h"
1712 #endif
1713 
1714 #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1715 #define DATA_SIZE 16
1716 #include "atomic_template.h"
1717 #endif
1718 
1719 /* The second set of helpers is callable directly from TCG-generated code.  */
1720 
1721 #undef EXTRA_ARGS
1722 #undef ATOMIC_NAME
1723 #undef ATOMIC_MMU_LOOKUP
1724 #define EXTRA_ARGS         , TCGMemOpIdx oi
1725 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
1726 #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
1727 
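/*
 * As above, one inclusion per operand width; the 16-byte variants are
 * only provided in the first, retaddr-taking set.
 */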
1728 #define DATA_SIZE 1
1729 #include "atomic_template.h"
1730 
1731 #define DATA_SIZE 2
1732 #include "atomic_template.h"
1733 
1734 #define DATA_SIZE 4
1735 #include "atomic_template.h"
1736 
1737 #ifdef CONFIG_ATOMIC64
1738 #define DATA_SIZE 8
1739 #include "atomic_template.h"
1740 #endif
1741 
1742 /* Code access functions.  */
1743 
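/*
 * The _cmmu helpers are used for instruction fetch rather than data
 * loads: load_helper() is called with code_read == true, so the lookup
 * goes through the TLB entry's addr_code field and faults are reported
 * as MMU_INST_FETCH.
 */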
1744 static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
1745                                TCGMemOpIdx oi, uintptr_t retaddr)
1746 {
1747     return load_helper(env, addr, oi, retaddr, 1, false, true,
1748                        full_ldub_cmmu);
1749 }
1750 
1751 uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1752                             TCGMemOpIdx oi, uintptr_t retaddr)
1753 {
1754     return full_ldub_cmmu(env, addr, oi, retaddr);
1755 }
1756 
1757 static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
1758                                   TCGMemOpIdx oi, uintptr_t retaddr)
1759 {
1760     return load_helper(env, addr, oi, retaddr, 2, false, true,
1761                        full_le_lduw_cmmu);
1762 }
1763 
1764 uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1765                             TCGMemOpIdx oi, uintptr_t retaddr)
1766 {
1767     return full_le_lduw_cmmu(env, addr, oi, retaddr);
1768 }
1769 
1770 static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
1771                                   TCGMemOpIdx oi, uintptr_t retaddr)
1772 {
1773     return load_helper(env, addr, oi, retaddr, 2, true, true,
1774                        full_be_lduw_cmmu);
1775 }
1776 
1777 uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1778                             TCGMemOpIdx oi, uintptr_t retaddr)
1779 {
1780     return full_be_lduw_cmmu(env, addr, oi, retaddr);
1781 }
1782 
1783 static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
1784                                   TCGMemOpIdx oi, uintptr_t retaddr)
1785 {
1786     return load_helper(env, addr, oi, retaddr, 4, false, true,
1787                        full_le_ldul_cmmu);
1788 }
1789 
1790 uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1791                             TCGMemOpIdx oi, uintptr_t retaddr)
1792 {
1793     return full_le_ldul_cmmu(env, addr, oi, retaddr);
1794 }
1795 
1796 static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
1797                                   TCGMemOpIdx oi, uintptr_t retaddr)
1798 {
1799     return load_helper(env, addr, oi, retaddr, 4, true, true,
1800                        full_be_ldul_cmmu);
1801 }
1802 
1803 uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1804                             TCGMemOpIdx oi, uintptr_t retaddr)
1805 {
1806     return full_be_ldul_cmmu(env, addr, oi, retaddr);
1807 }
1808 
1809 uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1810                             TCGMemOpIdx oi, uintptr_t retaddr)
1811 {
1812     return load_helper(env, addr, oi, retaddr, 8, false, true,
1813                        helper_le_ldq_cmmu);
1814 }
1815 
1816 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1817                             TCGMemOpIdx oi, uintptr_t retaddr)
1818 {
1819     return load_helper(env, addr, oi, retaddr, 8, true, true,
1820                        helper_be_ldq_cmmu);
1821 }
1822